Diffstat (limited to 'drivers')
2600 files changed, 100259 insertions, 35572 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5eef4cb4f70e..82b96ee8624c 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -58,14 +58,25 @@ config ACPI_CCA_REQUIRED bool config ACPI_DEBUGGER - bool "AML debugger interface (EXPERIMENTAL)" + bool "AML debugger interface" select ACPI_DEBUG help - Enable in-kernel debugging of AML facilities: statistics, internal - object dump, single step control method execution. + Enable in-kernel debugging of AML facilities: statistics, + internal object dump, single step control method execution. This is still under development, currently enabling this only results in the compilation of the ACPICA debugger files. +if ACPI_DEBUGGER + +config ACPI_DEBUGGER_USER + tristate "Userspace debugger accessiblity" + depends on DEBUG_FS + help + Export /sys/kernel/debug/acpi/acpidbg for userspace utilities + to access the debugger functionalities. + +endif + config ACPI_SLEEP bool depends on SUSPEND || HIBERNATION diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 675eaf337178..cb648a49543a 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -8,13 +8,13 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # # ACPI Boot-Time Table Parsing # -obj-y += tables.o +obj-$(CONFIG_ACPI) += tables.o obj-$(CONFIG_X86) += blacklist.o # # ACPI Core Subsystem (Interpreter) # -obj-y += acpi.o \ +obj-$(CONFIG_ACPI) += acpi.o \ acpica/ # All the builtin files are in the "acpi." module_param namespace. @@ -66,10 +66,10 @@ obj-$(CONFIG_ACPI_FAN) += fan.o obj-$(CONFIG_ACPI_VIDEO) += video.o obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o -obj-y += container.o +obj-$(CONFIG_ACPI) += container.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o obj-$(CONFIG_ACPI_NFIT) += nfit.o -obj-y += acpi_memhotplug.o +obj-$(CONFIG_ACPI) += acpi_memhotplug.o obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o obj-$(CONFIG_ACPI_BATTERY) += battery.o obj-$(CONFIG_ACPI_SBS) += sbshc.o @@ -79,6 +79,7 @@ obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o obj-$(CONFIG_ACPI_BGRT) += bgrt.o obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o +obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o # processor has its own "processor." 
module_param namespace processor-y := processor_driver.o diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index a450e7af877c..d507cf6deda0 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -51,7 +51,7 @@ struct apd_private_data { const struct apd_device_desc *dev_desc; }; -#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE +#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) #define APD_ADDR(desc) ((unsigned long)&desc) static int acpi_apd_setup(struct apd_private_data *pdata) @@ -71,6 +71,7 @@ static int acpi_apd_setup(struct apd_private_data *pdata) return 0; } +#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE static struct apd_device_desc cz_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 133000000, @@ -80,6 +81,14 @@ static struct apd_device_desc cz_uart_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 48000000, }; +#endif + +#ifdef CONFIG_ARM64 +static struct apd_device_desc xgene_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 100000000, +}; +#endif #else @@ -132,9 +141,14 @@ static int acpi_apd_create_device(struct acpi_device *adev, static const struct acpi_device_id acpi_apd_device_ids[] = { /* Generic apd devices */ +#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE { "AMD0010", APD_ADDR(cz_i2c_desc) }, { "AMD0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, +#endif +#ifdef CONFIG_ARM64 + { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, +#endif { } }; diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c new file mode 100644 index 000000000000..15e4604efba7 --- /dev/null +++ b/drivers/acpi/acpi_dbg.c @@ -0,0 +1,804 @@ +/* + * ACPI AML interfacing support + * + * Copyright (C) 2015, Intel Corporation + * Authors: Lv Zheng <lv.zheng@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
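The ACPI_DEBUGGER_USER option and the acpi_dbg.c file added above expose the ACPICA AML debugger as a single debugfs node supporting open/read/write/poll, with commands terminated by a newline (see acpi_aml_read_cmd() further down). A minimal userspace sketch of driving that node, assuming debugfs is mounted at /sys/kernel/debug and that "help" is a command the ACPICA debugger accepts (neither assumption is defined by this patch):

        /* Illustrative sketch, not part of the patch: talk to the acpidbg node. */
        #include <fcntl.h>
        #include <poll.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                char buf[4096];
                struct pollfd pfd;
                ssize_t n;
                int fd;

                fd = open("/sys/kernel/debug/acpi/acpidbg", O_RDWR | O_NONBLOCK);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* Commands are newline terminated; acpi_aml_read_cmd() stops at '\n'. */
                if (write(fd, "help\n", 5) != 5)
                        perror("write");

                /* Wait for debugger output to land in the log FIFO, then drain it. */
                pfd.fd = fd;
                pfd.events = POLLIN;
                while (poll(&pfd, 1, 1000) > 0 && (n = read(fd, buf, sizeof(buf))) > 0)
                        fwrite(buf, 1, n, stdout);

                close(fd);
                return 0;
        }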
+ */ + +/* #define DEBUG */ +#define pr_fmt(fmt) "ACPI : AML: " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/wait.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/proc_fs.h> +#include <linux/debugfs.h> +#include <linux/circ_buf.h> +#include <linux/acpi.h> +#include "internal.h" + +#define ACPI_AML_BUF_ALIGN (sizeof (acpi_size)) +#define ACPI_AML_BUF_SIZE PAGE_SIZE + +#define circ_count(circ) \ + (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) +#define circ_count_to_end(circ) \ + (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) +#define circ_space(circ) \ + (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) +#define circ_space_to_end(circ) \ + (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) + +#define ACPI_AML_OPENED 0x0001 +#define ACPI_AML_CLOSED 0x0002 +#define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */ +#define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */ +#define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */ +#define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */ +#define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER) +#define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN) +#define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN) +#define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED) + +struct acpi_aml_io { + wait_queue_head_t wait; + unsigned long flags; + unsigned long users; + struct mutex lock; + struct task_struct *thread; + char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN); + struct circ_buf out_crc; + char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN); + struct circ_buf in_crc; + acpi_osd_exec_callback function; + void *context; + unsigned long usages; +}; + +static struct acpi_aml_io acpi_aml_io; +static bool acpi_aml_initialized; +static struct file *acpi_aml_active_reader; +static struct dentry *acpi_aml_dentry; + +static inline bool __acpi_aml_running(void) +{ + return acpi_aml_io.thread ? true : false; +} + +static inline bool __acpi_aml_access_ok(unsigned long flag) +{ + /* + * The debugger interface is in opened state (OPENED && !CLOSED), + * then it is allowed to access the debugger buffers from either + * user space or the kernel space. + * In addition, for the kernel space, only the debugger thread + * (thread ID matched) is allowed to access. + */ + if (!(acpi_aml_io.flags & ACPI_AML_OPENED) || + (acpi_aml_io.flags & ACPI_AML_CLOSED) || + !__acpi_aml_running()) + return false; + if ((flag & ACPI_AML_KERN) && + current != acpi_aml_io.thread) + return false; + return true; +} + +static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag) +{ + /* + * Another read is not in progress and there is data in buffer + * available for read. + */ + if (!(acpi_aml_io.flags & flag) && circ_count(circ)) + return true; + return false; +} + +static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag) +{ + /* + * Another write is not in progress and there is buffer space + * available for write. + */ + if (!(acpi_aml_io.flags & flag) && circ_space(circ)) + return true; + return false; +} + +static inline bool __acpi_aml_busy(void) +{ + if (acpi_aml_io.flags & ACPI_AML_BUSY) + return true; + return false; +} + +static inline bool __acpi_aml_opened(void) +{ + if (acpi_aml_io.flags & ACPI_AML_OPEN) + return true; + return false; +} + +static inline bool __acpi_aml_used(void) +{ + return acpi_aml_io.usages ? 
true : false; +} + +static inline bool acpi_aml_running(void) +{ + bool ret; + + mutex_lock(&acpi_aml_io.lock); + ret = __acpi_aml_running(); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static bool acpi_aml_busy(void) +{ + bool ret; + + mutex_lock(&acpi_aml_io.lock); + ret = __acpi_aml_busy(); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static bool acpi_aml_used(void) +{ + bool ret; + + /* + * The usage count is prepared to avoid race conditions between the + * starts and the stops of the debugger thread. + */ + mutex_lock(&acpi_aml_io.lock); + ret = __acpi_aml_used(); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static bool acpi_aml_kern_readable(void) +{ + bool ret; + + mutex_lock(&acpi_aml_io.lock); + ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) || + __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static bool acpi_aml_kern_writable(void) +{ + bool ret; + + mutex_lock(&acpi_aml_io.lock); + ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) || + __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static bool acpi_aml_user_readable(void) +{ + bool ret; + + mutex_lock(&acpi_aml_io.lock); + ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) || + __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static bool acpi_aml_user_writable(void) +{ + bool ret; + + mutex_lock(&acpi_aml_io.lock); + ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) || + __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER); + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag) +{ + int ret = 0; + + mutex_lock(&acpi_aml_io.lock); + if (!__acpi_aml_access_ok(flag)) { + ret = -EFAULT; + goto out; + } + if (!__acpi_aml_writable(circ, flag)) { + ret = -EAGAIN; + goto out; + } + acpi_aml_io.flags |= flag; +out: + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag) +{ + int ret = 0; + + mutex_lock(&acpi_aml_io.lock); + if (!__acpi_aml_access_ok(flag)) { + ret = -EFAULT; + goto out; + } + if (!__acpi_aml_readable(circ, flag)) { + ret = -EAGAIN; + goto out; + } + acpi_aml_io.flags |= flag; +out: + mutex_unlock(&acpi_aml_io.lock); + return ret; +} + +static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup) +{ + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.flags &= ~flag; + if (wakeup) + wake_up_interruptible(&acpi_aml_io.wait); + mutex_unlock(&acpi_aml_io.lock); +} + +static int acpi_aml_write_kern(const char *buf, int len) +{ + int ret; + struct circ_buf *crc = &acpi_aml_io.out_crc; + int n; + char *p; + + ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN); + if (IS_ERR_VALUE(ret)) + return ret; + /* sync tail before inserting logs */ + smp_mb(); + p = &crc->buf[crc->head]; + n = min(len, circ_space_to_end(crc)); + memcpy(p, buf, n); + /* sync head after inserting logs */ + smp_wmb(); + crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); + acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true); + return n; +} + +static int acpi_aml_readb_kern(void) +{ + int ret; + struct circ_buf *crc = &acpi_aml_io.in_crc; + char *p; + + ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN); + if (IS_ERR_VALUE(ret)) + return ret; + /* sync head before removing cmds */ + smp_rmb(); + p = &crc->buf[crc->tail]; + ret = (int)*p; + /* sync tail before inserting cmds */ + smp_mb(); + 
crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1); + acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true); + return ret; +} + +/* + * acpi_aml_write_log() - Capture debugger output + * @msg: the debugger output + * + * This function should be used to implement acpi_os_printf() to filter out + * the debugger output and store the output into the debugger interface + * buffer. Return the size of stored logs or errno. + */ +static ssize_t acpi_aml_write_log(const char *msg) +{ + int ret = 0; + int count = 0, size = 0; + + if (!acpi_aml_initialized) + return -ENODEV; + if (msg) + count = strlen(msg); + while (count > 0) { +again: + ret = acpi_aml_write_kern(msg + size, count); + if (ret == -EAGAIN) { + ret = wait_event_interruptible(acpi_aml_io.wait, + acpi_aml_kern_writable()); + /* + * We need to retry when the condition + * becomes true. + */ + if (ret == 0) + goto again; + break; + } + if (IS_ERR_VALUE(ret)) + break; + size += ret; + count -= ret; + } + return size > 0 ? size : ret; +} + +/* + * acpi_aml_read_cmd() - Capture debugger input + * @msg: the debugger input + * @size: the size of the debugger input + * + * This function should be used to implement acpi_os_get_line() to capture + * the debugger input commands and store the input commands into the + * debugger interface buffer. Return the size of stored commands or errno. + */ +static ssize_t acpi_aml_read_cmd(char *msg, size_t count) +{ + int ret = 0; + int size = 0; + + /* + * This is ensured by the running fact of the debugger thread + * unless a bug is introduced. + */ + BUG_ON(!acpi_aml_initialized); + while (count > 0) { +again: + /* + * Check each input byte to find the end of the command. + */ + ret = acpi_aml_readb_kern(); + if (ret == -EAGAIN) { + ret = wait_event_interruptible(acpi_aml_io.wait, + acpi_aml_kern_readable()); + /* + * We need to retry when the condition becomes + * true. + */ + if (ret == 0) + goto again; + } + if (IS_ERR_VALUE(ret)) + break; + *(msg + size) = (char)ret; + size++; + count--; + if (ret == '\n') { + /* + * acpi_os_get_line() requires a zero terminated command + * string. + */ + *(msg + size - 1) = '\0'; + break; + } + } + return size > 0 ? size : ret; +} + +static int acpi_aml_thread(void *unsed) +{ + acpi_osd_exec_callback function = NULL; + void *context; + + mutex_lock(&acpi_aml_io.lock); + if (acpi_aml_io.function) { + acpi_aml_io.usages++; + function = acpi_aml_io.function; + context = acpi_aml_io.context; + } + mutex_unlock(&acpi_aml_io.lock); + + if (function) + function(context); + + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.usages--; + if (!__acpi_aml_used()) { + acpi_aml_io.thread = NULL; + wake_up(&acpi_aml_io.wait); + } + mutex_unlock(&acpi_aml_io.lock); + + return 0; +} + +/* + * acpi_aml_create_thread() - Create AML debugger thread + * @function: the debugger thread callback + * @context: the context to be passed to the debugger thread + * + * This function should be used to implement acpi_os_execute() which is + * used by the ACPICA debugger to create the debugger thread. 
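The two FIFOs above follow the single-producer/single-consumer pattern from linux/circ_buf.h: only the writer advances head, only the reader advances tail, the buffer size (PAGE_SIZE) is a power of two, and the smp_wmb()/smp_rmb()/smp_mb() calls in acpi_aml_write_kern()/acpi_aml_readb_kern() order the index updates against the data copies. A runnable single-threaded toy of the same index arithmetic (barriers omitted; the names are illustrative, not taken from the patch):

        /* Illustrative sketch: the CIRC_CNT/CIRC_SPACE arithmetic used by the AML
         * FIFOs, reduced to a single-threaded toy.  SIZE must be a power of two. */
        #include <stdio.h>

        #define SIZE 16
        #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
        #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

        static char buf[SIZE];
        static int head, tail;

        static int fifo_put(const char *s, int len)     /* producer: moves head */
        {
                int n = 0;

                while (n < len && CIRC_SPACE(head, tail, SIZE)) {
                        buf[head] = s[n++];
                        head = (head + 1) & (SIZE - 1); /* same wrap as crc->head above */
                }
                return n;
        }

        static int fifo_get(char *s, int len)           /* consumer: moves tail */
        {
                int n = 0;

                while (n < len && CIRC_CNT(head, tail, SIZE)) {
                        s[n++] = buf[tail];
                        tail = (tail + 1) & (SIZE - 1); /* same wrap as crc->tail above */
                }
                return n;
        }

        int main(void)
        {
                char out[SIZE + 1];
                int n;

                fifo_put("find _HID\n", 10);
                n = fifo_get(out, SIZE);
                out[n] = '\0';
                printf("read %d bytes: %s", n, out);
                return 0;
        }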
+ */ +static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context) +{ + struct task_struct *t; + + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.function = function; + acpi_aml_io.context = context; + mutex_unlock(&acpi_aml_io.lock); + + t = kthread_create(acpi_aml_thread, NULL, "aml"); + if (IS_ERR(t)) { + pr_err("Failed to create AML debugger thread.\n"); + return PTR_ERR(t); + } + + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.thread = t; + acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t); + wake_up_process(t); + mutex_unlock(&acpi_aml_io.lock); + return 0; +} + +static int acpi_aml_wait_command_ready(bool single_step, + char *buffer, size_t length) +{ + acpi_status status; + + if (single_step) + acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT); + else + acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT); + + status = acpi_os_get_line(buffer, length, NULL); + if (ACPI_FAILURE(status)) + return -EINVAL; + return 0; +} + +static int acpi_aml_notify_command_complete(void) +{ + return 0; +} + +static int acpi_aml_open(struct inode *inode, struct file *file) +{ + int ret = 0; + acpi_status status; + + mutex_lock(&acpi_aml_io.lock); + /* + * The debugger interface is being closed, no new user is allowed + * during this period. + */ + if (acpi_aml_io.flags & ACPI_AML_CLOSED) { + ret = -EBUSY; + goto err_lock; + } + if ((file->f_flags & O_ACCMODE) != O_WRONLY) { + /* + * Only one reader is allowed to initiate the debugger + * thread. + */ + if (acpi_aml_active_reader) { + ret = -EBUSY; + goto err_lock; + } else { + pr_debug("Opening debugger reader.\n"); + acpi_aml_active_reader = file; + } + } else { + /* + * No writer is allowed unless the debugger thread is + * ready. + */ + if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) { + ret = -ENODEV; + goto err_lock; + } + } + if (acpi_aml_active_reader == file) { + pr_debug("Opening debugger interface.\n"); + mutex_unlock(&acpi_aml_io.lock); + + pr_debug("Initializing debugger thread.\n"); + status = acpi_initialize_debugger(); + if (ACPI_FAILURE(status)) { + pr_err("Failed to initialize debugger.\n"); + ret = -EINVAL; + goto err_exit; + } + pr_debug("Debugger thread initialized.\n"); + + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.flags |= ACPI_AML_OPENED; + acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0; + acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0; + pr_debug("Debugger interface opened.\n"); + } + acpi_aml_io.users++; +err_lock: + if (IS_ERR_VALUE(ret)) { + if (acpi_aml_active_reader == file) + acpi_aml_active_reader = NULL; + } + mutex_unlock(&acpi_aml_io.lock); +err_exit: + return ret; +} + +static int acpi_aml_release(struct inode *inode, struct file *file) +{ + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.users--; + if (file == acpi_aml_active_reader) { + pr_debug("Closing debugger reader.\n"); + acpi_aml_active_reader = NULL; + + pr_debug("Closing debugger interface.\n"); + acpi_aml_io.flags |= ACPI_AML_CLOSED; + + /* + * Wake up all user space/kernel space blocked + * readers/writers. + */ + wake_up_interruptible(&acpi_aml_io.wait); + mutex_unlock(&acpi_aml_io.lock); + /* + * Wait all user space/kernel space readers/writers to + * stop so that ACPICA command loop of the debugger thread + * should fail all its command line reads after this point. + */ + wait_event(acpi_aml_io.wait, !acpi_aml_busy()); + + /* + * Then we try to terminate the debugger thread if it is + * not terminated. 
+ */ + pr_debug("Terminating debugger thread.\n"); + acpi_terminate_debugger(); + wait_event(acpi_aml_io.wait, !acpi_aml_used()); + pr_debug("Debugger thread terminated.\n"); + + mutex_lock(&acpi_aml_io.lock); + acpi_aml_io.flags &= ~ACPI_AML_OPENED; + } + if (acpi_aml_io.users == 0) { + pr_debug("Debugger interface closed.\n"); + acpi_aml_io.flags &= ~ACPI_AML_CLOSED; + } + mutex_unlock(&acpi_aml_io.lock); + return 0; +} + +static int acpi_aml_read_user(char __user *buf, int len) +{ + int ret; + struct circ_buf *crc = &acpi_aml_io.out_crc; + int n; + char *p; + + ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); + if (IS_ERR_VALUE(ret)) + return ret; + /* sync head before removing logs */ + smp_rmb(); + p = &crc->buf[crc->tail]; + n = min(len, circ_count_to_end(crc)); + if (copy_to_user(buf, p, n)) { + ret = -EFAULT; + goto out; + } + /* sync tail after removing logs */ + smp_mb(); + crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); + ret = n; +out: + acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret)); + return ret; +} + +static ssize_t acpi_aml_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = 0; + int size = 0; + + if (!count) + return 0; + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + while (count > 0) { +again: + ret = acpi_aml_read_user(buf + size, count); + if (ret == -EAGAIN) { + if (file->f_flags & O_NONBLOCK) + break; + else { + ret = wait_event_interruptible(acpi_aml_io.wait, + acpi_aml_user_readable()); + /* + * We need to retry when the condition + * becomes true. + */ + if (ret == 0) + goto again; + } + } + if (IS_ERR_VALUE(ret)) { + if (!acpi_aml_running()) + ret = 0; + break; + } + if (ret) { + size += ret; + count -= ret; + *ppos += ret; + break; + } + } + return size > 0 ? size : ret; +} + +static int acpi_aml_write_user(const char __user *buf, int len) +{ + int ret; + struct circ_buf *crc = &acpi_aml_io.in_crc; + int n; + char *p; + + ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); + if (IS_ERR_VALUE(ret)) + return ret; + /* sync tail before inserting cmds */ + smp_mb(); + p = &crc->buf[crc->head]; + n = min(len, circ_space_to_end(crc)); + if (copy_from_user(p, buf, n)) { + ret = -EFAULT; + goto out; + } + /* sync head after inserting cmds */ + smp_wmb(); + crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); + ret = n; +out: + acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret)); + return n; +} + +static ssize_t acpi_aml_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = 0; + int size = 0; + + if (!count) + return 0; + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + while (count > 0) { +again: + ret = acpi_aml_write_user(buf + size, count); + if (ret == -EAGAIN) { + if (file->f_flags & O_NONBLOCK) + break; + else { + ret = wait_event_interruptible(acpi_aml_io.wait, + acpi_aml_user_writable()); + /* + * We need to retry when the condition + * becomes true. + */ + if (ret == 0) + goto again; + } + } + if (IS_ERR_VALUE(ret)) { + if (!acpi_aml_running()) + ret = 0; + break; + } + if (ret) { + size += ret; + count -= ret; + *ppos += ret; + } + } + return size > 0 ? 
size : ret; +} + +static unsigned int acpi_aml_poll(struct file *file, poll_table *wait) +{ + int masks = 0; + + poll_wait(file, &acpi_aml_io.wait, wait); + if (acpi_aml_user_readable()) + masks |= POLLIN | POLLRDNORM; + if (acpi_aml_user_writable()) + masks |= POLLOUT | POLLWRNORM; + + return masks; +} + +static const struct file_operations acpi_aml_operations = { + .read = acpi_aml_read, + .write = acpi_aml_write, + .poll = acpi_aml_poll, + .open = acpi_aml_open, + .release = acpi_aml_release, + .llseek = generic_file_llseek, +}; + +static const struct acpi_debugger_ops acpi_aml_debugger = { + .create_thread = acpi_aml_create_thread, + .read_cmd = acpi_aml_read_cmd, + .write_log = acpi_aml_write_log, + .wait_command_ready = acpi_aml_wait_command_ready, + .notify_command_complete = acpi_aml_notify_command_complete, +}; + +int __init acpi_aml_init(void) +{ + int ret = 0; + + if (!acpi_debugfs_dir) { + ret = -ENOENT; + goto err_exit; + } + + /* Initialize AML IO interface */ + mutex_init(&acpi_aml_io.lock); + init_waitqueue_head(&acpi_aml_io.wait); + acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf; + acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf; + acpi_aml_dentry = debugfs_create_file("acpidbg", + S_IFREG | S_IRUGO | S_IWUSR, + acpi_debugfs_dir, NULL, + &acpi_aml_operations); + if (acpi_aml_dentry == NULL) { + ret = -ENODEV; + goto err_exit; + } + ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger); + if (ret) + goto err_fs; + acpi_aml_initialized = true; + +err_fs: + if (ret) { + debugfs_remove(acpi_aml_dentry); + acpi_aml_dentry = NULL; + } +err_exit: + return ret; +} + +void __exit acpi_aml_exit(void) +{ + if (acpi_aml_initialized) { + acpi_unregister_debugger(&acpi_aml_debugger); + if (acpi_aml_dentry) { + debugfs_remove(acpi_aml_dentry); + acpi_aml_dentry = NULL; + } + acpi_aml_initialized = false; + } +} + +module_init(acpi_aml_init); +module_exit(acpi_aml_exit); + +MODULE_AUTHOR("Lv Zheng"); +MODULE_DESCRIPTION("ACPI debugger userspace IO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index f9e0d09f7c66..047281a6ae11 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -15,6 +15,7 @@ #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/platform_data/clk-lpss.h> #include <linux/pm_runtime.h> @@ -26,6 +27,10 @@ ACPI_MODULE_NAME("acpi_lpss"); #ifdef CONFIG_X86_INTEL_LPSS +#include <asm/cpu_device_id.h> +#include <asm/iosf_mbi.h> +#include <asm/pmc_atom.h> + #define LPSS_ADDR(desc) ((unsigned long)&desc) #define LPSS_CLK_SIZE 0x04 @@ -71,7 +76,7 @@ struct lpss_device_desc { void (*setup)(struct lpss_private_data *pdata); }; -static struct lpss_device_desc lpss_dma_desc = { +static const struct lpss_device_desc lpss_dma_desc = { .flags = LPSS_CLK, }; @@ -84,6 +89,23 @@ struct lpss_private_data { u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; }; +/* LPSS run time quirks */ +static unsigned int lpss_quirks; + +/* + * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device. + * + * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover + * it can be powered off automatically whenever the last LPSS device goes down. + * In case of no power any access to the DMA controller will hang the system. + * The behaviour is reproduced on some HP laptops based on Intel BayTrail as + * well as on ASuS T100TA transformer. 
+ * + * This quirk overrides power state of entire LPSS island to keep DMA powered + * on whenever we have at least one other device in use. + */ +#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0) + /* UART Component Parameter Register */ #define LPSS_UART_CPR 0xF4 #define LPSS_UART_CPR_AFCE BIT(4) @@ -196,13 +218,21 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = { .setup = byt_i2c_setup, }; -static struct lpss_device_desc bsw_spi_dev_desc = { +static const struct lpss_device_desc bsw_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, .prv_offset = 0x400, .setup = lpss_deassert_reset, }; +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id lpss_cpu_ids[] = { + ICPU(0x37), /* Valleyview, Bay Trail */ + ICPU(0x4c), /* Braswell, Cherry Trail */ + {} +}; + #else #define LPSS_ADDR(desc) (0UL) @@ -574,6 +604,17 @@ static void acpi_lpss_restore_ctx(struct device *dev, { unsigned int i; + for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { + unsigned long offset = i * sizeof(u32); + + __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); + dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", + pdata->prv_reg_ctx[i], offset); + } +} + +static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata) +{ /* * The following delay is needed or the subsequent write operations may * fail. The LPSS devices are actually PCI devices and the PCI spec @@ -586,14 +627,34 @@ static void acpi_lpss_restore_ctx(struct device *dev, delay = 0; msleep(delay); +} - for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { - unsigned long offset = i * sizeof(u32); +static int acpi_lpss_activate(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; - __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); - dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", - pdata->prv_reg_ctx[i], offset); - } + ret = acpi_dev_runtime_resume(dev); + if (ret) + return ret; + + acpi_lpss_d3_to_d0_delay(pdata); + + /* + * This is called only on ->probe() stage where a device is either in + * known state defined by BIOS or most likely powered off. Due to this + * we have to deassert reset line to be sure that ->probe() will + * recognize the device. 
+ */ + if (pdata->dev_desc->flags & LPSS_SAVE_CTX) + lpss_deassert_reset(pdata); + + return 0; +} + +static void acpi_lpss_dismiss(struct device *dev) +{ + acpi_dev_runtime_suspend(dev); } #ifdef CONFIG_PM_SLEEP @@ -621,6 +682,8 @@ static int acpi_lpss_resume_early(struct device *dev) if (ret) return ret; + acpi_lpss_d3_to_d0_delay(pdata); + if (pdata->dev_desc->flags & LPSS_SAVE_CTX) acpi_lpss_restore_ctx(dev, pdata); @@ -628,6 +691,89 @@ static int acpi_lpss_resume_early(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ +/* IOSF SB for LPSS island */ +#define LPSS_IOSF_UNIT_LPIOEP 0xA0 +#define LPSS_IOSF_UNIT_LPIO1 0xAB +#define LPSS_IOSF_UNIT_LPIO2 0xAC + +#define LPSS_IOSF_PMCSR 0x84 +#define LPSS_PMCSR_D0 0 +#define LPSS_PMCSR_D3hot 3 +#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0) + +#define LPSS_IOSF_GPIODEF0 0x154 +#define LPSS_GPIODEF0_DMA1_D3 BIT(2) +#define LPSS_GPIODEF0_DMA2_D3 BIT(3) +#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2) + +static DEFINE_MUTEX(lpss_iosf_mutex); + +static void lpss_iosf_enter_d3_state(void) +{ + u32 value1 = 0; + u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK; + u32 value2 = LPSS_PMCSR_D3hot; + u32 mask2 = LPSS_PMCSR_Dx_MASK; + /* + * PMC provides an information about actual status of the LPSS devices. + * Here we read the values related to LPSS power island, i.e. LPSS + * devices, excluding both LPSS DMA controllers, along with SCC domain. + */ + u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe; + int ret; + + ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis); + if (ret) + return; + + mutex_lock(&lpss_iosf_mutex); + + ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0); + if (ret) + goto exit; + + /* + * Get the status of entire LPSS power island per device basis. + * Shutdown both LPSS DMA controllers if and only if all other devices + * are already in D3hot. + */ + pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask; + if (pmc_status) + goto exit; + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, + LPSS_IOSF_GPIODEF0, value1, mask1); +exit: + mutex_unlock(&lpss_iosf_mutex); +} + +static void lpss_iosf_exit_d3_state(void) +{ + u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3; + u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK; + u32 value2 = LPSS_PMCSR_D0; + u32 mask2 = LPSS_PMCSR_Dx_MASK; + + mutex_lock(&lpss_iosf_mutex); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, + LPSS_IOSF_GPIODEF0, value1, mask1); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + mutex_unlock(&lpss_iosf_mutex); +} + static int acpi_lpss_runtime_suspend(struct device *dev) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); @@ -640,7 +786,17 @@ static int acpi_lpss_runtime_suspend(struct device *dev) if (pdata->dev_desc->flags & LPSS_SAVE_CTX) acpi_lpss_save_ctx(dev, pdata); - return acpi_dev_runtime_suspend(dev); + ret = acpi_dev_runtime_suspend(dev); + + /* + * This call must be last in the sequence, otherwise PMC will return + * wrong status for devices being about to be powered off. See + * lpss_iosf_enter_d3_state() for further information. 
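In lpss_iosf_enter_d3_state() above, a monitored function counts as idle if its bit is set in either PMC_FUNC_DIS (function disabled) or PMC_D3_STS_0 (already in D3hot), so pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask collects the still-active bits and any nonzero result aborts the island shutdown. A small worked example of that test; the mask is taken from the code above, while the sample register values are invented for illustration:

        /* Illustrative sketch: the "is anything still active?" check performed
         * before powering down the LPSS island.  Register values are made up. */
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t pmc_mask = 0xfe000ffe;  /* LPSS + SCC functions, excluding both DMA controllers */
                uint32_t func_dis = 0x00000ff0;  /* made-up: some functions disabled */
                uint32_t d3_sts_0 = 0xfe00000e;  /* made-up: the remaining ones already in D3hot */
                uint32_t pmc_status;

                /* A bit survives only if the function is neither disabled nor in D3hot. */
                pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;

                if (pmc_status)
                        printf("0x%08x still active -> leave the island powered\n", pmc_status);
                else
                        printf("all monitored functions idle -> safe to enter D3\n");
                return 0;
        }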
+ */ + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + lpss_iosf_enter_d3_state(); + + return ret; } static int acpi_lpss_runtime_resume(struct device *dev) @@ -648,10 +804,19 @@ static int acpi_lpss_runtime_resume(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; + /* + * This call is kept first to be in symmetry with + * acpi_lpss_runtime_suspend() one. + */ + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + lpss_iosf_exit_d3_state(); + ret = acpi_dev_runtime_resume(dev); if (ret) return ret; + acpi_lpss_d3_to_d0_delay(pdata); + if (pdata->dev_desc->flags & LPSS_SAVE_CTX) acpi_lpss_restore_ctx(dev, pdata); @@ -660,6 +825,10 @@ static int acpi_lpss_runtime_resume(struct device *dev) #endif /* CONFIG_PM */ static struct dev_pm_domain acpi_lpss_pm_domain = { +#ifdef CONFIG_PM + .activate = acpi_lpss_activate, + .dismiss = acpi_lpss_dismiss, +#endif .ops = { #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP @@ -705,8 +874,14 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, } switch (action) { - case BUS_NOTIFY_ADD_DEVICE: + case BUS_NOTIFY_BIND_DRIVER: pdev->dev.pm_domain = &acpi_lpss_pm_domain; + break; + case BUS_NOTIFY_DRIVER_NOT_BOUND: + case BUS_NOTIFY_UNBOUND_DRIVER: + pdev->dev.pm_domain = NULL; + break; + case BUS_NOTIFY_ADD_DEVICE: if (pdata->dev_desc->flags & LPSS_LTR) return sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group); @@ -714,7 +889,6 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, case BUS_NOTIFY_DEL_DEVICE: if (pdata->dev_desc->flags & LPSS_LTR) sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); - pdev->dev.pm_domain = NULL; break; default: break; @@ -754,10 +928,19 @@ static struct acpi_scan_handler lpss_handler = { void __init acpi_lpss_init(void) { - if (!lpt_clk_init()) { - bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); - acpi_scan_add_handler(&lpss_handler); - } + const struct x86_cpu_id *id; + int ret; + + ret = lpt_clk_init(); + if (ret) + return; + + id = x86_match_cpu(lpss_cpu_ids); + if (id) + lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON; + + bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); + acpi_scan_add_handler(&lpss_handler); } #else diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 48fc3ad13a4b..67d97c0090a2 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -367,7 +367,7 @@ static struct acpi_scan_handler acpi_pnp_handler = { */ static int is_cmos_rtc_device(struct acpi_device *adev) { - struct acpi_device_id ids[] = { + static const struct acpi_device_id ids[] = { { "PNP0B00" }, { "PNP0B01" }, { "PNP0B02" }, diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 3405f7a41e25..06a006ff89b0 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -77,14 +77,21 @@ module_param(allow_duplicates, bool, 0644); static int disable_backlight_sysfs_if = -1; module_param(disable_backlight_sysfs_if, int, 0444); +#define REPORT_OUTPUT_KEY_EVENTS 0x01 +#define REPORT_BRIGHTNESS_KEY_EVENTS 0x02 +static int report_key_events = -1; +module_param(report_key_events, int, 0644); +MODULE_PARM_DESC(report_key_events, + "0: none, 1: output changes, 2: brightness changes, 3: all"); + static bool device_id_scheme = false; module_param(device_id_scheme, bool, 0444); static bool only_lcd = false; module_param(only_lcd, bool, 0444); -static int register_count; -static DEFINE_MUTEX(register_count_mutex); +static DECLARE_COMPLETION(register_done); +static 
DEFINE_MUTEX(register_done_mutex); static struct mutex video_list_lock; static struct list_head video_bus_head; static int acpi_video_bus_add(struct acpi_device *device); @@ -412,6 +419,13 @@ static int video_enable_only_lcd(const struct dmi_system_id *d) return 0; } +static int video_set_report_key_events(const struct dmi_system_id *id) +{ + if (report_key_events == -1) + report_key_events = (uintptr_t)id->driver_data; + return 0; +} + static struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -500,6 +514,24 @@ static struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile M9410"), }, }, + /* + * Some machines report wrong key events on the acpi-bus, suppress + * key event reporting on these. Note this is only intended to work + * around events which are plain wrong. In some cases we get double + * events, in this case acpi-video is considered the canonical source + * and the events from the other source should be filtered. E.g. + * by calling acpi_video_handles_brightness_key_presses() from the + * vendor acpi/wmi driver or by using /lib/udev/hwdb.d/60-keyboard.hwdb + */ + { + .callback = video_set_report_key_events, + .driver_data = (void *)((uintptr_t)REPORT_OUTPUT_KEY_EVENTS), + .ident = "Dell Vostro V131", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), + }, + }, {} }; @@ -1480,7 +1512,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) /* Something vetoed the keypress. */ keycode = 0; - if (keycode) { + if (keycode && (report_key_events & REPORT_OUTPUT_KEY_EVENTS)) { input_report_key(input, keycode, 1); input_sync(input); input_report_key(input, keycode, 0); @@ -1544,7 +1576,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) acpi_notifier_call_chain(device, event, 0); - if (keycode) { + if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) { input_report_key(input, keycode, 1); input_sync(input); input_report_key(input, keycode, 0); @@ -2017,8 +2049,8 @@ int acpi_video_register(void) { int ret = 0; - mutex_lock(®ister_count_mutex); - if (register_count) { + mutex_lock(®ister_done_mutex); + if (completion_done(®ister_done)) { /* * if the function of acpi_video_register is already called, * don't register the acpi_vide_bus again and return no error. @@ -2039,22 +2071,22 @@ int acpi_video_register(void) * When the acpi_video_bus is loaded successfully, increase * the counter reference. 
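The comment above points vendor acpi/wmi drivers at acpi_video_handles_brightness_key_presses() (added later in this file) for suppressing their duplicate brightness key events. A hedged sketch of what that filter might look like on the vendor-driver side; the handler and its keycodes are hypothetical, and the declaration is assumed to be available from acpi/video.h as part of the same series:

        /* Illustrative sketch of a hypothetical vendor hotkey handler, not from this patch. */
        #include <linux/input.h>
        #include <acpi/video.h>

        static void example_vendor_report_hotkey(struct input_dev *input, unsigned int keycode)
        {
                /* If acpi-video already reports brightness keys, drop our duplicate event. */
                if ((keycode == KEY_BRIGHTNESSUP || keycode == KEY_BRIGHTNESSDOWN) &&
                    acpi_video_handles_brightness_key_presses())
                        return;

                input_report_key(input, keycode, 1);
                input_sync(input);
                input_report_key(input, keycode, 0);
                input_sync(input);
        }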
*/ - register_count = 1; + complete(®ister_done); leave: - mutex_unlock(®ister_count_mutex); + mutex_unlock(®ister_done_mutex); return ret; } EXPORT_SYMBOL(acpi_video_register); void acpi_video_unregister(void) { - mutex_lock(®ister_count_mutex); - if (register_count) { + mutex_lock(®ister_done_mutex); + if (completion_done(®ister_done)) { acpi_bus_unregister_driver(&acpi_video_bus); - register_count = 0; + reinit_completion(®ister_done); } - mutex_unlock(®ister_count_mutex); + mutex_unlock(®ister_done_mutex); } EXPORT_SYMBOL(acpi_video_unregister); @@ -2062,15 +2094,29 @@ void acpi_video_unregister_backlight(void) { struct acpi_video_bus *video; - mutex_lock(®ister_count_mutex); - if (register_count) { + mutex_lock(®ister_done_mutex); + if (completion_done(®ister_done)) { mutex_lock(&video_list_lock); list_for_each_entry(video, &video_bus_head, entry) acpi_video_bus_unregister_backlight(video); mutex_unlock(&video_list_lock); } - mutex_unlock(®ister_count_mutex); + mutex_unlock(®ister_done_mutex); +} + +bool acpi_video_handles_brightness_key_presses(void) +{ + bool have_video_busses; + + wait_for_completion(®ister_done); + mutex_lock(&video_list_lock); + have_video_busses = !list_empty(&video_bus_head); + mutex_unlock(&video_list_lock); + + return have_video_busses && + (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS); } +EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses); /* * This is kind of nasty. Hardware using Intel chipsets may require diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 885936f79542..f682374c19f4 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -50,6 +50,7 @@ acpi-y += \ exdump.o \ exfield.o \ exfldio.o \ + exmisc.o \ exmutex.o \ exnames.o \ exoparg1.o \ @@ -57,7 +58,6 @@ acpi-y += \ exoparg3.o \ exoparg6.o \ exprep.o \ - exmisc.o \ exregion.o \ exresnte.o \ exresolv.o \ @@ -66,6 +66,7 @@ acpi-y += \ exstoren.o \ exstorob.o \ exsystem.o \ + extrace.o \ exutils.o acpi-y += \ @@ -196,7 +197,6 @@ acpi-$(ACPI_FUTURE_USAGE) += \ dbfileio.o \ dbtest.o \ utcache.o \ - utfileio.o \ utprint.o \ uttrack.o \ utuuid.o diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index e4cc48fbf4ee..8b4ff40a294c 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -44,6 +44,8 @@ #ifndef _ACAPPS #define _ACAPPS +#include <stdio.h> + /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" @@ -85,11 +87,40 @@ acpi_os_printf (description); #define ACPI_OPTION(name, description) \ - acpi_os_printf (" %-18s%s\n", name, description); + acpi_os_printf (" %-20s%s\n", name, description); + +/* Check for unexpected exceptions */ + +#define ACPI_CHECK_STATUS(name, status, expected) \ + if (status != expected) \ + { \ + acpi_os_printf ("Unexpected %s from %s (%s-%d)\n", \ + acpi_format_exception (status), #name, _acpi_module_name, __LINE__); \ + } + +/* Check for unexpected non-AE_OK errors */ + +#define ACPI_CHECK_OK(name, status) ACPI_CHECK_STATUS (name, status, AE_OK); #define FILE_SUFFIX_DISASSEMBLY "dsl" #define FILE_SUFFIX_BINARY_TABLE ".dat" /* Needs the dot */ +/* acfileio */ + +acpi_status +ac_get_all_tables_from_file(char *filename, + u8 get_only_aml_tables, + struct acpi_new_table_desc **return_list_head); + +u8 ac_is_file_binary(FILE * file); + +acpi_status ac_validate_table_header(FILE * file, long table_offset); + +/* Values for get_only_aml_tables */ + +#define ACPI_GET_ONLY_AML_TABLES TRUE +#define ACPI_GET_ALL_TABLES FALSE + /* * getopt 
*/ @@ -107,30 +138,6 @@ extern char *acpi_gbl_optarg; */ u32 cm_get_file_size(ACPI_FILE file); -#ifndef ACPI_DUMP_APP -/* - * adisasm - */ -acpi_status -ad_aml_disassemble(u8 out_to_file, - char *filename, char *prefix, char **out_filename); - -void ad_print_statistics(void); - -acpi_status ad_find_dsdt(u8 **dsdt_ptr, u32 *dsdt_length); - -void ad_dump_tables(void); - -acpi_status ad_get_local_tables(void); - -acpi_status -ad_parse_table(struct acpi_table_header *table, - acpi_owner_id * owner_id, u8 load_table, u8 external); - -acpi_status ad_display_tables(char *filename, struct acpi_table_header *table); - -acpi_status ad_display_statistics(void); - /* * adwalk */ @@ -168,6 +175,5 @@ char *ad_generate_filename(char *prefix, char *table_id); void ad_write_table(struct acpi_table_header *table, u32 length, char *table_name, char *oem_table_id); -#endif #endif /* _ACAPPS */ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index c928ba494c40..ecb05f1c1d5c 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -80,9 +80,15 @@ struct acpi_db_execute_walk { /* * dbxface - external debugger interfaces */ -acpi_status -acpi_db_single_step(struct acpi_walk_state *walk_state, - union acpi_parse_object *op, u32 op_type); +ACPI_DBR_DEPENDENT_RETURN_OK(acpi_status + acpi_db_single_step(struct acpi_walk_state + *walk_state, + union acpi_parse_object *op, + u32 op_type)) + ACPI_DBR_DEPENDENT_RETURN_VOID(void + acpi_db_signal_break_point(struct + acpi_walk_state + *walk_state)) /* * dbcmds - debug commands and output routines @@ -182,11 +188,15 @@ void acpi_db_display_method_info(union acpi_parse_object *op); void acpi_db_decode_and_display_object(char *target, char *output_type); -void -acpi_db_display_result_object(union acpi_operand_object *obj_desc, - struct acpi_walk_state *walk_state); +ACPI_DBR_DEPENDENT_RETURN_VOID(void + acpi_db_display_result_object(union + acpi_operand_object + *obj_desc, + struct + acpi_walk_state + *walk_state)) -acpi_status acpi_db_display_all_methods(char *display_count_arg); + acpi_status acpi_db_display_all_methods(char *display_count_arg); void acpi_db_display_arguments(void); @@ -198,9 +208,13 @@ void acpi_db_display_calling_tree(void); void acpi_db_display_object_type(char *object_arg); -void -acpi_db_display_argument_object(union acpi_operand_object *obj_desc, - struct acpi_walk_state *walk_state); +ACPI_DBR_DEPENDENT_RETURN_VOID(void + acpi_db_display_argument_object(union + acpi_operand_object + *obj_desc, + struct + acpi_walk_state + *walk_state)) /* * dbexec - debugger control method execution @@ -231,10 +245,7 @@ void acpi_db_open_debug_file(char *name); acpi_status acpi_db_load_acpi_table(char *filename); -acpi_status -acpi_db_get_table_from_file(char *filename, - struct acpi_table_header **table, - u8 must_be_aml_table); +acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head); /* * dbhistry - debugger HISTORY command @@ -257,7 +268,7 @@ acpi_db_command_dispatch(char *input_buffer, void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context); -acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op); +acpi_status acpi_db_user_commands(void); char *acpi_db_get_next_token(char *string, char **next, acpi_object_type * return_type); diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 228704b78657..d18f18409071 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -161,6 +161,11 @@ acpi_ev_delete_gpe_handlers(struct 
acpi_gpe_xrupt_info *gpe_xrupt_info, /* * evhandler - Address space handling */ +union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type + space_id, + union acpi_operand_object + *handler_obj); + u8 acpi_ev_has_default_handler(struct acpi_namespace_node *node, acpi_adr_space_type space_id); @@ -193,9 +198,11 @@ void acpi_ev_detach_region(union acpi_operand_object *region_obj, u8 acpi_ns_is_locked); -acpi_status +void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj); + +void acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, - acpi_adr_space_type space_id); + acpi_adr_space_type space_id, u32 function); acpi_status acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index faa97604d878..73462cac41d2 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -145,6 +145,7 @@ ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_operand_cache); ACPI_INIT_GLOBAL(u32, acpi_gbl_startup_flags, 0); ACPI_INIT_GLOBAL(u8, acpi_gbl_shutdown, TRUE); +ACPI_INIT_GLOBAL(u8, acpi_gbl_early_initialization, TRUE); /* Global handlers */ @@ -164,7 +165,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset); /* Initialization sequencing */ -ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed); +ACPI_INIT_GLOBAL(u8, acpi_gbl_reg_methods_enabled, FALSE); /* Misc */ @@ -326,7 +327,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list); #ifdef ACPI_DEBUGGER ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE); -ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE); ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID); ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods); @@ -345,7 +345,6 @@ ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]); /* These buffers should all be the same size */ -ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]); ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]); ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]); ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]); @@ -360,9 +359,6 @@ ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc); ACPI_GLOBAL(u32, acpi_gbl_num_nodes); ACPI_GLOBAL(u32, acpi_gbl_num_objects); -ACPI_GLOBAL(acpi_mutex, acpi_gbl_db_command_ready); -ACPI_GLOBAL(acpi_mutex, acpi_gbl_db_command_complete); - #endif /* ACPI_DEBUGGER */ /***************************************************************************** diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index e1dd784d8515..24928ec444de 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -219,6 +219,13 @@ struct acpi_table_list { #define ACPI_ROOT_ORIGIN_ALLOCATED (1) #define ACPI_ROOT_ALLOW_RESIZE (2) +/* List to manage incoming ACPI tables */ + +struct acpi_new_table_desc { + struct acpi_table_header *table; + struct acpi_new_table_desc *next; +}; + /* Predefined table indexes */ #define ACPI_INVALID_TABLE_INDEX (0xFFFFFFFF) @@ -388,7 +395,8 @@ union acpi_predefined_info { /* Return object auto-repair info */ -typedef acpi_status(*acpi_object_converter) (union acpi_operand_object +typedef acpi_status(*acpi_object_converter) (struct acpi_namespace_node * scope, + union acpi_operand_object *original_object, union acpi_operand_object **converted_object); @@ -420,6 +428,7 @@ struct acpi_simple_repair_info { struct acpi_reg_walk_info { acpi_adr_space_type space_id; + u32 function; u32 
reg_run_count; }; @@ -861,6 +870,7 @@ struct acpi_parse_state { #define ACPI_PARSEOP_CLOSING_PAREN 0x10 #define ACPI_PARSEOP_COMPOUND 0x20 #define ACPI_PARSEOP_ASSIGNMENT 0x40 +#define ACPI_PARSEOP_ELSEIF 0x80 /***************************************************************************** * diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index e85366ceb15a..bad5bca03acc 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -401,17 +401,6 @@ #endif /* - * Some code only gets executed when the debugger is built in. - * Note that this is entirely independent of whether the - * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not. - */ -#ifdef ACPI_DEBUGGER -#define ACPI_DEBUGGER_EXEC(a) a -#else -#define ACPI_DEBUGGER_EXEC(a) -#endif - -/* * Macros used for ACPICA utilities only */ diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 5d261c942a0d..d082e62d7308 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -77,6 +77,7 @@ /* Object is not a package element */ #define ACPI_NOT_PACKAGE_ELEMENT ACPI_UINT32_MAX +#define ACPI_ALL_PACKAGE_ELEMENTS (ACPI_UINT32_MAX-1) /* Always emit warning message, not dependent on node flags */ @@ -183,13 +184,20 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, union acpi_operand_object **return_object); acpi_status -acpi_ns_convert_to_unicode(union acpi_operand_object *original_object, +acpi_ns_convert_to_unicode(struct acpi_namespace_node *scope, + union acpi_operand_object *original_object, union acpi_operand_object **return_object); acpi_status -acpi_ns_convert_to_resource(union acpi_operand_object *original_object, +acpi_ns_convert_to_resource(struct acpi_namespace_node *scope, + union acpi_operand_object *original_object, union acpi_operand_object **return_object); +acpi_status +acpi_ns_convert_to_reference(struct acpi_namespace_node *scope, + union acpi_operand_object *original_object, + union acpi_operand_object **return_object); + /* * nsdump - Namespace dump/print utilities */ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 0bd02c4a5f75..2b154cfbe136 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -93,9 +93,10 @@ #define AOPOBJ_AML_CONSTANT 0x01 /* Integer is an AML constant */ #define AOPOBJ_STATIC_POINTER 0x02 /* Data is part of an ACPI table, don't delete */ #define AOPOBJ_DATA_VALID 0x04 /* Object is initialized and data is valid */ -#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */ -#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */ -#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */ +#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized */ +#define AOPOBJ_REG_CONNECTED 0x10 /* _REG was run */ +#define AOPOBJ_SETUP_COMPLETE 0x20 /* Region setup is complete */ +#define AOPOBJ_INVALID 0x40 /* Host OS won't allow a Region address */ /****************************************************************************** * diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index f9acf92fa0bc..324512db62bf 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -92,7 +92,7 @@ #define ARGP_BYTELIST_OP ARGP_LIST1 (ARGP_NAMESTRING) #define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) -#define ARGP_COND_REF_OF_OP 
ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME) +#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_NAME_OR_REF,ARGP_TARGET) #define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING) #define ARGP_CONTINUE_OP ARG_NONE #define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME) @@ -152,13 +152,14 @@ #define ARGP_NAMEPATH_OP ARGP_LIST1 (ARGP_NAMESTRING) #define ARGP_NOOP_OP ARG_NONE #define ARGP_NOTIFY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG) +#define ARGP_OBJECT_TYPE_OP ARGP_LIST1 (ARGP_NAME_OR_REF) #define ARGP_ONE_OP ARG_NONE #define ARGP_ONES_OP ARG_NONE #define ARGP_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_BYTEDATA, ARGP_DATAOBJLIST) #define ARGP_POWER_RES_OP ARGP_LIST5 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_WORDDATA, ARGP_OBJLIST) #define ARGP_PROCESSOR_OP ARGP_LIST6 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_DWORDDATA, ARGP_BYTEDATA, ARGP_OBJLIST) #define ARGP_QWORD_OP ARGP_LIST1 (ARGP_QWORDDATA) -#define ARGP_REF_OF_OP ARGP_LIST1 (ARGP_SUPERNAME) +#define ARGP_REF_OF_OP ARGP_LIST1 (ARGP_NAME_OR_REF) #define ARGP_REGION_OP ARGP_LIST4 (ARGP_NAME, ARGP_BYTEDATA, ARGP_TERMARG, ARGP_TERMARG) #define ARGP_RELEASE_OP ARGP_LIST1 (ARGP_SUPERNAME) #define ARGP_RESERVEDFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING) @@ -185,7 +186,6 @@ #define ARGP_TO_HEX_STR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET) #define ARGP_TO_INTEGER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET) #define ARGP_TO_STRING_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) -#define ARGP_TYPE_OP ARGP_LIST1 (ARGP_SUPERNAME) #define ARGP_UNLOAD_OP ARGP_LIST1 (ARGP_SUPERNAME) #define ARGP_VAR_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_DATAOBJLIST) #define ARGP_WAIT_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG) @@ -223,7 +223,7 @@ #define ARGI_BUFFER_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_BYTE_OP ARGI_INVALID_OPCODE #define ARGI_BYTELIST_OP ARGI_INVALID_OPCODE -#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF) +#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_ANYTYPE, ARGI_ANYTYPE, ARGI_TARGETREF) #define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF) #define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF) #define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE @@ -285,6 +285,7 @@ #define ARGI_NAMEPATH_OP ARGI_INVALID_OPCODE #define ARGI_NOOP_OP ARG_NONE #define ARGI_NOTIFY_OP ARGI_LIST2 (ARGI_DEVICE_REF, ARGI_INTEGER) +#define ARGI_OBJECT_TYPE_OP ARGI_LIST1 (ARGI_ANYTYPE) #define ARGI_ONE_OP ARG_NONE #define ARGI_ONES_OP ARG_NONE #define ARGI_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER) @@ -318,7 +319,6 @@ #define ARGI_TO_HEX_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET) #define ARGI_TO_INTEGER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET) #define ARGI_TO_STRING_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_FIXED_TARGET) -#define ARGI_TYPE_OP ARGI_LIST1 (ARGI_ANYTYPE) #define ARGI_UNLOAD_OP ARGI_LIST1 (ARGI_DDBHANDLE) #define ARGI_VAR_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_WAIT_OP ARGI_LIST2 (ARGI_EVENT, ARGI_INTEGER) diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 8fc8c7cea879..96d510a7feba 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -92,7 +92,13 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state, acpi_status acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, struct acpi_parse_state *parser_state, - union acpi_parse_object *arg, u8 method_call); + union acpi_parse_object *arg, + u8 possible_method_call); + +/* Values for u8 
above */ + +#define ACPI_NOT_METHOD_CALL FALSE +#define ACPI_POSSIBLE_METHOD_CALL TRUE acpi_status acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 8b8fef6cc32d..9e84c05c0b91 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -184,24 +184,24 @@ acpi_status acpi_ut_init_globals(void); #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) -char *acpi_ut_get_mutex_name(u32 mutex_id); +const char *acpi_ut_get_mutex_name(u32 mutex_id); const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type); #endif -char *acpi_ut_get_type_name(acpi_object_type type); +const char *acpi_ut_get_type_name(acpi_object_type type); -char *acpi_ut_get_node_name(void *object); +const char *acpi_ut_get_node_name(void *object); -char *acpi_ut_get_descriptor_name(void *object); +const char *acpi_ut_get_descriptor_name(void *object); const char *acpi_ut_get_reference_name(union acpi_operand_object *object); -char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc); +const char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc); -char *acpi_ut_get_region_name(u8 space_id); +const char *acpi_ut_get_region_name(u8 space_id); -char *acpi_ut_get_event_name(u32 event_id); +const char *acpi_ut_get_event_name(u32 event_id); char acpi_ut_hex_to_ascii_char(u64 integer, u32 position); @@ -353,14 +353,6 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, u8 method_count, u8 *out_values); /* - * utfileio - file operations - */ -#ifdef ACPI_APPLICATION -acpi_status -acpi_ut_read_table_from_file(char *filename, struct acpi_table_header **table); -#endif - -/* * utids - device ID support */ acpi_status @@ -372,10 +364,6 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node, struct acpi_pnp_device_id ** return_id); acpi_status -acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, - struct acpi_pnp_device_id **return_id); - -acpi_status acpi_ut_execute_CID(struct acpi_namespace_node *device_node, struct acpi_pnp_device_id_list ** return_cid_list); diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 883f20cfa698..ab9f3f1fbb0f 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -120,7 +120,7 @@ #define AML_CREATE_WORD_FIELD_OP (u16) 0x8b #define AML_CREATE_BYTE_FIELD_OP (u16) 0x8c #define AML_CREATE_BIT_FIELD_OP (u16) 0x8d -#define AML_TYPE_OP (u16) 0x8e +#define AML_OBJECT_TYPE_OP (u16) 0x8e #define AML_CREATE_QWORD_FIELD_OP (u16) 0x8f /* ACPI 2.0 */ #define AML_LAND_OP (u16) 0x90 #define AML_LOR_OP (u16) 0x91 @@ -238,7 +238,8 @@ #define ARGP_TERMLIST 0x0F #define ARGP_WORDDATA 0x10 #define ARGP_QWORDDATA 0x11 -#define ARGP_SIMPLENAME 0x12 +#define ARGP_SIMPLENAME 0x12 /* name_string | local_term | arg_term */ +#define ARGP_NAME_OR_REF 0x13 /* For object_type only */ /* * Resolved argument types for the AML Interpreter diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c index 30414b3d7fdd..328c35b323d5 100644 --- a/drivers/acpi/acpica/dbcmds.c +++ b/drivers/acpi/acpica/dbcmds.c @@ -798,7 +798,7 @@ acpi_db_device_resources(acpi_handle obj_handle, acpi_status status; node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle); - parent_path = acpi_ns_get_external_pathname(node); + parent_path = acpi_ns_get_normalized_pathname(node, TRUE); if (!parent_path) { return (AE_NO_MEMORY); } @@ -1131,13 +1131,8 @@ void acpi_db_trace(char *enable_arg, char 
*method_arg, char *once_arg) u32 debug_layer = 0; u32 flags = 0; - if (enable_arg) { - acpi_ut_strupr(enable_arg); - } - - if (once_arg) { - acpi_ut_strupr(once_arg); - } + acpi_ut_strupr(enable_arg); + acpi_ut_strupr(once_arg); if (method_arg) { if (acpi_db_trace_method_name) { diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c index 672977ec7c7d..1965b48d8e83 100644 --- a/drivers/acpi/acpica/dbdisply.c +++ b/drivers/acpi/acpica/dbdisply.c @@ -48,6 +48,7 @@ #include "acnamesp.h" #include "acparser.h" #include "acinterp.h" +#include "acevents.h" #include "acdebug.h" #define _COMPONENT ACPI_CA_DEBUGGER @@ -588,7 +589,7 @@ void acpi_db_display_calling_tree(void) * * FUNCTION: acpi_db_display_object_type * - * PARAMETERS: name - User entered NS node handle or name + * PARAMETERS: object_arg - User entered NS node handle * * RETURN: None * @@ -596,44 +597,34 @@ void acpi_db_display_calling_tree(void) * ******************************************************************************/ -void acpi_db_display_object_type(char *name) +void acpi_db_display_object_type(char *object_arg) { - struct acpi_namespace_node *node; + acpi_handle handle; struct acpi_device_info *info; acpi_status status; u32 i; - node = acpi_db_convert_to_node(name); - if (!node) { - return; - } + handle = ACPI_TO_POINTER(strtoul(object_arg, NULL, 16)); - status = acpi_get_object_info(ACPI_CAST_PTR(acpi_handle, node), &info); + status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not get object info, %s\n", acpi_format_exception(status)); return; } - if (info->valid & ACPI_VALID_ADR) { - acpi_os_printf("ADR: %8.8X%8.8X, STA: %8.8X, Flags: %X\n", - ACPI_FORMAT_UINT64(info->address), - info->current_status, info->flags); - } - if (info->valid & ACPI_VALID_SXDS) { - acpi_os_printf("S1D-%2.2X S2D-%2.2X S3D-%2.2X S4D-%2.2X\n", - info->highest_dstates[0], - info->highest_dstates[1], - info->highest_dstates[2], - info->highest_dstates[3]); - } - if (info->valid & ACPI_VALID_SXWS) { - acpi_os_printf - ("S0W-%2.2X S1W-%2.2X S2W-%2.2X S3W-%2.2X S4W-%2.2X\n", - info->lowest_dstates[0], info->lowest_dstates[1], - info->lowest_dstates[2], info->lowest_dstates[3], - info->lowest_dstates[4]); - } + acpi_os_printf("ADR: %8.8X%8.8X, STA: %8.8X, Flags: %X\n", + ACPI_FORMAT_UINT64(info->address), + info->current_status, info->flags); + + acpi_os_printf("S1D-%2.2X S2D-%2.2X S3D-%2.2X S4D-%2.2X\n", + info->highest_dstates[0], info->highest_dstates[1], + info->highest_dstates[2], info->highest_dstates[3]); + + acpi_os_printf("S0W-%2.2X S1W-%2.2X S2W-%2.2X S3W-%2.2X S4W-%2.2X\n", + info->lowest_dstates[0], info->lowest_dstates[1], + info->lowest_dstates[2], info->lowest_dstates[3], + info->lowest_dstates[4]); if (info->valid & ACPI_VALID_HID) { acpi_os_printf("HID: %s\n", info->hardware_id.string); @@ -643,10 +634,6 @@ void acpi_db_display_object_type(char *name) acpi_os_printf("UID: %s\n", info->unique_id.string); } - if (info->valid & ACPI_VALID_SUB) { - acpi_os_printf("SUB: %s\n", info->subsystem_id.string); - } - if (info->valid & ACPI_VALID_CID) { for (i = 0; i < info->compatible_id_list.count; i++) { acpi_os_printf("CID %u: %s\n", i, @@ -679,6 +666,12 @@ acpi_db_display_result_object(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { +#ifndef ACPI_APPLICATION + if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) { + return; + } +#endif + /* Only display if single stepping */ if (!acpi_gbl_cm_single_step) { @@ -708,6 +701,12 @@ 
acpi_db_display_argument_object(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { +#ifndef ACPI_APPLICATION + if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) { + return; + } +#endif + if (!acpi_gbl_cm_single_step) { return; } @@ -951,28 +950,25 @@ void acpi_db_display_handlers(void) if (obj_desc) { for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_gbl_space_id_list); i++) { space_id = acpi_gbl_space_id_list[i]; - handler_obj = obj_desc->device.handler; acpi_os_printf(ACPI_PREDEFINED_PREFIX, acpi_ut_get_region_name((u8)space_id), space_id); - while (handler_obj) { - if (acpi_gbl_space_id_list[i] == - handler_obj->address_space.space_id) { - acpi_os_printf - (ACPI_HANDLER_PRESENT_STRING, - (handler_obj->address_space. - handler_flags & - ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) - ? "Default" : "User", - handler_obj->address_space. - handler); - - goto found_handler; - } + handler_obj = + acpi_ev_find_region_handler(space_id, + obj_desc->common_notify. + handler); + if (handler_obj) { + acpi_os_printf(ACPI_HANDLER_PRESENT_STRING, + (handler_obj->address_space. + handler_flags & + ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) + ? "Default" : "User", + handler_obj->address_space. + handler); - handler_obj = handler_obj->address_space.next; + goto found_handler; } /* There is no handler for this space_id */ @@ -984,7 +980,7 @@ found_handler: ; /* Find all handlers for user-defined space_IDs */ - handler_obj = obj_desc->device.handler; + handler_obj = obj_desc->common_notify.handler; while (handler_obj) { if (handler_obj->address_space.space_id >= ACPI_USER_REGION_BEGIN) { @@ -1079,14 +1075,14 @@ acpi_db_display_non_root_handlers(acpi_handle obj_handle, return (AE_OK); } - pathname = acpi_ns_get_external_pathname(node); + pathname = acpi_ns_get_normalized_pathname(node, TRUE); if (!pathname) { return (AE_OK); } /* Display all handlers associated with this device */ - handler_obj = obj_desc->device.handler; + handler_obj = obj_desc->common_notify.handler; while (handler_obj) { acpi_os_printf(ACPI_PREDEFINED_PREFIX, acpi_ut_get_region_name((u8)handler_obj-> diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c index d0e6b20ce82a..31f54d71c51a 100644 --- a/drivers/acpi/acpica/dbfileio.c +++ b/drivers/acpi/acpica/dbfileio.c @@ -46,6 +46,10 @@ #include "accommon.h" #include "acdebug.h" #include "actables.h" +#include <stdio.h> +#ifdef ACPI_APPLICATION +#include "acapps.h" +#endif #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbfileio") @@ -110,122 +114,31 @@ void acpi_db_open_debug_file(char *name) } #endif -#ifdef ACPI_APPLICATION -#include "acapps.h" - -/******************************************************************************* - * - * FUNCTION: ae_local_load_table - * - * PARAMETERS: table - pointer to a buffer containing the entire - * table to be loaded - * - * RETURN: Status - * - * DESCRIPTION: This function is called to load a table from the caller's - * buffer. The buffer must contain an entire ACPI Table including - * a valid header. The header fields will be verified, and if it - * is determined that the table is invalid, the call will fail. 
- * - ******************************************************************************/ - -static acpi_status ae_local_load_table(struct acpi_table_header *table) -{ - acpi_status status = AE_OK; - - ACPI_FUNCTION_TRACE(ae_local_load_table); - -#if 0 -/* struct acpi_table_desc table_info; */ - - if (!table) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - table_info.pointer = table; - status = acpi_tb_recognize_table(&table_info, ACPI_TABLE_ALL); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* Install the new table into the local data structures */ - - status = acpi_tb_init_table_descriptor(&table_info); - if (ACPI_FAILURE(status)) { - if (status == AE_ALREADY_EXISTS) { - - /* Table already exists, no error */ - - status = AE_OK; - } - - /* Free table allocated by acpi_tb_get_table */ - - acpi_tb_delete_single_table(&table_info); - return_ACPI_STATUS(status); - } -#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY)) - - status = - acpi_ns_load_table(table_info.installed_desc, acpi_gbl_root_node); - if (ACPI_FAILURE(status)) { - - /* Uninstall table and free the buffer */ - - acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_DSDT); - return_ACPI_STATUS(status); - } -#endif -#endif - - return_ACPI_STATUS(status); -} -#endif - /******************************************************************************* * - * FUNCTION: acpi_db_get_table_from_file + * FUNCTION: acpi_db_load_tables * - * PARAMETERS: filename - File where table is located - * return_table - Where a pointer to the table is returned + * PARAMETERS: list_head - List of ACPI tables to load * * RETURN: Status * - * DESCRIPTION: Load an ACPI table from a file + * DESCRIPTION: Load ACPI tables from a previously constructed table list. * ******************************************************************************/ -acpi_status -acpi_db_get_table_from_file(char *filename, - struct acpi_table_header **return_table, - u8 must_be_aml_file) +acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head) { -#ifdef ACPI_APPLICATION acpi_status status; + struct acpi_new_table_desc *table_list_head; struct acpi_table_header *table; - u8 is_aml_table = TRUE; - - status = acpi_ut_read_table_from_file(filename, &table); - if (ACPI_FAILURE(status)) { - return (status); - } - - if (must_be_aml_file) { - is_aml_table = acpi_ut_is_aml_table(table); - if (!is_aml_table) { - ACPI_EXCEPTION((AE_INFO, AE_OK, - "Input for -e is not an AML table: " - "\"%4.4s\" (must be DSDT/SSDT)", - table->signature)); - return (AE_TYPE); - } - } - if (is_aml_table) { + /* Load all ACPI tables in the list */ - /* Attempt to recognize and install the table */ + table_list_head = list_head; + while (table_list_head) { + table = table_list_head->table; - status = ae_local_load_table(table); + status = acpi_load_table(table); if (ACPI_FAILURE(status)) { if (status == AE_ALREADY_EXISTS) { acpi_os_printf @@ -239,18 +152,12 @@ acpi_db_get_table_from_file(char *filename, return (status); } - acpi_tb_print_table_header(0, table); - fprintf(stderr, "Acpi table [%4.4s] successfully installed and loaded\n", table->signature); - } - acpi_gbl_acpi_hardware_present = FALSE; - if (return_table) { - *return_table = table; + table_list_head = table_list_head->next; } -#endif /* ACPI_APPLICATION */ return (AE_OK); } diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index 0480254437f1..6203001baa30 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -45,6 +45,10 @@ #include 
"accommon.h" #include "acdebug.h" +#ifdef ACPI_APPLICATION +#include "acapps.h" +#endif + #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbinput") @@ -53,8 +57,6 @@ static u32 acpi_db_get_line(char *input_buffer); static u32 acpi_db_match_command(char *user_command); -static void acpi_db_single_thread(void); - static void acpi_db_display_command_info(char *command, u8 display_all); static void acpi_db_display_help(char *command); @@ -623,9 +625,7 @@ static u32 acpi_db_get_line(char *input_buffer) /* Uppercase the actual command */ - if (acpi_gbl_db_args[0]) { - acpi_ut_strupr(acpi_gbl_db_args[0]); - } + acpi_ut_strupr(acpi_gbl_db_args[0]); count = i; if (count) { @@ -1050,11 +1050,17 @@ acpi_db_command_dispatch(char *input_buffer, acpi_db_close_debug_file(); break; - case CMD_LOAD: + case CMD_LOAD:{ + struct acpi_new_table_desc *list_head = NULL; - status = - acpi_db_get_table_from_file(acpi_gbl_db_args[1], NULL, - FALSE); + status = + ac_get_all_tables_from_file(acpi_gbl_db_args[1], + ACPI_GET_ALL_TABLES, + &list_head); + if (ACPI_SUCCESS(status)) { + acpi_db_load_tables(list_head); + } + } break; case CMD_OPEN: @@ -1149,55 +1155,16 @@ acpi_db_command_dispatch(char *input_buffer, void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context) { - acpi_status status = AE_OK; - acpi_status Mstatus; - - while (status != AE_CTRL_TERMINATE && !acpi_gbl_db_terminate_loop) { - acpi_gbl_method_executing = FALSE; - acpi_gbl_step_to_next_call = FALSE; - - Mstatus = acpi_os_acquire_mutex(acpi_gbl_db_command_ready, - ACPI_WAIT_FOREVER); - if (ACPI_FAILURE(Mstatus)) { - return; - } - - status = - acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL, NULL); - acpi_os_release_mutex(acpi_gbl_db_command_complete); - } + (void)acpi_db_user_commands(); acpi_gbl_db_threads_terminated = TRUE; } /******************************************************************************* * - * FUNCTION: acpi_db_single_thread - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Debugger execute thread. Waits for a command line, then - * simply dispatches it. 
- * - ******************************************************************************/ - -static void acpi_db_single_thread(void) -{ - - acpi_gbl_method_executing = FALSE; - acpi_gbl_step_to_next_call = FALSE; - - (void)acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL, NULL); -} - -/******************************************************************************* - * * FUNCTION: acpi_db_user_commands * - * PARAMETERS: prompt - User prompt (depends on mode) - * op - Current executing parse op + * PARAMETERS: None * * RETURN: None * @@ -1206,7 +1173,7 @@ static void acpi_db_single_thread(void) * ******************************************************************************/ -acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op) +acpi_status acpi_db_user_commands(void) { acpi_status status = AE_OK; @@ -1216,52 +1183,31 @@ acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op) while (!acpi_gbl_db_terminate_loop) { - /* Force output to console until a command is entered */ - - acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT); - - /* Different prompt if method is executing */ - - if (!acpi_gbl_method_executing) { - acpi_os_printf("%1c ", ACPI_DEBUGGER_COMMAND_PROMPT); - } else { - acpi_os_printf("%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT); - } + /* Wait the readiness of the command */ - /* Get the user input line */ - - status = acpi_os_get_line(acpi_gbl_db_line_buf, - ACPI_DB_LINE_BUFFER_SIZE, NULL); + status = acpi_os_wait_command_ready(); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "While parsing command line")); - return (status); + break; } - /* Check for single or multithreaded debug */ + /* Just call to the command line interpreter */ - if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) { - /* - * Signal the debug thread that we have a command to execute, - * and wait for the command to complete. 
- */ - acpi_os_release_mutex(acpi_gbl_db_command_ready); - if (ACPI_FAILURE(status)) { - return (status); - } + acpi_gbl_method_executing = FALSE; + acpi_gbl_step_to_next_call = FALSE; - status = - acpi_os_acquire_mutex(acpi_gbl_db_command_complete, - ACPI_WAIT_FOREVER); - if (ACPI_FAILURE(status)) { - return (status); - } - } else { - /* Just call to the command line interpreter */ + (void)acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL, + NULL); + + /* Notify the completion of the command */ - acpi_db_single_thread(); + status = acpi_os_notify_command_complete(); + if (ACPI_FAILURE(status)) { + break; } } + if (ACPI_FAILURE(status) && status != AE_CTRL_TERMINATE) { + ACPI_EXCEPTION((AE_INFO, status, "While parsing command line")); + } return (status); } diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index 04ff1ebfda58..4f68dfc6ea55 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -438,7 +438,7 @@ acpi_db_walk_for_predefined_names(acpi_handle obj_handle, return (AE_OK); } - pathname = acpi_ns_get_external_pathname(node); + pathname = acpi_ns_get_normalized_pathname(node, TRUE); if (!pathname) { return (AE_OK); } diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c index 4ba0a20811eb..de255d975941 100644 --- a/drivers/acpi/acpica/dbstats.c +++ b/drivers/acpi/acpica/dbstats.c @@ -382,6 +382,7 @@ acpi_status acpi_db_display_statistics(char *type_arg) acpi_gbl_node_type_count[i], acpi_gbl_obj_type_count[i]); } + acpi_os_printf("%16.16s % 10ld% 10ld\n", "Misc/Unknown", acpi_gbl_node_type_count_misc, acpi_gbl_obj_type_count_misc); diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c index 10ea8bf9b810..68b4e8d9e1d6 100644 --- a/drivers/acpi/acpica/dbtest.c +++ b/drivers/acpi/acpica/dbtest.c @@ -953,7 +953,7 @@ acpi_db_evaluate_one_predefined_name(acpi_handle obj_handle, return (AE_OK); } - pathname = acpi_ns_get_external_pathname(node); + pathname = acpi_ns_get_normalized_pathname(node, TRUE); if (!pathname) { return (AE_OK); } diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c index 86790e080139..8c85d85a9cb2 100644 --- a/drivers/acpi/acpica/dbutils.c +++ b/drivers/acpi/acpica/dbutils.c @@ -173,6 +173,7 @@ void acpi_db_dump_external_object(union acpi_object *obj_desc, u32 level) if (obj_desc->buffer.length > 16) { acpi_os_printf("\n"); } + acpi_ut_debug_dump_buffer(ACPI_CAST_PTR (u8, obj_desc->buffer.pointer), diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c index 342298a6e10f..d7ff58e8c233 100644 --- a/drivers/acpi/acpica/dbxface.c +++ b/drivers/acpi/acpica/dbxface.c @@ -85,46 +85,21 @@ acpi_db_start_command(struct acpi_walk_state *walk_state, acpi_gbl_method_executing = TRUE; status = AE_CTRL_TRUE; - while (status == AE_CTRL_TRUE) { - if (acpi_gbl_debugger_configuration == DEBUGGER_MULTI_THREADED) { - - /* Handshake with the front-end that gets user command lines */ - - acpi_os_release_mutex(acpi_gbl_db_command_complete); - - status = - acpi_os_acquire_mutex(acpi_gbl_db_command_ready, - ACPI_WAIT_FOREVER); - if (ACPI_FAILURE(status)) { - return (status); - } - } else { - /* Single threaded, we must get a command line ourselves */ - - /* Force output to console until a command is entered */ - acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT); + while (status == AE_CTRL_TRUE) { - /* Different prompt if method is executing */ + /* Notify the completion of the command */ - if (!acpi_gbl_method_executing) { - acpi_os_printf("%1c ", - 
ACPI_DEBUGGER_COMMAND_PROMPT); - } else { - acpi_os_printf("%1c ", - ACPI_DEBUGGER_EXECUTE_PROMPT); - } + status = acpi_os_notify_command_complete(); + if (ACPI_FAILURE(status)) { + goto error_exit; + } - /* Get the user input line */ + /* Wait the readiness of the command */ - status = acpi_os_get_line(acpi_gbl_db_line_buf, - ACPI_DB_LINE_BUFFER_SIZE, - NULL); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "While parsing command line")); - return (status); - } + status = acpi_os_wait_command_ready(); + if (ACPI_FAILURE(status)) { + goto error_exit; } status = @@ -134,11 +109,46 @@ acpi_db_start_command(struct acpi_walk_state *walk_state, /* acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); */ +error_exit: + if (ACPI_FAILURE(status) && status != AE_CTRL_TERMINATE) { + ACPI_EXCEPTION((AE_INFO, status, + "While parsing/handling command line")); + } return (status); } /******************************************************************************* * + * FUNCTION: acpi_db_signal_break_point + * + * PARAMETERS: walk_state - Current walk + * + * RETURN: Status + * + * DESCRIPTION: Called for AML_BREAK_POINT_OP + * + ******************************************************************************/ + +void acpi_db_signal_break_point(struct acpi_walk_state *walk_state) +{ + +#ifndef ACPI_APPLICATION + if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) { + return; + } +#endif + + /* + * Set the single-step flag. This will cause the debugger (if present) + * to break to the console within the AML debugger at the start of the + * next AML instruction. + */ + acpi_gbl_cm_single_step = TRUE; + acpi_os_printf("**break** Executed AML BreakPoint opcode\n"); +} + +/******************************************************************************* + * * FUNCTION: acpi_db_single_step * * PARAMETERS: walk_state - Current walk @@ -420,15 +430,7 @@ acpi_status acpi_initialize_debugger(void) /* These were created with one unit, grab it */ - status = acpi_os_acquire_mutex(acpi_gbl_db_command_complete, - ACPI_WAIT_FOREVER); - if (ACPI_FAILURE(status)) { - acpi_os_printf("Could not get debugger mutex\n"); - return_ACPI_STATUS(status); - } - - status = acpi_os_acquire_mutex(acpi_gbl_db_command_ready, - ACPI_WAIT_FOREVER); + status = acpi_os_initialize_command_signals(); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not get debugger mutex\n"); return_ACPI_STATUS(status); @@ -473,13 +475,14 @@ void acpi_terminate_debugger(void) acpi_gbl_db_terminate_loop = TRUE; if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) { - acpi_os_release_mutex(acpi_gbl_db_command_ready); /* Wait the AML Debugger threads */ while (!acpi_gbl_db_threads_terminated) { acpi_os_sleep(100); } + + acpi_os_terminate_command_signals(); } if (acpi_gbl_db_buffer) { diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index e2ab59e39162..76cfced31f9f 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -194,8 +194,8 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc) extra_desc = acpi_ns_get_secondary_object(obj_desc); node = obj_desc->buffer_field.node; - ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD, - node, NULL)); + ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname + (ACPI_TYPE_BUFFER_FIELD, node, NULL)); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n", acpi_ut_get_node_name(node))); @@ -385,7 +385,8 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc) 
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_REGION, node, NULL)); - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n", + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[%4.4s] OpRegion Arg Init at AML %p\n", acpi_ut_get_node_name(node), extra_desc->extra.aml_start)); diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 435fc16e2f83..06a6f7f3af52 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -47,6 +47,7 @@ #include "amlcode.h" #include "acdispat.h" #include "acinterp.h" +#include "acdebug.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dscontrol") @@ -348,14 +349,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, case AML_BREAK_POINT_OP: - /* - * Set the single-step flag. This will cause the debugger (if present) - * to break to the console within the AML debugger at the start of the - * next AML instruction. - */ - ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE); - ACPI_DEBUGGER_EXEC(acpi_os_printf - ("**break** Executed AML BreakPoint opcode\n")); + acpi_db_signal_break_point(walk_state); /* Call to the OSL in case OS wants a piece of the action */ diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 309556efc553..1eb82bd7ee16 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -161,6 +161,7 @@ acpi_ds_dump_method_stack(acpi_status status, ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "\n**** Exception %s during execution of method ", acpi_format_exception(status))); + acpi_ds_print_node_pathname(walk_state->method_node, NULL); /* Display stack of executing methods */ @@ -203,8 +204,8 @@ acpi_ds_dump_method_stack(acpi_status status, } else { /* * This method has called another method - * NOTE: the method call parse subtree is already deleted at this - * point, so we cannot disassemble the method invocation. + * NOTE: the method call parse subtree is already deleted at + * this point, so we cannot disassemble the method invocation. */ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "Call to method ")); diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 20de148594fd..6bca0ec42dbd 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -106,6 +106,7 @@ acpi_ds_create_external_region(acpi_status lookup_status, * insert the name into the namespace. */ acpi_dm_add_op_to_external_list(op, path, ACPI_TYPE_REGION, 0, 0); + status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION, ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT, walk_state, node); @@ -202,11 +203,10 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, /* Enter the name_string into the namespace */ - status = - acpi_ns_lookup(walk_state->scope_info, - arg->common.value.string, ACPI_TYPE_ANY, - ACPI_IMODE_LOAD_PASS1, flags, walk_state, - &node); + status = acpi_ns_lookup(walk_state->scope_info, + arg->common.value.string, ACPI_TYPE_ANY, + ACPI_IMODE_LOAD_PASS1, flags, + walk_state, &node); if (ACPI_FAILURE(status)) { ACPI_ERROR_NAMESPACE(arg->common.value.string, status); return_ACPI_STATUS(status); @@ -244,8 +244,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, } /* - * Remember location in AML stream of the field unit opcode and operands -- - * since the buffer and index operands must be evaluated. + * Remember location in AML stream of the field unit opcode and operands + * -- since the buffer and index operands must be evaluated. 
*/ second_desc = obj_desc->common.next_object; second_desc->extra.aml_start = op->named.data; @@ -310,8 +310,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, switch (arg->common.aml_opcode) { case AML_INT_RESERVEDFIELD_OP: - position = (u64) info->field_bit_position - + (u64) arg->common.value.size; + position = (u64)info->field_bit_position + + (u64)arg->common.value.size; if (position > ACPI_UINT32_MAX) { ACPI_ERROR((AE_INFO, @@ -344,13 +344,13 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, /* access_attribute (attrib_quick, attrib_byte, etc.) */ - info->attribute = - (u8)((arg->common.value.integer >> 8) & 0xFF); + info->attribute = (u8) + ((arg->common.value.integer >> 8) & 0xFF); /* access_length (for serial/buffer protocols) */ - info->access_length = - (u8)((arg->common.value.integer >> 16) & 0xFF); + info->access_length = (u8) + ((arg->common.value.integer >> 16) & 0xFF); break; case AML_INT_CONNECTION_OP: @@ -425,8 +425,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, /* Keep track of bit position for the next field */ - position = (u64) info->field_bit_position - + (u64) arg->common.value.size; + position = (u64)info->field_bit_position + + (u64)arg->common.value.size; if (position > ACPI_UINT32_MAX) { ACPI_ERROR((AE_INFO, @@ -716,11 +716,12 @@ acpi_ds_create_bank_field(union acpi_parse_object *op, /* * Use Info.data_register_node to store bank_field Op - * It's safe because data_register_node will never be used when create bank field - * We store aml_start and aml_length in the bank_field Op for late evaluation - * Used in acpi_ex_prep_field_value(Info) + * It's safe because data_register_node will never be used when create + * bank field \we store aml_start and aml_length in the bank_field Op for + * late evaluation. Used in acpi_ex_prep_field_value(Info) * - * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"? + * TBD: Or, should we add a field in struct acpi_create_field_info, like + * "void *ParentOp"? 
*/ info.data_register_node = (struct acpi_namespace_node *)op; diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index 920f1b199bc6..c1d8af8a8aaf 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -247,7 +247,7 @@ acpi_ds_initialize_objects(u32 table_index, /* Summary of objects initialized */ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, - "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, " + "Table [%4.4s: %-8.8s] (id %.2X) - %4u Objects with %3u Devices, " "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n", table->signature, table->oem_table_id, owner_id, info.object_count, info.device_count, diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index bc32f3194afe..6585e8e37c8e 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -118,10 +118,9 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, return_ACPI_STATUS(AE_NO_MEMORY); } - status = - acpi_ds_init_aml_walk(walk_state, op, node, - obj_desc->method.aml_start, - obj_desc->method.aml_length, NULL, 0); + status = acpi_ds_init_aml_walk(walk_state, op, node, + obj_desc->method.aml_start, + obj_desc->method.aml_length, NULL, 0); if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); acpi_ps_free_op(op); @@ -375,7 +374,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, && (walk_state->thread->current_sync_level > obj_desc->method.mutex->mutex.sync_level)) { ACPI_ERROR((AE_INFO, - "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)", + "Cannot acquire Mutex for method [%4.4s]" + ", current SyncLevel is too large (%u)", acpi_ut_get_node_name(method_node), walk_state->thread->current_sync_level)); @@ -411,8 +411,19 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, obj_desc->method.mutex->mutex.thread_id = walk_state->thread->thread_id; - walk_state->thread->current_sync_level = - obj_desc->method.sync_level; + + /* + * Update the current sync_level only if this is not an auto- + * serialized method. In the auto case, we have to ignore + * the sync level for the method mutex (created for the + * auto-serialization) because we have no idea of what the + * sync level should be. Therefore, just ignore it. + */ + if (!(obj_desc->method.info_flags & + ACPI_METHOD_IGNORE_SYNC_LEVEL)) { + walk_state->thread->current_sync_level = + obj_desc->method.sync_level; + } } else { obj_desc->method.mutex->mutex. original_sync_level = @@ -501,16 +512,18 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, /* Init for new method, possibly wait on method mutex */ - status = acpi_ds_begin_method_execution(method_node, obj_desc, - this_walk_state); + status = + acpi_ds_begin_method_execution(method_node, obj_desc, + this_walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Begin method parse/execution. 
Create a new walk state */ - next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id, - NULL, obj_desc, thread); + next_walk_state = + acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc, + thread); if (!next_walk_state) { status = AE_NO_MEMORY; goto cleanup; @@ -797,7 +810,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, info_flags & ACPI_METHOD_SERIALIZED_PENDING) { if (walk_state) { ACPI_INFO((AE_INFO, - "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error", + "Marking method %4.4s as Serialized " + "because of AE_ALREADY_EXISTS error", walk_state->method_node->name. ascii)); } @@ -815,6 +829,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, */ method_desc->method.info_flags &= ~ACPI_METHOD_SERIALIZED_PENDING; + method_desc->method.info_flags |= (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL); diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index 2e4c42b377ec..03c44f2ac7b7 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c @@ -99,6 +99,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state) for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) { ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name, NAMEOF_ARG_NTE); + walk_state->arguments[i].name.integer |= (i << 24); walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED; walk_state->arguments[i].type = ACPI_TYPE_ANY; @@ -201,7 +202,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params, if (!params) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "No param list passed to method\n")); + "No parameter list passed to method\n")); return_ACPI_STATUS(AE_OK); } @@ -214,9 +215,9 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params, * Store the argument in the method/walk descriptor. * Do not copy the arg in order to implement call by reference */ - status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index, - params[index], - walk_state); + status = + acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index, + params[index], walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -610,11 +611,11 @@ acpi_ds_store_object_to_local(u8 type, * do the indirect store */ if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) == - ACPI_DESC_TYPE_OPERAND) - && (current_obj_desc->common.type == - ACPI_TYPE_LOCAL_REFERENCE) - && (current_obj_desc->reference.class == - ACPI_REFCLASS_REFOF)) { + ACPI_DESC_TYPE_OPERAND) && + (current_obj_desc->common.type == + ACPI_TYPE_LOCAL_REFERENCE) && + (current_obj_desc->reference.class == + ACPI_REFCLASS_REFOF)) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Arg (%p) is an ObjRef(Node), storing in node %p\n", new_obj_desc, @@ -638,6 +639,7 @@ acpi_ds_store_object_to_local(u8 type, if (new_obj_desc != obj_desc) { acpi_ut_remove_reference(new_obj_desc); } + return_ACPI_STATUS(status); } } diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 2beb7fd674ae..302c91f5377b 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -463,10 +463,10 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, arg->common.node); } } else { - status = acpi_ds_build_internal_object(walk_state, arg, - &obj_desc-> - package. - elements[i]); + status = + acpi_ds_build_internal_object(walk_state, arg, + &obj_desc->package. 
+ elements[i]); } if (*obj_desc_ptr) { @@ -525,7 +525,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, } ACPI_INFO((AE_INFO, - "Actual Package length (%u) is larger than NumElements field (%u), truncated", + "Actual Package length (%u) is larger than " + "NumElements field (%u), truncated", i, element_count)); } else if (i < element_count) { /* @@ -533,7 +534,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, * Note: this is not an error, the package is padded out with NULLs. */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n", + "Package List length (%u) smaller than NumElements " + "count (%u), padded with null elements\n", i, element_count)); } @@ -584,8 +586,9 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, /* Build an internal object for the argument(s) */ - status = acpi_ds_build_internal_object(walk_state, op->common.value.arg, - &obj_desc); + status = + acpi_ds_build_internal_object(walk_state, op->common.value.arg, + &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 81d7b9863e32..1edd66f18907 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -243,8 +243,9 @@ acpi_ds_init_buffer_field(u16 aml_opcode, * For field_flags, use LOCK_RULE = 0 (NO_LOCK), * UPDATE_RULE = 0 (UPDATE_PRESERVE) */ - status = acpi_ex_prep_common_field_object(obj_desc, field_flags, 0, - bit_offset, bit_count); + status = + acpi_ex_prep_common_field_object(obj_desc, field_flags, 0, + bit_offset, bit_count); if (ACPI_FAILURE(status)) { goto cleanup; } @@ -330,8 +331,9 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state, /* Resolve the operands */ - status = acpi_ex_resolve_operands(op->common.aml_opcode, - ACPI_WALK_OPERANDS, walk_state); + status = + acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS, + walk_state); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X", acpi_ps_get_opcode_name(op->common.aml_opcode), @@ -414,8 +416,9 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, /* Resolve the length and address operands to numbers */ - status = acpi_ex_resolve_operands(op->common.aml_opcode, - ACPI_WALK_OPERANDS, walk_state); + status = + acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS, + walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -452,7 +455,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, /* Now the address and length are valid for this opregion */ obj_desc->region.flags |= AOPOBJ_DATA_VALID; - return_ACPI_STATUS(status); } @@ -510,8 +512,9 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, * Resolve the Signature string, oem_id string, * and oem_table_id string operands */ - status = acpi_ex_resolve_operands(op->common.aml_opcode, - ACPI_WALK_OPERANDS, walk_state); + status = + acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS, + walk_state); if (ACPI_FAILURE(status)) { goto cleanup; } diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index ebc577baeaf9..fa8e2920a3ef 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -245,9 +245,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op, * we will use the return value */ if ((walk_state->control_state->common.state == - 
ACPI_CONTROL_PREDICATE_EXECUTING) - && (walk_state->control_state->control. - predicate_op == op)) { + ACPI_CONTROL_PREDICATE_EXECUTING) && + (walk_state->control_state->control.predicate_op == + op)) { goto result_used; } break; @@ -481,10 +481,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, /* Get the entire name string from the AML stream */ - status = - acpi_ex_get_name_string(ACPI_TYPE_ANY, - arg->common.value.buffer, - &name_string, &name_length); + status = acpi_ex_get_name_string(ACPI_TYPE_ANY, + arg->common.value.buffer, + &name_string, &name_length); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); @@ -503,9 +502,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, */ if ((walk_state->deferred_node) && (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD) - && (arg_index == - (u32) ((walk_state->opcode == - AML_CREATE_FIELD_OP) ? 3 : 2))) { + && (arg_index == (u32) + ((walk_state->opcode == AML_CREATE_FIELD_OP) ? 3 : 2))) { obj_desc = ACPI_CAST_PTR(union acpi_operand_object, walk_state->deferred_node); @@ -522,9 +520,10 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, op_info = acpi_ps_get_opcode_info(parent_op->common. aml_opcode); - if ((op_info->flags & AML_NSNODE) - && (parent_op->common.aml_opcode != - AML_INT_METHODCALL_OP) + + if ((op_info->flags & AML_NSNODE) && + (parent_op->common.aml_opcode != + AML_INT_METHODCALL_OP) && (parent_op->common.aml_opcode != AML_REGION_OP) && (parent_op->common.aml_opcode != AML_INT_NAMEPATH_OP)) { @@ -605,8 +604,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } - ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object - (obj_desc, walk_state)); + + acpi_db_display_argument_object(obj_desc, walk_state); } else { /* Check for null name case */ @@ -633,15 +632,16 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_NOT_IMPLEMENTED); } - if ((op_info->flags & AML_HAS_RETVAL) - || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) { + if ((op_info->flags & AML_HAS_RETVAL) || + (arg->common.flags & ACPI_PARSEOP_IN_STACK)) { ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Argument previously created, already stacked\n")); - ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object - (walk_state-> - operands[walk_state->num_operands - - 1], walk_state)); + acpi_db_display_argument_object(walk_state-> + operands[walk_state-> + num_operands - + 1], + walk_state); /* * Use value that was already previously returned @@ -685,8 +685,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object - (obj_desc, walk_state)); + acpi_db_display_argument_object(obj_desc, walk_state); } return_ACPI_STATUS(AE_OK); diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index df54d46225cd..ed2f1d362092 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -172,14 +172,14 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, cleanup: - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n", + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Completed a predicate eval=%X Op=%p\n", walk_state->control_state->common.value, walk_state->op)); /* Break to debugger to display result */ - ACPI_DEBUGGER_EXEC(acpi_db_display_result_object - (local_obj_desc, walk_state)); + acpi_db_display_result_object(local_obj_desc, walk_state); /* * Delete the predicate result object (we know that @@ -264,8 +264,8 @@ 
acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, (walk_state->control_state->common.state == ACPI_CONTROL_CONDITIONAL_EXECUTING)) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "Exec predicate Op=%p State=%p\n", op, - walk_state)); + "Exec predicate Op=%p State=%p\n", + op, walk_state)); walk_state->control_state->common.state = ACPI_CONTROL_PREDICATE_EXECUTING; @@ -386,11 +386,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) /* Call debugger for single step support (DEBUG build only) */ - ACPI_DEBUGGER_EXEC(status = - acpi_db_single_step(walk_state, op, op_class)); - ACPI_DEBUGGER_EXEC(if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status);} - ) ; + status = acpi_db_single_step(walk_state, op, op_class); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } /* Decode the Opcode Class */ @@ -502,9 +501,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) "Method Reference in a Package, Op=%p\n", op)); - op->common.node = - (struct acpi_namespace_node *)op->asl.value. - arg->asl.node; + op->common.node = (struct acpi_namespace_node *) + op->asl.value.arg->asl.node; acpi_ut_add_reference(op->asl.value.arg->asl. node->object); return_ACPI_STATUS(AE_OK); @@ -586,8 +584,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) * Put the Node on the object stack (Contains the ACPI Name * of this object) */ - walk_state->operands[0] = - (void *)op->common.parent->common.node; + walk_state->operands[0] = (void *) + op->common.parent->common.node; walk_state->num_operands = 1; status = acpi_ds_create_node(walk_state, @@ -692,7 +690,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) default: ACPI_ERROR((AE_INFO, - "Unimplemented opcode, class=0x%X type=0x%X Opcode=0x%X Op=%p", + "Unimplemented opcode, class=0x%X " + "type=0x%X Opcode=0x%X Op=%p", op_class, op_type, op->common.aml_opcode, op)); @@ -728,8 +727,8 @@ cleanup: /* Break to debugger to display result */ - ACPI_DEBUGGER_EXEC(acpi_db_display_result_object - (walk_state->result_obj, walk_state)); + acpi_db_display_result_object(walk_state->result_obj, + walk_state); /* * Delete the result op if and only if: diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 097188a6b1c1..b3254742aaf6 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -476,13 +476,9 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) status = acpi_ex_create_region(op->named.data, op->named.length, - (acpi_adr_space_type) ((op-> - common. - value. - arg)-> - common. - value. 
- integer), + (acpi_adr_space_type) + ((op->common.value.arg)-> + common.value.integer), walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index e2c08cd79aca..8a32153a111b 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -598,11 +598,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) * Executing a method: initialize the region and unlock * the interpreter */ - status = - acpi_ex_create_region(op->named.data, - op->named.length, - region_space, - walk_state); + status = acpi_ex_create_region(op->named.data, + op->named.length, + region_space, + walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -664,6 +663,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) length, walk_state); } + walk_state->operands[0] = NULL; walk_state->num_operands = 0; diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 43b3ea40c0b6..2d7a04493469 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -77,6 +77,7 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state) "Popped object type (%s)\n", acpi_ut_get_type_name(scope_info->common. value))); + acpi_ut_delete_generic_state(scope_info); } } diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index ccf793247447..112e821a1cec 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -92,8 +92,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); } - gpe_register_info->enable_mask = gpe_register_info->enable_for_run; + gpe_register_info->enable_mask = gpe_register_info->enable_for_run; return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index e0f24c504513..c00a9f2f82d5 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -167,6 +167,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) if (gpe_block->next) { gpe_block->next->previous = gpe_block->previous; } + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); } diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 3a958f3612fe..fd5ab9012238 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -346,6 +346,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, ACPI_FREE(notify); notify = next; } + gpe_event_info->dispatch.notify_list = NULL; gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 74e8595f5a2b..709419c7cde4 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -159,7 +159,7 @@ acpi_ev_has_default_handler(struct acpi_namespace_node *node, obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { - handler_obj = obj_desc->device.handler; + handler_obj = obj_desc->common_notify.handler; /* Walk the linked list of handlers for this object */ @@ -247,35 +247,31 @@ acpi_ev_install_handler(acpi_handle obj_handle, /* Check if this Device already has a handler for this address space */ - next_handler_obj = obj_desc->device.handler; - while (next_handler_obj) { + next_handler_obj = + acpi_ev_find_region_handler(handler_obj->address_space. + space_id, + obj_desc->common_notify. 
+ handler); + if (next_handler_obj) { /* Found a handler, is it for the same address space? */ - if (next_handler_obj->address_space.space_id == - handler_obj->address_space.space_id) { - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, - "Found handler for region [%s] in device %p(%p) " - "handler %p\n", - acpi_ut_get_region_name - (handler_obj->address_space. - space_id), obj_desc, - next_handler_obj, - handler_obj)); - - /* - * Since the object we found it on was a device, then it - * means that someone has already installed a handler for - * the branch of the namespace from this device on. Just - * bail out telling the walk routine to not traverse this - * branch. This preserves the scoping rule for handlers. - */ - return (AE_CTRL_DEPTH); - } - - /* Walk the linked list of handlers attached to this device */ - - next_handler_obj = next_handler_obj->address_space.next; + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, + "Found handler for region [%s] in device %p(%p) handler %p\n", + acpi_ut_get_region_name(handler_obj-> + address_space. + space_id), + obj_desc, next_handler_obj, + handler_obj)); + + /* + * Since the object we found it on was a device, then it means + * that someone has already installed a handler for the branch + * of the namespace from this device on. Just bail out telling + * the walk routine to not traverse this branch. This preserves + * the scoping rule for handlers. + */ + return (AE_CTRL_DEPTH); } /* @@ -309,6 +305,44 @@ acpi_ev_install_handler(acpi_handle obj_handle, /******************************************************************************* * + * FUNCTION: acpi_ev_find_region_handler + * + * PARAMETERS: space_id - The address space ID + * handler_obj - Head of the handler object list + * + * RETURN: Matching handler object. NULL if space ID not matched + * + * DESCRIPTION: Search a handler object list for a match on the address + * space ID. + * + ******************************************************************************/ + +union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type + space_id, + union acpi_operand_object + *handler_obj) +{ + + /* Walk the handler list for this device */ + + while (handler_obj) { + + /* Same space_id indicates a handler is installed */ + + if (handler_obj->address_space.space_id == space_id) { + return (handler_obj); + } + + /* Next handler object */ + + handler_obj = handler_obj->address_space.next; + } + + return (NULL); +} + +/******************************************************************************* + * * FUNCTION: acpi_ev_install_space_handler * * PARAMETERS: node - Namespace node for the device @@ -332,15 +366,15 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, { union acpi_operand_object *obj_desc; union acpi_operand_object *handler_obj; - acpi_status status; + acpi_status status = AE_OK; acpi_object_type type; u8 flags = 0; ACPI_FUNCTION_TRACE(ev_install_space_handler); /* - * This registration is valid for only the types below and the root. This - * is where the default handlers get placed. + * This registration is valid for only the types below and the root. + * The root node is where the default handlers get installed. */ if ((node->type != ACPI_TYPE_DEVICE) && (node->type != ACPI_TYPE_PROCESSOR) && @@ -407,38 +441,30 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { /* - * The attached device object already exists. Make sure the handler - * is not already installed. + * The attached device object already exists. 
Now make sure + * the handler is not already installed. */ - handler_obj = obj_desc->device.handler; - - /* Walk the handler list for this device */ - - while (handler_obj) { - - /* Same space_id indicates a handler already installed */ + handler_obj = acpi_ev_find_region_handler(space_id, + obj_desc-> + common_notify. + handler); - if (handler_obj->address_space.space_id == space_id) { - if (handler_obj->address_space.handler == - handler) { - /* - * It is (relatively) OK to attempt to install the SAME - * handler twice. This can easily happen with the - * PCI_Config space. - */ - status = AE_SAME_HANDLER; - goto unlock_and_exit; - } else { - /* A handler is already installed */ - - status = AE_ALREADY_EXISTS; - } + if (handler_obj) { + if (handler_obj->address_space.handler == handler) { + /* + * It is (relatively) OK to attempt to install the SAME + * handler twice. This can easily happen with the + * PCI_Config space. + */ + status = AE_SAME_HANDLER; goto unlock_and_exit; - } + } else { + /* A handler is already installed */ - /* Walk the linked list of handlers */ + status = AE_ALREADY_EXISTS; + } - handler_obj = handler_obj->address_space.next; + goto unlock_and_exit; } } else { ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, @@ -477,7 +503,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, } ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, - "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n", + "Installing address handler for region %s(%X) " + "on Device %4.4s %p(%p)\n", acpi_ut_get_region_name(space_id), space_id, acpi_ut_get_node_name(node), node, obj_desc)); @@ -506,28 +533,26 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, /* Install at head of Device.address_space list */ - handler_obj->address_space.next = obj_desc->device.handler; + handler_obj->address_space.next = obj_desc->common_notify.handler; /* * The Device object is the first reference on the handler_obj. * Each region that uses the handler adds a reference. */ - obj_desc->device.handler = handler_obj; + obj_desc->common_notify.handler = handler_obj; /* - * Walk the namespace finding all of the regions this - * handler will manage. + * Walk the namespace finding all of the regions this handler will + * manage. * - * Start at the device and search the branch toward - * the leaf nodes until either the leaf is encountered or - * a device is detected that has an address handler of the - * same type. + * Start at the device and search the branch toward the leaf nodes + * until either the leaf is encountered or a device is detected that + * has an address handler of the same type. 
* - * In either case, back up and search down the remainder - * of the branch + * In either case, back up and search down the remainder of the branch */ - status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, - ACPI_NS_WALK_UNLOCK, + status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, + ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_install_handler, NULL, handler_obj, NULL); diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index f7c9dfe7b990..8866f50d38f7 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -68,6 +68,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node) { + switch (node->type) { case ACPI_TYPE_DEVICE: case ACPI_TYPE_PROCESSOR: @@ -170,8 +171,8 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node, acpi_ut_get_notify_name(notify_value, ACPI_TYPE_ANY), node)); - status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, - info); + status = acpi_os_execute(OSL_NOTIFY_HANDLER, + acpi_ev_notify_dispatch, info); if (ACPI_FAILURE(status)) { acpi_ut_delete_generic_state(info); } diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 5ee79a16fe33..a43178f20c59 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -97,15 +97,12 @@ acpi_status acpi_ev_initialize_op_regions(void) if (acpi_ev_has_default_handler(acpi_gbl_root_node, acpi_gbl_default_address_spaces [i])) { - status = - acpi_ev_execute_reg_methods(acpi_gbl_root_node, - acpi_gbl_default_address_spaces - [i]); + acpi_ev_execute_reg_methods(acpi_gbl_root_node, + acpi_gbl_default_address_spaces + [i], ACPI_REG_CONNECT); } } - acpi_gbl_reg_methods_executed = TRUE; - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } @@ -127,6 +124,12 @@ acpi_status acpi_ev_initialize_op_regions(void) * DESCRIPTION: Dispatch an address space or operation region access to * a previously installed handler. * + * NOTE: During early initialization, we always install the default region + * handlers for Memory, I/O and PCI_Config. This ensures that these operation + * region address spaces are always available as per the ACPI specification. + * This is especially needed in order to support the execution of + * module-level AML code during loading of the ACPI tables. 
+ * ******************************************************************************/ acpi_status @@ -498,6 +501,12 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj, ACPI_FUNCTION_TRACE(ev_attach_region); + /* Install the region's handler */ + + if (region_obj->region.handler) { + return_ACPI_STATUS(AE_ALREADY_EXISTS); + } + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Adding Region [%4.4s] %p to address handler %p [%s]\n", acpi_ut_get_node_name(region_obj->region.node), @@ -509,17 +518,56 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj, region_obj->region.next = handler_obj->address_space.region_list; handler_obj->address_space.region_list = region_obj; + region_obj->region.handler = handler_obj; + acpi_ut_add_reference(handler_obj); - /* Install the region's handler */ + return_ACPI_STATUS(AE_OK); +} - if (region_obj->region.handler) { - return_ACPI_STATUS(AE_ALREADY_EXISTS); +/******************************************************************************* + * + * FUNCTION: acpi_ev_associate_reg_method + * + * PARAMETERS: region_obj - Region object + * + * RETURN: Status + * + * DESCRIPTION: Find and associate _REG method to a region + * + ******************************************************************************/ + +void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj) +{ + acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG; + struct acpi_namespace_node *method_node; + struct acpi_namespace_node *node; + union acpi_operand_object *region_obj2; + acpi_status status; + + ACPI_FUNCTION_TRACE(ev_associate_reg_method); + + region_obj2 = acpi_ns_get_secondary_object(region_obj); + if (!region_obj2) { + return_VOID; } - region_obj->region.handler = handler_obj; - acpi_ut_add_reference(handler_obj); + node = region_obj->region.node->parent; - return_ACPI_STATUS(AE_OK); + /* Find any "_REG" method associated with this region definition */ + + status = + acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD, + &method_node); + if (ACPI_SUCCESS(status)) { + /* + * The _REG method is optional and there can be only one per region + * definition. 
This will be executed when the handler is attached + * or removed + */ + region_obj2->extra.method_REG = method_node; + } + + return_VOID; } /******************************************************************************* @@ -550,7 +598,18 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) return_ACPI_STATUS(AE_NOT_EXIST); } - if (region_obj2->extra.method_REG == NULL) { + if (region_obj2->extra.method_REG == NULL || + region_obj->region.handler == NULL || + !acpi_gbl_reg_methods_enabled) { + return_ACPI_STATUS(AE_OK); + } + + /* _REG(DISCONNECT) should be paired with _REG(CONNECT) */ + + if ((function == ACPI_REG_CONNECT && + region_obj->common.flags & AOPOBJ_REG_CONNECTED) || + (function == ACPI_REG_DISCONNECT && + !(region_obj->common.flags & AOPOBJ_REG_CONNECTED))) { return_ACPI_STATUS(AE_OK); } @@ -599,6 +658,16 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) status = acpi_ns_evaluate(info); acpi_ut_remove_reference(args[1]); + if (ACPI_FAILURE(status)) { + goto cleanup2; + } + + if (function == ACPI_REG_CONNECT) { + region_obj->common.flags |= AOPOBJ_REG_CONNECTED; + } else { + region_obj->common.flags &= ~AOPOBJ_REG_CONNECTED; + } + cleanup2: acpi_ut_remove_reference(args[0]); @@ -613,24 +682,25 @@ cleanup1: * * PARAMETERS: node - Namespace node for the device * space_id - The address space ID + * function - Passed to _REG: On (1) or Off (0) * - * RETURN: Status + * RETURN: None * * DESCRIPTION: Run all _REG methods for the input Space ID; * Note: assumes namespace is locked, or system init time. * ******************************************************************************/ -acpi_status +void acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, - acpi_adr_space_type space_id) + acpi_adr_space_type space_id, u32 function) { - acpi_status status; struct acpi_reg_walk_info info; ACPI_FUNCTION_TRACE(ev_execute_reg_methods); info.space_id = space_id; + info.function = function; info.reg_run_count = 0; ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, @@ -643,9 +713,9 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, * regions and _REG methods. (i.e. 
handlers must be installed for all * regions of this Space ID before we can run any _REG methods) */ - status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, - ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, - NULL, &info, NULL); + (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, + ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL, + &info, NULL); /* Special case for EC: handle "orphan" _REG methods with no region */ @@ -658,7 +728,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, info.reg_run_count, acpi_ut_get_region_name(info.space_id))); - return_ACPI_STATUS(status); + return_VOID; } /******************************************************************************* @@ -717,7 +787,7 @@ acpi_ev_reg_run(acpi_handle obj_handle, } info->reg_run_count++; - status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT); + status = acpi_ev_execute_reg_method(obj_desc, info->function); return (status); } diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index da323390bb70..bb2e529249c7 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -507,9 +507,6 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, acpi_adr_space_type space_id; struct acpi_namespace_node *node; acpi_status status; - struct acpi_namespace_node *method_node; - acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG; - union acpi_operand_object *region_obj2; ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked); @@ -521,38 +518,15 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, return_ACPI_STATUS(AE_OK); } - region_obj2 = acpi_ns_get_secondary_object(region_obj); - if (!region_obj2) { - return_ACPI_STATUS(AE_NOT_EXIST); - } + acpi_ev_associate_reg_method(region_obj); + region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED; node = region_obj->region.node->parent; space_id = region_obj->region.space_id; - /* Setup defaults */ - - region_obj->region.handler = NULL; - region_obj2->extra.method_REG = NULL; - region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE); - region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED; - - /* Find any "_REG" method associated with this region definition */ - - status = - acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD, - &method_node); - if (ACPI_SUCCESS(status)) { - /* - * The _REG method is optional and there can be only one per region - * definition. This will be executed when the handler is attached - * or removed - */ - region_obj2->extra.method_REG = method_node; - } - /* * The following loop depends upon the root Node having no parent - * ie: acpi_gbl_root_node->parent_entry being set to NULL + * ie: acpi_gbl_root_node->Parent being set to NULL */ while (node) { @@ -566,18 +540,10 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, switch (node->type) { case ACPI_TYPE_DEVICE: - - handler_obj = obj_desc->device.handler; - break; - case ACPI_TYPE_PROCESSOR: - - handler_obj = obj_desc->processor.handler; - break; - case ACPI_TYPE_THERMAL: - handler_obj = obj_desc->thermal_zone.handler; + handler_obj = obj_desc->common_notify.handler; break; case ACPI_TYPE_METHOD: @@ -602,60 +568,49 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, break; } - while (handler_obj) { - - /* Is this handler of the correct type? 
*/ + handler_obj = + acpi_ev_find_region_handler(space_id, handler_obj); + if (handler_obj) { - if (handler_obj->address_space.space_id == - space_id) { + /* Found correct handler */ - /* Found correct handler */ + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, + "Found handler %p for region %p in obj %p\n", + handler_obj, region_obj, + obj_desc)); - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, - "Found handler %p for region %p in obj %p\n", - handler_obj, + status = + acpi_ev_attach_region(handler_obj, region_obj, - obj_desc)); + acpi_ns_locked); + /* + * Tell all users that this region is usable by + * running the _REG method + */ + if (acpi_ns_locked) { status = - acpi_ev_attach_region(handler_obj, - region_obj, - acpi_ns_locked); - - /* - * Tell all users that this region is usable by - * running the _REG method - */ - if (acpi_ns_locked) { - status = - acpi_ut_release_mutex - (ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS - (status); - } + acpi_ut_release_mutex + (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); } + } + status = + acpi_ev_execute_reg_method(region_obj, + ACPI_REG_CONNECT); + + if (acpi_ns_locked) { status = - acpi_ev_execute_reg_method - (region_obj, ACPI_REG_CONNECT); - - if (acpi_ns_locked) { - status = - acpi_ut_acquire_mutex - (ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS - (status); - } + acpi_ut_acquire_mutex + (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); } - - return_ACPI_STATUS(AE_OK); } - /* Try next handler in the list */ - - handler_obj = handler_obj->address_space.next; + return_ACPI_STATUS(AE_OK); } } diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 07d22bfbaa00..012b9dedfa79 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -879,9 +879,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device, ACPI_FUNCTION_TRACE(acpi_install_gpe_handler); - status = - acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE, - address, context); + status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, + FALSE, address, context); return_ACPI_STATUS(status); } @@ -914,8 +913,8 @@ acpi_install_gpe_raw_handler(acpi_handle gpe_device, ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler); - status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE, - address, context); + status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, + TRUE, address, context); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index f21afbab03f7..35f9e60ce2b7 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -112,41 +112,9 @@ acpi_install_address_space_handler(acpi_handle device, goto unlock_and_exit; } - /* - * For the default space_IDs, (the IDs for which there are default region handlers - * installed) Only execute the _REG methods if the global initialization _REG - * methods have already been run (via acpi_initialize_objects). In other words, - * we will defer the execution of the _REG methods for these space_IDs until - * execution of acpi_initialize_objects. This is done because we need the handlers - * for the default spaces (mem/io/pci/table) to be installed before we can run - * any control methods (or _REG methods). There is known BIOS code that depends - * on this. - * - * For all other space_IDs, we can safely execute the _REG methods immediately. 
- * This means that for IDs like embedded_controller, this function should be called - * only after acpi_enable_subsystem has been called. - */ - switch (space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - case ACPI_ADR_SPACE_SYSTEM_IO: - case ACPI_ADR_SPACE_PCI_CONFIG: - case ACPI_ADR_SPACE_DATA_TABLE: - - if (!acpi_gbl_reg_methods_executed) { - - /* We will defer execution of the _REG methods for this space */ - goto unlock_and_exit; - } - break; - - default: - - break; - } - /* Run all _REG methods for this address space */ - status = acpi_ev_execute_reg_methods(node, space_id); + acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); @@ -215,8 +183,8 @@ acpi_remove_address_space_handler(acpi_handle device, /* Find the address handler the user requested */ - handler_obj = obj_desc->device.handler; - last_obj_ptr = &obj_desc->device.handler; + handler_obj = obj_desc->common_notify.handler; + last_obj_ptr = &obj_desc->common_notify.handler; while (handler_obj) { /* We have a handler, see if user requested this one */ diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index b540913c11ac..adcb9c7029c4 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -358,8 +358,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, } /* - * If the Region Address and Length have not been previously evaluated, - * evaluate them now and save the results. + * If the Region Address and Length have not been previously + * evaluated, evaluate them now and save the results. */ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_region_arguments(obj_desc); @@ -454,8 +454,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, } /* - * Copy the table from the buffer because the buffer could be modified - * or even deleted in the future + * Copy the table from the buffer because the buffer could be + * modified or even deleted in the future */ table = ACPI_ALLOCATE(length); if (!table) { diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index 1e4c5b6dc0b0..73c2e823488d 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -227,8 +227,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, /* Copy the integer to the buffer, LSB first */ new_buf = return_desc->buffer.pointer; - memcpy(new_buf, - &obj_desc->integer.value, acpi_gbl_integer_byte_width); + memcpy(new_buf, &obj_desc->integer.value, + acpi_gbl_integer_byte_width); break; case ACPI_TYPE_STRING: @@ -354,9 +354,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) /* Get one hex digit, most significant digits first */ - string[k] = - (u8) acpi_ut_hex_to_ascii_char(integer, - ACPI_MUL_4(j)); + string[k] = (u8) + acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j)); k++; } break; diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index ccb7219bdcee..46be5a276863 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -189,9 +189,9 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state) /* Attach object to the Node */ - status = - acpi_ns_attach_object((struct acpi_namespace_node *)walk_state-> - operands[0], obj_desc, ACPI_TYPE_EVENT); + status = acpi_ns_attach_object((struct acpi_namespace_node *) + walk_state->operands[0], obj_desc, + ACPI_TYPE_EVENT); cleanup: /* @@ -326,9 +326,10 @@ acpi_ex_create_region(u8 * aml_start, * Remember location in 
AML stream of address & length * operands since they need to be evaluated at run time. */ - region_obj2 = obj_desc->common.next_object; + region_obj2 = acpi_ns_get_secondary_object(obj_desc); region_obj2->extra.aml_start = aml_start; region_obj2->extra.aml_length = aml_length; + region_obj2->extra.method_REG = NULL; if (walk_state->scope_info) { region_obj2->extra.scope_node = walk_state->scope_info->scope.node; @@ -342,6 +343,10 @@ acpi_ex_create_region(u8 * aml_start, obj_desc->region.address = 0; obj_desc->region.length = 0; obj_desc->region.node = node; + obj_desc->region.handler = NULL; + obj_desc->common.flags &= + ~(AOPOBJ_SETUP_COMPLETE | AOPOBJ_REG_CONNECTED | + AOPOBJ_OBJECT_INITIALIZED); /* Install the new region object in the parent Node */ @@ -492,10 +497,9 @@ acpi_ex_create_method(u8 * aml_start, * Disassemble the method flags. Split off the arg_count, Serialized * flag, and sync_level for efficiency. */ - method_flags = (u8) operand[1]->integer.value; - - obj_desc->method.param_count = - (u8) (method_flags & AML_METHOD_ARG_COUNT); + method_flags = (u8)operand[1]->integer.value; + obj_desc->method.param_count = (u8) + (method_flags & AML_METHOD_ARG_COUNT); /* * Get the sync_level. If method is serialized, a mutex will be diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index de92458236f5..b22309094c5f 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -43,21 +43,11 @@ #include <acpi/acpi.h> #include "accommon.h" -#include "acnamesp.h" #include "acinterp.h" -#include "acparser.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exdebug") -static union acpi_operand_object *acpi_gbl_trace_method_object = NULL; - -/* Local prototypes */ - -#ifdef ACPI_DEBUG_OUTPUT -static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type); -#endif - #ifndef ACPI_NO_ERROR_MESSAGES /******************************************************************************* * @@ -80,7 +70,6 @@ static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type); * enabled if necessary. * ******************************************************************************/ - void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index) @@ -99,20 +88,40 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, return_VOID; } - /* - * We will emit the current timer value (in microseconds) with each - * debug output. Only need the lower 26 bits. This allows for 67 - * million microseconds or 67 seconds before rollover. - */ - timer = ((u32)acpi_os_get_timer() / 10); /* (100 nanoseconds to microseconds) */ - timer &= 0x03FFFFFF; + /* Null string or newline -- don't emit the line header */ + + if (source_desc && + (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) && + (source_desc->common.type == ACPI_TYPE_STRING)) { + if ((source_desc->string.length == 0) || + ((source_desc->string.length == 1) && + (*source_desc->string.pointer == '\n'))) { + acpi_os_printf("\n"); + return_VOID; + } + } /* * Print line header as long as we are not in the middle of an * object display */ if (!((level > 0) && index == 0)) { - acpi_os_printf("[ACPI Debug %.8u] %*s", timer, level, " "); + if (acpi_gbl_display_debug_timer) { + /* + * We will emit the current timer value (in microseconds) with each + * debug output. Only need the lower 26 bits. This allows for 67 + * million microseconds or 67 seconds before rollover. 
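The debug-object hunk here only prints the timestamp when acpi_gbl_display_debug_timer is set, converting the 100 ns units returned by acpi_os_get_timer() to microseconds and masking to 26 bits. A small standalone sketch of that arithmetic (hypothetical helper, not ACPICA code) makes the 67-second rollover explicit:

#include <stdint.h>

/* Input mimics acpi_os_get_timer(): elapsed time in 100 ns units */
static uint32_t debug_timestamp_us(uint64_t timer_100ns)
{
	uint32_t us = (uint32_t)(timer_100ns / 10);	/* 100 ns -> microseconds */

	return us & 0x03FFFFFF;	/* keep 26 bits: 2^26 us ~= 67.1 s before wrap */
}
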
+ * + * Convert 100 nanosecond units to microseconds + */ + timer = ((u32)acpi_os_get_timer() / 10); + timer &= 0x03FFFFFF; + + acpi_os_printf("[ACPI Debug T=0x%8.8X] %*s", timer, + level, " "); + } else { + acpi_os_printf("[ACPI Debug] %*s", level, " "); + } } /* Display the index for package output only */ @@ -127,8 +136,15 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, } if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) { - acpi_os_printf("%s ", - acpi_ut_get_object_type_name(source_desc)); + + /* No object type prefix needed for integers and strings */ + + if ((source_desc->common.type != ACPI_TYPE_INTEGER) && + (source_desc->common.type != ACPI_TYPE_STRING)) { + acpi_os_printf("%s ", + acpi_ut_get_object_type_name + (source_desc)); + } if (!acpi_ut_valid_internal_object(source_desc)) { acpi_os_printf("%p, Invalid Internal Object!\n", @@ -137,7 +153,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, } } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) { - acpi_os_printf("%s: %p\n", + acpi_os_printf("%s (Node %p)\n", acpi_ut_get_type_name(((struct acpi_namespace_node *) source_desc)->type), @@ -175,14 +191,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, case ACPI_TYPE_STRING: - acpi_os_printf("[0x%.2X] \"%s\"\n", - source_desc->string.length, - source_desc->string.pointer); + acpi_os_printf("\"%s\"\n", source_desc->string.pointer); break; case ACPI_TYPE_PACKAGE: - acpi_os_printf("[Contains 0x%.2X Elements]\n", + acpi_os_printf("(Contains 0x%.2X Elements):\n", source_desc->package.count); /* Output the entire contents of the package */ @@ -261,11 +275,14 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.object) == ACPI_DESC_TYPE_NAMED) { - acpi_ex_do_debug_object(((struct - acpi_namespace_node *) + + /* Reference object is a namespace node */ + + acpi_ex_do_debug_object(ACPI_CAST_PTR + (union + acpi_operand_object, source_desc->reference. - object)->object, - level + 4, 0); + object), level + 4, 0); } else { object_desc = source_desc->reference.object; value = source_desc->reference.value; @@ -293,9 +310,14 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, case ACPI_TYPE_PACKAGE: acpi_os_printf("Package[%u] = ", value); - acpi_ex_do_debug_object(*source_desc-> - reference.where, - level + 4, 0); + if (!(*source_desc->reference.where)) { + acpi_os_printf + ("[Uninitialized Package Element]\n"); + } else { + acpi_ex_do_debug_object + (*source_desc->reference. + where, level + 4, 0); + } break; default: @@ -311,7 +333,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, default: - acpi_os_printf("%p\n", source_desc); + acpi_os_printf("(Descriptor %p)\n", source_desc); break; } @@ -319,316 +341,3 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, return_VOID; } #endif - -/******************************************************************************* - * - * FUNCTION: acpi_ex_interpreter_trace_enabled - * - * PARAMETERS: name - Whether method name should be matched, - * this should be checked before starting - * the tracer - * - * RETURN: TRUE if interpreter trace is enabled. 
- * - * DESCRIPTION: Check whether interpreter trace is enabled - * - ******************************************************************************/ - -static u8 acpi_ex_interpreter_trace_enabled(char *name) -{ - - /* Check if tracing is enabled */ - - if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) { - return (FALSE); - } - - /* - * Check if tracing is filtered: - * - * 1. If the tracer is started, acpi_gbl_trace_method_object should have - * been filled by the trace starter - * 2. If the tracer is not started, acpi_gbl_trace_method_name should be - * matched if it is specified - * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should - * not be cleared by the trace stopper during the first match - */ - if (acpi_gbl_trace_method_object) { - return (TRUE); - } - if (name && - (acpi_gbl_trace_method_name && - strcmp(acpi_gbl_trace_method_name, name))) { - return (FALSE); - } - if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) && - !acpi_gbl_trace_method_name) { - return (FALSE); - } - - return (TRUE); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ex_get_trace_event_name - * - * PARAMETERS: type - Trace event type - * - * RETURN: Trace event name. - * - * DESCRIPTION: Used to obtain the full trace event name. - * - ******************************************************************************/ - -#ifdef ACPI_DEBUG_OUTPUT - -static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type) -{ - switch (type) { - case ACPI_TRACE_AML_METHOD: - - return "Method"; - - case ACPI_TRACE_AML_OPCODE: - - return "Opcode"; - - case ACPI_TRACE_AML_REGION: - - return "Region"; - - default: - - return ""; - } -} - -#endif - -/******************************************************************************* - * - * FUNCTION: acpi_ex_trace_point - * - * PARAMETERS: type - Trace event type - * begin - TRUE if before execution - * aml - Executed AML address - * pathname - Object path - * - * RETURN: None - * - * DESCRIPTION: Internal interpreter execution trace. - * - ******************************************************************************/ - -void -acpi_ex_trace_point(acpi_trace_event_type type, - u8 begin, u8 *aml, char *pathname) -{ - - ACPI_FUNCTION_NAME(ex_trace_point); - - if (pathname) { - ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, - "%s %s [0x%p:%s] execution.\n", - acpi_ex_get_trace_event_name(type), - begin ? "Begin" : "End", aml, pathname)); - } else { - ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, - "%s %s [0x%p] execution.\n", - acpi_ex_get_trace_event_name(type), - begin ? "Begin" : "End", aml)); - } -} - -/******************************************************************************* - * - * FUNCTION: acpi_ex_start_trace_method - * - * PARAMETERS: method_node - Node of the method - * obj_desc - The method object - * walk_state - current state, NULL if not yet executing - * a method. 
- * - * RETURN: None - * - * DESCRIPTION: Start control method execution trace - * - ******************************************************************************/ - -void -acpi_ex_start_trace_method(struct acpi_namespace_node *method_node, - union acpi_operand_object *obj_desc, - struct acpi_walk_state *walk_state) -{ - acpi_status status; - char *pathname = NULL; - u8 enabled = FALSE; - - ACPI_FUNCTION_NAME(ex_start_trace_method); - - if (method_node) { - pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); - } - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit; - } - - enabled = acpi_ex_interpreter_trace_enabled(pathname); - if (enabled && !acpi_gbl_trace_method_object) { - acpi_gbl_trace_method_object = obj_desc; - acpi_gbl_original_dbg_level = acpi_dbg_level; - acpi_gbl_original_dbg_layer = acpi_dbg_layer; - acpi_dbg_level = ACPI_TRACE_LEVEL_ALL; - acpi_dbg_layer = ACPI_TRACE_LAYER_ALL; - - if (acpi_gbl_trace_dbg_level) { - acpi_dbg_level = acpi_gbl_trace_dbg_level; - } - if (acpi_gbl_trace_dbg_layer) { - acpi_dbg_layer = acpi_gbl_trace_dbg_layer; - } - } - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - -exit: - if (enabled) { - ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE, - obj_desc ? obj_desc->method.aml_start : NULL, - pathname); - } - if (pathname) { - ACPI_FREE(pathname); - } -} - -/******************************************************************************* - * - * FUNCTION: acpi_ex_stop_trace_method - * - * PARAMETERS: method_node - Node of the method - * obj_desc - The method object - * walk_state - current state, NULL if not yet executing - * a method. - * - * RETURN: None - * - * DESCRIPTION: Stop control method execution trace - * - ******************************************************************************/ - -void -acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, - union acpi_operand_object *obj_desc, - struct acpi_walk_state *walk_state) -{ - acpi_status status; - char *pathname = NULL; - u8 enabled; - - ACPI_FUNCTION_NAME(ex_stop_trace_method); - - if (method_node) { - pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); - } - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit_path; - } - - enabled = acpi_ex_interpreter_trace_enabled(NULL); - - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - - if (enabled) { - ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE, - obj_desc ? obj_desc->method.aml_start : NULL, - pathname); - } - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit_path; - } - - /* Check whether the tracer should be stopped */ - - if (acpi_gbl_trace_method_object == obj_desc) { - - /* Disable further tracing if type is one-shot */ - - if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) { - acpi_gbl_trace_method_name = NULL; - } - - acpi_dbg_level = acpi_gbl_original_dbg_level; - acpi_dbg_layer = acpi_gbl_original_dbg_layer; - acpi_gbl_trace_method_object = NULL; - } - - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - -exit_path: - if (pathname) { - ACPI_FREE(pathname); - } -} - -/******************************************************************************* - * - * FUNCTION: acpi_ex_start_trace_opcode - * - * PARAMETERS: op - The parser opcode object - * walk_state - current state, NULL if not yet executing - * a method. 
- * - * RETURN: None - * - * DESCRIPTION: Start opcode execution trace - * - ******************************************************************************/ - -void -acpi_ex_start_trace_opcode(union acpi_parse_object *op, - struct acpi_walk_state *walk_state) -{ - - ACPI_FUNCTION_NAME(ex_start_trace_opcode); - - if (acpi_ex_interpreter_trace_enabled(NULL) && - (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) { - ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE, - op->common.aml, op->common.aml_op_name); - } -} - -/******************************************************************************* - * - * FUNCTION: acpi_ex_stop_trace_opcode - * - * PARAMETERS: op - The parser opcode object - * walk_state - current state, NULL if not yet executing - * a method. - * - * RETURN: None - * - * DESCRIPTION: Stop opcode execution trace - * - ******************************************************************************/ - -void -acpi_ex_stop_trace_opcode(union acpi_parse_object *op, - struct acpi_walk_state *walk_state) -{ - - ACPI_FUNCTION_NAME(ex_stop_trace_opcode); - - if (acpi_ex_interpreter_trace_enabled(NULL) && - (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) { - ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE, - op->common.aml, op->common.aml_op_name); - } -} diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index d836f888bb16..ff976c43b992 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -508,7 +508,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc, if (next) { acpi_os_printf("(%s %2.2X)", acpi_ut_get_object_type_name - (next), next->common.type); + (next), + next->address_space.space_id); while (next->address_space.next) { if ((next->common.type == @@ -520,7 +521,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc, acpi_os_printf("->%p(%s %2.2X)", next, acpi_ut_get_object_type_name (next), - next->common.type); + next->address_space. + space_id); if ((next == start) || (next == data)) { acpi_os_printf diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 61fd9c7b88bc..ad7080ba65e2 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -167,10 +167,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_IPMI)) { /* - * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold - * the data and then directly access the region handler. + * This is an SMBus, GSBus or IPMI read. We must create a buffer to + * hold the data and then directly access the region handler. * - * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function + * Note: SMBus and GSBus protocol value is passed in upper 16-bits + * of Function */ if (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS) { @@ -180,17 +181,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, } else if (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) { accessor_type = obj_desc->field.attribute; - length = acpi_ex_get_serial_access_length(accessor_type, - obj_desc-> - field. - access_length); + length = + acpi_ex_get_serial_access_length(accessor_type, + obj_desc->field. 
+ access_length); /* * Add additional 2 bytes for the generic_serial_bus data buffer: * - * Status; (Byte 0 of the data buffer) - * Length; (Byte 1 of the data buffer) - * Data[x-1]; (Bytes 2-x of the arbitrary length data buffer) + * Status; (Byte 0 of the data buffer) + * Length; (Byte 1 of the data buffer) + * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer) */ length += 2; function = ACPI_READ | (accessor_type << 16); @@ -216,6 +217,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, buffer_desc-> buffer.pointer), function); + acpi_ex_release_global_lock(obj_desc->common_field.field_flags); goto exit; } @@ -232,6 +234,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, */ length = (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length); + if (length > acpi_gbl_integer_byte_width) { /* Field is too large for an Integer, create a Buffer instead */ @@ -273,8 +276,10 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, /* Perform the write */ - status = acpi_ex_access_region(obj_desc, 0, - (u64 *)buffer, ACPI_READ); + status = + acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, + ACPI_READ); + acpi_ex_release_global_lock(obj_desc->common_field.field_flags); if (ACPI_FAILURE(status)) { acpi_ut_remove_reference(buffer_desc); @@ -366,19 +371,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_IPMI)) { /* - * This is an SMBus, GSBus or IPMI write. We will bypass the entire field - * mechanism and handoff the buffer directly to the handler. For - * these address spaces, the buffer is bi-directional; on a write, - * return data is returned in the same buffer. + * This is an SMBus, GSBus or IPMI write. We will bypass the entire + * field mechanism and handoff the buffer directly to the handler. + * For these address spaces, the buffer is bi-directional; on a + * write, return data is returned in the same buffer. * * Source must be a buffer of sufficient size: - * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE. + * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or + * ACPI_IPMI_BUFFER_SIZE. * - * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function + * Note: SMBus and GSBus protocol type is passed in upper 16-bits + * of Function */ if (source_desc->common.type != ACPI_TYPE_BUFFER) { ACPI_ERROR((AE_INFO, - "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s", + "SMBus/IPMI/GenericSerialBus write requires " + "Buffer, found type %s", acpi_ut_get_object_type_name(source_desc))); return_ACPI_STATUS(AE_AML_OPERAND_TYPE); @@ -392,17 +400,17 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, } else if (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) { accessor_type = obj_desc->field.attribute; - length = acpi_ex_get_serial_access_length(accessor_type, - obj_desc-> - field. - access_length); + length = + acpi_ex_get_serial_access_length(accessor_type, + obj_desc->field. 
+ access_length); /* * Add additional 2 bytes for the generic_serial_bus data buffer: * - * Status; (Byte 0 of the data buffer) - * Length; (Byte 1 of the data buffer) - * Data[x-1]; (Bytes 2-x of the arbitrary length data buffer) + * Status; (Byte 0 of the data buffer) + * Length; (Byte 1 of the data buffer) + * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer) */ length += 2; function = ACPI_WRITE | (accessor_type << 16); @@ -414,7 +422,8 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, if (source_desc->buffer.length < length) { ACPI_ERROR((AE_INFO, - "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u", + "SMBus/IPMI/GenericSerialBus write requires " + "Buffer of length %u, found length %u", length, source_desc->buffer.length)); return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); @@ -438,8 +447,8 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, * Perform the write (returns status and perhaps data in the * same buffer) */ - status = acpi_ex_access_region(obj_desc, 0, - (u64 *) buffer, function); + status = + acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, function); acpi_ex_release_global_lock(obj_desc->common_field.field_flags); *result_desc = buffer_desc; @@ -460,7 +469,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, - "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n", + "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n", acpi_ut_get_type_name(source_desc->common. type), source_desc->common.type, @@ -476,8 +485,9 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, /* Perform the write */ - status = acpi_ex_access_region(obj_desc, 0, - (u64 *)buffer, ACPI_WRITE); + status = + acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, + ACPI_WRITE); acpi_ex_release_global_lock(obj_desc->common_field.field_flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 70b7bbbb860b..0337191dbf3d 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -180,7 +180,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, * byte, and a field with Dword access specified. */ ACPI_ERROR((AE_INFO, - "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)", + "Field [%4.4s] access width (%u bytes) " + "too large for region [%4.4s] (length %u)", acpi_ut_get_node_name(obj_desc-> common_field.node), obj_desc->common_field.access_byte_width, @@ -194,7 +195,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, * exceeds region length, indicate an error */ ACPI_ERROR((AE_INFO, - "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)", + "Field [%4.4s] Base+Offset+Width %u+%u+%u " + "is beyond end of region [%4.4s] (length %u)", acpi_ut_get_node_name(obj_desc->common_field.node), obj_desc->common_field.base_byte_offset, field_datum_byte_offset, @@ -638,15 +640,15 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc, ACPI_ERROR((AE_INFO, "Unknown UpdateRule value: 0x%X", - (obj_desc->common_field. 
- field_flags & + (obj_desc->common_field.field_flags & AML_FIELD_UPDATE_RULE_MASK))); return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, - "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n", + "Mask %8.8X%8.8X, DatumOffset %X, Width %X, " + "Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n", ACPI_FORMAT_UINT64(mask), field_datum_byte_offset, obj_desc->common_field.access_byte_width, @@ -655,8 +657,9 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc, /* Write the merged value */ - status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset, - &merged_value, ACPI_WRITE); + status = + acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset, + &merged_value, ACPI_WRITE); return_ACPI_STATUS(status); } @@ -764,8 +767,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, /* Get next input datum from the field */ field_offset += obj_desc->common_field.access_byte_width; - status = acpi_ex_field_datum_io(obj_desc, field_offset, - &raw_datum, ACPI_READ); + status = + acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum, + ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -858,6 +862,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, new_buffer = NULL; required_length = ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); + /* * We must have a buffer that is at least as long as the field * we are writing to. This is because individual fields are @@ -932,9 +937,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, /* Write merged datum to the target field */ merged_datum &= mask; - status = acpi_ex_write_with_update_rule(obj_desc, mask, - merged_datum, - field_offset); + status = + acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum, + field_offset); if (ACPI_FAILURE(status)) { goto exit; } @@ -990,9 +995,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, /* Write the last datum to the field */ merged_datum &= mask; - status = acpi_ex_write_with_update_rule(obj_desc, - mask, merged_datum, - field_offset); + status = + acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum, + field_offset); exit: /* Free temporary buffer if we used one */ diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index d02afece0f10..f598b3948c17 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -98,9 +98,9 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc, default: - ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X", + ACPI_ERROR((AE_INFO, "Invalid Reference Class 0x%2.2X", obj_desc->reference.class)); - return_ACPI_STATUS(AE_AML_INTERNAL); + return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } break; @@ -247,6 +247,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, union acpi_operand_object *local_operand1 = operand1; union acpi_operand_object *return_desc; char *new_buf; + const char *type_string; acpi_status status; ACPI_FUNCTION_TRACE(ex_do_concatenate); @@ -266,9 +267,41 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, break; case ACPI_TYPE_STRING: + /* + * Per the ACPI spec, Concatenate only supports int/str/buf. + * However, we support all objects here as an extension. + * This improves the usefulness of the Printf() macro. + * 12/2015. 
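The Concatenate change here extends the operator beyond the spec's integer/string/buffer operands: any other operand type is rendered as a "[<Typename> Object]" placeholder string, which is what makes the ASL Printf() macro more useful. A rough standalone illustration of that placeholder formatting; the helper name and the snprintf-based approach are assumptions, whereas the real code builds an ACPICA string object with strcpy/strcat:

#include <stdio.h>

/* "[" and " Object]" add 9 characters around the type name */
static void format_placeholder(const char *type_string, char *buf, size_t len)
{
	snprintf(buf, len, "[%s Object]", type_string);
}

/* format_placeholder("Device", buf, sizeof(buf)) yields "[Device Object]" */
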
+ */ + switch (operand1->common.type) { + case ACPI_TYPE_INTEGER: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + + status = + acpi_ex_convert_to_string(operand1, &local_operand1, + ACPI_IMPLICIT_CONVERT_HEX); + break; + + default: + /* + * Just emit a string containing the object type. + */ + type_string = + acpi_ut_get_type_name(operand1->common.type); + + local_operand1 = acpi_ut_create_string_object(((acpi_size) strlen(type_string) + 9)); /* 9 For "[Object]" */ + if (!local_operand1) { + status = AE_NO_MEMORY; + goto cleanup; + } - status = acpi_ex_convert_to_string(operand1, &local_operand1, - ACPI_IMPLICIT_CONVERT_HEX); + strcpy(local_operand1->string.pointer, "["); + strcat(local_operand1->string.pointer, type_string); + strcat(local_operand1->string.pointer, " Object]"); + status = AE_OK; + break; + } break; case ACPI_TYPE_BUFFER: @@ -347,8 +380,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, /* Concatenate the strings */ strcpy(new_buf, operand0->string.pointer); - strcpy(new_buf + operand0->string.length, - local_operand1->string.pointer); + strcat(new_buf, local_operand1->string.pointer); break; case ACPI_TYPE_BUFFER: @@ -591,8 +623,9 @@ acpi_ex_do_logical_op(u16 opcode, case ACPI_TYPE_STRING: - status = acpi_ex_convert_to_string(operand1, &local_operand1, - ACPI_IMPLICIT_CONVERT_HEX); + status = + acpi_ex_convert_to_string(operand1, &local_operand1, + ACPI_IMPLICIT_CONVERT_HEX); break; case ACPI_TYPE_BUFFER: diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index 472030f2b5bb..843c60ae91f6 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -185,8 +185,9 @@ acpi_ex_acquire_mutex_object(u16 timeout, if (obj_desc == acpi_gbl_global_lock_mutex) { status = acpi_ev_acquire_global_lock(timeout); } else { - status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, - timeout); + status = + acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, + timeout); } if (ACPI_FAILURE(status)) { @@ -243,20 +244,30 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, } /* - * Current sync level must be less than or equal to the sync level of the - * mutex. This mechanism provides some deadlock prevention + * Current sync level must be less than or equal to the sync level + * of the mutex. This mechanism provides some deadlock prevention. 
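The acquire-side comment here restates the AML SyncLevel rule: a thread whose current SyncLevel is higher than the mutex's SyncLevel must not acquire it, which prevents some lock-order deadlocks. A toy sketch of the ordering check with sample values (hypothetical helper, not the ACPICA structures):

/* Returns 0 if the acquisition is allowed, -1 for an ordering violation */
static int sync_level_ok(unsigned int thread_level, unsigned int mutex_level)
{
	if (thread_level > mutex_level)
		return -1;	/* ACPICA reports AE_AML_MUTEX_ORDER here */
	return 0;	/* on success the thread runs at mutex_level */
}

/* Holding a SyncLevel-2 mutex and then asking for a SyncLevel-1 mutex is
 * rejected: sync_level_ok(2, 1) == -1, while sync_level_ok(1, 2) == 0.
 */
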
*/ if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { ACPI_ERROR((AE_INFO, - "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)", + "Cannot acquire Mutex [%4.4s], " + "current SyncLevel is too large (%u)", acpi_ut_get_node_name(obj_desc->mutex.node), walk_state->thread->current_sync_level)); return_ACPI_STATUS(AE_AML_MUTEX_ORDER); } - status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value, + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Acquiring: Mutex SyncLevel %u, Thread SyncLevel %u, " + "Depth %u TID %p\n", + obj_desc->mutex.sync_level, + walk_state->thread->current_sync_level, + obj_desc->mutex.acquisition_depth, + walk_state->thread)); + + status = acpi_ex_acquire_mutex_object((u16)time_desc->integer.value, obj_desc, walk_state->thread->thread_id); + if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) { /* Save Thread object, original/current sync levels */ @@ -272,6 +283,12 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, acpi_ex_link_mutex(obj_desc, walk_state->thread); } + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Acquired: Mutex SyncLevel %u, Thread SyncLevel %u, Depth %u\n", + obj_desc->mutex.sync_level, + walk_state->thread->current_sync_level, + obj_desc->mutex.acquisition_depth)); + return_ACPI_STATUS(status); } @@ -356,9 +373,9 @@ acpi_status acpi_ex_release_mutex(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; u8 previous_sync_level; struct acpi_thread_state *owner_thread; + acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_release_mutex); @@ -409,7 +426,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, */ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) { ACPI_ERROR((AE_INFO, - "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u", + "Cannot release Mutex [%4.4s], SyncLevel mismatch: " + "mutex %u current %u", acpi_ut_get_node_name(obj_desc->mutex.node), obj_desc->mutex.sync_level, walk_state->thread->current_sync_level)); @@ -424,6 +442,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, previous_sync_level = owner_thread->acquired_mutex_list->mutex.original_sync_level; + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Releasing: Object SyncLevel %u, Thread SyncLevel %u, " + "Prev SyncLevel %u, Depth %u TID %p\n", + obj_desc->mutex.sync_level, + walk_state->thread->current_sync_level, + previous_sync_level, + obj_desc->mutex.acquisition_depth, + walk_state->thread)); + status = acpi_ex_release_mutex_object(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); @@ -436,6 +463,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, owner_thread->current_sync_level = previous_sync_level; } + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Released: Object SyncLevel %u, Thread SyncLevel, %u, " + "Prev SyncLevel %u, Depth %u\n", + obj_desc->mutex.sync_level, + walk_state->thread->current_sync_level, + previous_sync_level, + obj_desc->mutex.acquisition_depth)); + return_ACPI_STATUS(status); } @@ -462,21 +497,17 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) union acpi_operand_object *next = thread->acquired_mutex_list; union acpi_operand_object *obj_desc; - ACPI_FUNCTION_NAME(ex_release_all_mutexes); + ACPI_FUNCTION_TRACE(ex_release_all_mutexes); /* Traverse the list of owned mutexes, releasing each one */ while (next) { obj_desc = next; - next = obj_desc->mutex.next; - - obj_desc->mutex.prev = NULL; - obj_desc->mutex.next = NULL; - obj_desc->mutex.acquisition_depth = 0; - 
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "Force-releasing held mutex: %p\n", - obj_desc)); + "Mutex [%4.4s] force-release, SyncLevel %u Depth %u\n", + obj_desc->mutex.node->name.ascii, + obj_desc->mutex.sync_level, + obj_desc->mutex.acquisition_depth)); /* Release the mutex, special case for Global Lock */ @@ -489,14 +520,21 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) acpi_os_release_mutex(obj_desc->mutex.os_mutex); } - /* Mark mutex unowned */ - - obj_desc->mutex.owner_thread = NULL; - obj_desc->mutex.thread_id = 0; - /* Update Thread sync_level (Last mutex is the important one) */ thread->current_sync_level = obj_desc->mutex.original_sync_level; + + /* Mark mutex unowned */ + + next = obj_desc->mutex.next; + + obj_desc->mutex.prev = NULL; + obj_desc->mutex.next = NULL; + obj_desc->mutex.acquisition_depth = 0; + obj_desc->mutex.owner_thread = NULL; + obj_desc->mutex.thread_id = 0; } + + return_VOID; } diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index 20e87813c7d7..b2e911a35866 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -164,8 +164,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string) ACPI_FUNCTION_TRACE(ex_name_segment); /* - * If first character is a digit, then we know that we aren't looking at a - * valid name segment + * If first character is a digit, then we know that we aren't looking + * at a valid name segment */ char_buf[0] = *aml_address; diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 77930683ab7d..efe7ac319f65 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -484,22 +484,26 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) case AML_TO_DECSTRING_OP: /* to_decimal_string (Data, Result) */ - status = acpi_ex_convert_to_string(operand[0], &return_desc, - ACPI_EXPLICIT_CONVERT_DECIMAL); + status = + acpi_ex_convert_to_string(operand[0], &return_desc, + ACPI_EXPLICIT_CONVERT_DECIMAL); if (return_desc == operand[0]) { /* No conversion performed, add ref to handle return value */ + acpi_ut_add_reference(return_desc); } break; case AML_TO_HEXSTRING_OP: /* to_hex_string (Data, Result) */ - status = acpi_ex_convert_to_string(operand[0], &return_desc, - ACPI_EXPLICIT_CONVERT_HEX); + status = + acpi_ex_convert_to_string(operand[0], &return_desc, + ACPI_EXPLICIT_CONVERT_HEX); if (return_desc == operand[0]) { /* No conversion performed, add ref to handle return value */ + acpi_ut_add_reference(return_desc); } break; @@ -510,17 +514,20 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) if (return_desc == operand[0]) { /* No conversion performed, add ref to handle return value */ + acpi_ut_add_reference(return_desc); } break; case AML_TO_INTEGER_OP: /* to_integer (Data, Result) */ - status = acpi_ex_convert_to_integer(operand[0], &return_desc, - ACPI_ANY_BASE); + status = + acpi_ex_convert_to_integer(operand[0], &return_desc, + ACPI_ANY_BASE); if (return_desc == operand[0]) { /* No conversion performed, add ref to handle return value */ + acpi_ut_add_reference(return_desc); } break; @@ -679,7 +686,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) status = acpi_ex_store(return_desc, operand[0], walk_state); break; - case AML_TYPE_OP: /* object_type (source_object) */ + case AML_OBJECT_TYPE_OP: /* object_type (source_object) */ /* * Note: The operand is not resolved at this point because we want to * get the associated object, not 
its value. For example, we don't @@ -713,9 +720,9 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) /* Get the base object */ - status = acpi_ex_resolve_multiple(walk_state, - operand[0], &type, - &temp_desc); + status = + acpi_ex_resolve_multiple(walk_state, operand[0], &type, + &temp_desc); if (ACPI_FAILURE(status)) { goto cleanup; } @@ -759,8 +766,10 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) default: ACPI_ERROR((AE_INFO, - "Operand must be Buffer/Integer/String/Package - found type %s", + "Operand must be Buffer/Integer/String/Package" + " - found type %s", acpi_ut_get_type_name(type))); + status = AE_AML_OPERAND_TYPE; goto cleanup; } @@ -981,6 +990,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) "Unknown Index TargetType 0x%X in reference object %p", operand[0]->reference. target_type, operand[0])); + status = AE_AML_OPERAND_TYPE; goto cleanup; } @@ -1050,6 +1060,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); + status = AE_AML_BAD_OPCODE; goto cleanup; } diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index b8944ebb1081..6dad2ca1c8c9 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -199,6 +199,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state) ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); + status = AE_AML_BAD_OPCODE; goto cleanup; } @@ -299,8 +300,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) case AML_CONCAT_OP: /* Concatenate (Data1, Data2, Result) */ - status = acpi_ex_do_concatenate(operand[0], operand[1], - &return_desc, walk_state); + status = + acpi_ex_do_concatenate(operand[0], operand[1], &return_desc, + walk_state); break; case AML_TO_STRING_OP: /* to_string (Buffer, Length, Result) (ACPI 2.0) */ @@ -345,8 +347,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) /* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */ - status = acpi_ex_concat_template(operand[0], operand[1], - &return_desc, walk_state); + status = + acpi_ex_concat_template(operand[0], operand[1], + &return_desc, walk_state); break; case AML_INDEX_OP: /* Index (Source Index Result) */ @@ -553,6 +556,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state) ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); + status = AE_AML_BAD_OPCODE; goto cleanup; } diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index fa100b3b92ee..27fb0172fca2 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -95,10 +95,11 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state) case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n", - (u32) operand[0]->integer.value, - (u32) operand[1]->integer.value, - (u32) operand[2]->integer.value)); + "FatalOp: Type %X Code %X Arg %X " + "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n", + (u32)operand[0]->integer.value, + (u32)operand[1]->integer.value, + (u32)operand[2]->integer.value)); fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info)); if (fatal) { @@ -131,6 +132,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state) ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); + 
status = AE_AML_BAD_OPCODE; goto cleanup; } @@ -193,7 +195,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) /* Truncate request if larger than the actual String/Buffer */ else if ((index + length) > operand[0]->string.length) { - length = (acpi_size) operand[0]->string.length - + length = + (acpi_size) operand[0]->string.length - (acpi_size) index; } @@ -237,8 +240,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) /* We have a buffer, copy the portion requested */ - memcpy(buffer, operand[0]->string.pointer + index, - length); + memcpy(buffer, + operand[0]->string.pointer + index, length); } /* Set the length of the new String/Buffer */ @@ -255,6 +258,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); + status = AE_AML_BAD_OPCODE; goto cleanup; } @@ -270,12 +274,11 @@ cleanup: if (ACPI_FAILURE(status) || walk_state->result_obj) { acpi_ut_remove_reference(return_desc); walk_state->result_obj = NULL; - } + } else { + /* Set the return object and exit */ - /* Set the return object and exit */ - - else { walk_state->result_obj = return_desc; } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index c930edda3f65..7efc9f47ffb9 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -310,6 +310,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state) ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); + status = AE_AML_BAD_OPCODE; goto cleanup; } diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 4c2836dc825b..1f111cc94c00 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities + * Module Name: exprep - ACPI AML field prep utilities * *****************************************************************************/ @@ -103,8 +103,10 @@ acpi_ex_generate_access(u32 field_bit_offset, /* Round Field start offset and length to "minimal" byte boundaries */ field_byte_offset = ACPI_DIV_8(ACPI_ROUND_DOWN(field_bit_offset, 8)); - field_byte_end_offset = ACPI_DIV_8(ACPI_ROUND_UP(field_bit_length + - field_bit_offset, 8)); + + field_byte_end_offset = + ACPI_DIV_8(ACPI_ROUND_UP(field_bit_length + field_bit_offset, 8)); + field_byte_length = field_byte_end_offset - field_byte_offset; ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, @@ -159,7 +161,8 @@ acpi_ex_generate_access(u32 field_bit_offset, if (accesses <= 1) { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, - "Entire field can be accessed with one operation of size %u\n", + "Entire field can be accessed " + "with one operation of size %u\n", access_byte_width)); return_VALUE(access_byte_width); } @@ -202,6 +205,7 @@ acpi_ex_generate_access(u32 field_bit_offset, */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Cannot access field in one operation, using width 8\n")); + return_VALUE(8); } #endif /* ACPI_UNDER_DEVELOPMENT */ @@ -281,6 +285,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc, /* Invalid field access type */ ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access)); + return_UINT32(0); } @@ -354,8 +359,8 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, * For all other access types (Byte, Word, Dword, Qword), the Bitwidth is * the same 
(equivalent) as the byte_alignment. */ - access_bit_width = acpi_ex_decode_field_access(obj_desc, field_flags, - &byte_alignment); + access_bit_width = + acpi_ex_decode_field_access(obj_desc, field_flags, &byte_alignment); if (!access_bit_width) { return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } @@ -595,7 +600,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) access_byte_width); ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, - "IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n", + "IndexField: BitOff %X, Off %X, Value %X, " + "Gran %X, Index %p, Data %p\n", obj_desc->index_field.start_field_bit_offset, obj_desc->index_field.base_byte_offset, obj_desc->index_field.value, @@ -615,8 +621,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) * Store the constructed descriptor (obj_desc) into the parent Node, * preserving the current type of that named_obj. */ - status = acpi_ns_attach_object(info->field_node, obj_desc, - acpi_ns_get_type(info->field_node)); + status = + acpi_ns_attach_object(info->field_node, obj_desc, + acpi_ns_get_type(info->field_node)); ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Set NamedObj %p [%4.4s], ObjDesc %p\n", diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index b4a5e44c00dd..1851a307544a 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -392,7 +392,8 @@ acpi_ex_pci_config_space_handler(u32 function, pci_register = (u16) (u32) address; ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Pci-Config %u (%u) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n", + "Pci-Config %u (%u) Seg(%04x) Bus(%04x) " + "Dev(%04x) Func(%04x) Reg(%04x)\n", function, bit_width, pci_id->segment, pci_id->bus, pci_id->device, pci_id->function, pci_register)); @@ -400,14 +401,16 @@ acpi_ex_pci_config_space_handler(u32 function, case ACPI_READ: *value = 0; - status = acpi_os_read_pci_configuration(pci_id, pci_register, - value, bit_width); + status = + acpi_os_read_pci_configuration(pci_id, pci_register, value, + bit_width); break; case ACPI_WRITE: - status = acpi_os_write_pci_configuration(pci_id, pci_register, - *value, bit_width); + status = + acpi_os_write_pci_configuration(pci_id, pci_register, + *value, bit_width); break; default: diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index 1b372ef69308..6793dcc8a946 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -112,7 +112,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, /* * Several object types require no further processing: - * 1) Device/Thermal objects don't have a "real" subobject, return the Node + * 1) Device/Thermal objects don't have a "real" subobject, return Node * 2) Method locals and arguments have a pseudo-Node * 3) 10/2007: Added method type to assist with Package construction. 
*/ diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index a1afe1a1e7c2..7f9260b129fc 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -217,7 +217,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, * the package, can't dereference it */ ACPI_ERROR((AE_INFO, - "Attempt to dereference an Index to NULL package element Idx=%p", + "Attempt to dereference an Index to " + "NULL package element Idx=%p", stack_desc)); status = AE_AML_UNINITIALIZED_ELEMENT; } @@ -361,10 +362,9 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state, if (type == ACPI_TYPE_LOCAL_ALIAS) { type = ((struct acpi_namespace_node *)obj_desc)->type; - obj_desc = - acpi_ns_get_attached_object((struct - acpi_namespace_node *) - obj_desc); + obj_desc = acpi_ns_get_attached_object((struct + acpi_namespace_node + *)obj_desc); } if (!obj_desc) { diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index 424442d50b5e..861453e58555 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -90,8 +90,8 @@ acpi_ex_check_object_type(acpi_object_type type_needed, * specification, a store to a constant is a noop.) */ if ((this_type == ACPI_TYPE_INTEGER) && - (((union acpi_operand_object *)object)->common. - flags & AOPOBJ_AML_CONSTANT)) { + (((union acpi_operand_object *)object)->common.flags & + AOPOBJ_AML_CONSTANT)) { return (AE_OK); } } @@ -196,10 +196,10 @@ acpi_ex_resolve_operands(u16 opcode, * thus, the attached object is always the aliased namespace node */ if (object_type == ACPI_TYPE_LOCAL_ALIAS) { - obj_desc = - acpi_ns_get_attached_object((struct - acpi_namespace_node - *)obj_desc); + obj_desc = acpi_ns_get_attached_object((struct + acpi_namespace_node + *) + obj_desc); *stack_ptr = obj_desc; object_type = ((struct acpi_namespace_node *)obj_desc)-> @@ -285,8 +285,8 @@ acpi_ex_resolve_operands(u16 opcode, case ARGI_REF_OR_STRING: /* Can be a String or Reference */ if ((ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == - ACPI_DESC_TYPE_OPERAND) - && (obj_desc->common.type == ACPI_TYPE_STRING)) { + ACPI_DESC_TYPE_OPERAND) && + (obj_desc->common.type == ACPI_TYPE_STRING)) { /* * String found - the string references a named object and * must be resolved to a node @@ -465,8 +465,9 @@ acpi_ex_resolve_operands(u16 opcode, * But we can implicitly convert from a BUFFER or INTEGER * aka - "Implicit Source Operand Conversion" */ - status = acpi_ex_convert_to_string(obj_desc, stack_ptr, - ACPI_IMPLICIT_CONVERT_HEX); + status = + acpi_ex_convert_to_string(obj_desc, stack_ptr, + ACPI_IMPLICIT_CONVERT_HEX); if (ACPI_FAILURE(status)) { if (status == AE_TYPE) { ACPI_ERROR((AE_INFO, @@ -597,8 +598,10 @@ acpi_ex_resolve_operands(u16 opcode, case ARGI_REGION_OR_BUFFER: /* Used by Load() only */ - /* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */ - + /* + * Need an operand of type REGION or a BUFFER + * (which could be a resolved region field) + */ switch (obj_desc->common.type) { case ACPI_TYPE_BUFFER: case ACPI_TYPE_REGION: @@ -640,9 +643,9 @@ acpi_ex_resolve_operands(u16 opcode, if (acpi_gbl_enable_interpreter_slack) { /* - * Enable original behavior of Store(), allowing any and all - * objects as the source operand. The ACPI spec does not - * allow this, however. + * Enable original behavior of Store(), allowing any + * and all objects as the source operand. The ACPI + * spec does not allow this, however. 
*/ break; } @@ -655,7 +658,8 @@ acpi_ex_resolve_operands(u16 opcode, } ACPI_ERROR((AE_INFO, - "Needed Integer/Buffer/String/Package/Ref/Ddb], found [%s] %p", + "Needed Integer/Buffer/String/Package/Ref/Ddb]" + ", found [%s] %p", acpi_ut_get_object_type_name (obj_desc), obj_desc)); @@ -678,9 +682,10 @@ acpi_ex_resolve_operands(u16 opcode, * Make sure that the original object was resolved to the * required object type (Simple cases only). */ - status = acpi_ex_check_object_type(type_needed, - (*stack_ptr)->common.type, - *stack_ptr); + status = + acpi_ex_check_object_type(type_needed, + (*stack_ptr)->common.type, + *stack_ptr); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index c076e9100d66..d3afbcbe7886 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -467,7 +467,8 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, case ACPI_TYPE_THERMAL: ACPI_ERROR((AE_INFO, - "Target must be [Buffer/Integer/String/Reference], found [%s] (%4.4s)", + "Target must be [Buffer/Integer/String/Reference]" + ", found [%s] (%4.4s)", acpi_ut_get_type_name(node->type), node->name.ascii)); @@ -504,8 +505,9 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, * an implicit conversion, as per the ACPI specification. * A direct store is performed instead. */ - status = acpi_ex_store_direct_to_node(source_desc, node, - walk_state); + status = + acpi_ex_store_direct_to_node(source_desc, node, + walk_state); break; } @@ -528,8 +530,9 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, * store has been performed such that the node/object type * has been changed. */ - status = acpi_ns_attach_object(node, new_desc, - new_desc->common.type); + status = + acpi_ns_attach_object(node, new_desc, + new_desc->common.type); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Store type [%s] into [%s] via Convert/Attach\n", @@ -563,8 +566,8 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, * operator. (Note, for this default case, all normal * Store/Target operations exited above with an error). 
*/ - status = acpi_ex_store_direct_to_node(source_desc, node, - walk_state); + status = + acpi_ex_store_direct_to_node(source_desc, node, walk_state); break; } diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index e1d4f4d51b97..ad3bc92af2e6 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Module Name: exstorob - AML Interpreter object store support, store to object + * Module Name: exstorob - AML object store support, store to object * *****************************************************************************/ @@ -203,8 +203,9 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc, ACPI_FREE(target_desc->string.pointer); } - target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size) - length + 1); + target_desc->string.pointer = + ACPI_ALLOCATE_ZEROED((acpi_size) length + 1); + if (!target_desc->string.pointer) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index 05450656fe3d..7c91c1f799a5 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -78,7 +78,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) /* We must wait, so unlock the interpreter */ acpi_ex_exit_interpreter(); - status = acpi_os_wait_semaphore(semaphore, 1, timeout); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, @@ -124,7 +123,6 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) /* We must wait, so unlock the interpreter */ acpi_ex_exit_interpreter(); - status = acpi_os_acquire_mutex(mutex, timeout); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, @@ -169,8 +167,8 @@ acpi_status acpi_ex_system_do_stall(u32 how_long) * (ACPI specifies 100 usec as max, but this gives some slack in * order to support existing BIOSs) */ - ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)", - how_long)); + ACPI_ERROR((AE_INFO, + "Time parameter is too large (%u)", how_long)); status = AE_AML_OPERAND_VALUE; } else { acpi_os_stall(how_long); diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c new file mode 100644 index 000000000000..e4a185eece8a --- /dev/null +++ b/drivers/acpi/acpica/extrace.c @@ -0,0 +1,377 @@ +/****************************************************************************** + * + * Module Name: extrace - Support for interpreter execution tracing + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2015, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("extrace")
+
+static union acpi_operand_object *acpi_gbl_trace_method_object = NULL;
+
+/* Local prototypes */
+
+#ifdef ACPI_DEBUG_OUTPUT
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_interpreter_trace_enabled
+ *
+ * PARAMETERS:  name - Whether method name should be matched,
+ *                     this should be checked before starting
+ *                     the tracer
+ *
+ * RETURN:      TRUE if interpreter trace is enabled.
+ *
+ * DESCRIPTION: Check whether interpreter trace is enabled
+ *
+ ******************************************************************************/
+
+static u8 acpi_ex_interpreter_trace_enabled(char *name)
+{
+
+        /* Check if tracing is enabled */
+
+        if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) {
+                return (FALSE);
+        }
+
+        /*
+         * Check if tracing is filtered:
+         *
+         * 1. If the tracer is started, acpi_gbl_trace_method_object should have
+         *    been filled by the trace starter
+         * 2. If the tracer is not started, acpi_gbl_trace_method_name should be
+         *    matched if it is specified
+         * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should
+         *    not be cleared by the trace stopper during the first match
+         */
+        if (acpi_gbl_trace_method_object) {
+                return (TRUE);
+        }
+
+        if (name &&
+            (acpi_gbl_trace_method_name &&
+             strcmp(acpi_gbl_trace_method_name, name))) {
+                return (FALSE);
+        }
+
+        if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) &&
+            !acpi_gbl_trace_method_name) {
+                return (FALSE);
+        }
+
+        return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_get_trace_event_name
+ *
+ * PARAMETERS:  type - Trace event type
+ *
+ * RETURN:      Trace event name.
+ *
+ * DESCRIPTION: Used to obtain the full trace event name.
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_DEBUG_OUTPUT
+
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type)
+{
+
+        switch (type) {
+        case ACPI_TRACE_AML_METHOD:
+
+                return "Method";
+
+        case ACPI_TRACE_AML_OPCODE:
+
+                return "Opcode";
+
+        case ACPI_TRACE_AML_REGION:
+
+                return "Region";
+
+        default:
+
+                return "";
+        }
+}
+
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_trace_point
+ *
+ * PARAMETERS:  type - Trace event type
+ *              begin - TRUE if before execution
+ *              aml - Executed AML address
+ *              pathname - Object path
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Internal interpreter execution trace.
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+                    u8 begin, u8 *aml, char *pathname)
+{
+
+        ACPI_FUNCTION_NAME(ex_trace_point);
+
+        if (pathname) {
+                ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+                                  "%s %s [0x%p:%s] execution.\n",
+                                  acpi_ex_get_trace_event_name(type),
+                                  begin ? "Begin" : "End", aml, pathname));
+        } else {
+                ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+                                  "%s %s [0x%p] execution.\n",
+                                  acpi_ex_get_trace_event_name(type),
+                                  begin ? "Begin" : "End", aml));
+        }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_start_trace_method
+ *
+ * PARAMETERS:  method_node - Node of the method
+ *              obj_desc - The method object
+ *              walk_state - current state, NULL if not yet executing
+ *                           a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Start control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+                           union acpi_operand_object *obj_desc,
+                           struct acpi_walk_state *walk_state)
+{
+        acpi_status status;
+        char *pathname = NULL;
+        u8 enabled = FALSE;
+
+        ACPI_FUNCTION_NAME(ex_start_trace_method);
+
+        if (method_node) {
+                pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+        }
+
+        status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+        if (ACPI_FAILURE(status)) {
+                goto exit;
+        }
+
+        enabled = acpi_ex_interpreter_trace_enabled(pathname);
+        if (enabled && !acpi_gbl_trace_method_object) {
+                acpi_gbl_trace_method_object = obj_desc;
+                acpi_gbl_original_dbg_level = acpi_dbg_level;
+                acpi_gbl_original_dbg_layer = acpi_dbg_layer;
+                acpi_dbg_level = ACPI_TRACE_LEVEL_ALL;
+                acpi_dbg_layer = ACPI_TRACE_LAYER_ALL;
+
+                if (acpi_gbl_trace_dbg_level) {
+                        acpi_dbg_level = acpi_gbl_trace_dbg_level;
+                }
+
+                if (acpi_gbl_trace_dbg_layer) {
+                        acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
+                }
+        }
+
+        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit:
+        if (enabled) {
+                ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE,
+                                 obj_desc ? obj_desc->method.aml_start : NULL,
+                                 pathname);
+        }
+
+        if (pathname) {
+                ACPI_FREE(pathname);
+        }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_stop_trace_method
+ *
+ * PARAMETERS:  method_node - Node of the method
+ *              obj_desc - The method object
+ *              walk_state - current state, NULL if not yet executing
+ *                           a method.
+ * + * RETURN: None + * + * DESCRIPTION: Stop control method execution trace + * + ******************************************************************************/ + +void +acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state) +{ + acpi_status status; + char *pathname = NULL; + u8 enabled; + + ACPI_FUNCTION_NAME(ex_stop_trace_method); + + if (method_node) { + pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + goto exit_path; + } + + enabled = acpi_ex_interpreter_trace_enabled(NULL); + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + + if (enabled) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE, + obj_desc ? obj_desc->method.aml_start : NULL, + pathname); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + goto exit_path; + } + + /* Check whether the tracer should be stopped */ + + if (acpi_gbl_trace_method_object == obj_desc) { + + /* Disable further tracing if type is one-shot */ + + if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) { + acpi_gbl_trace_method_name = NULL; + } + + acpi_dbg_level = acpi_gbl_original_dbg_level; + acpi_dbg_layer = acpi_gbl_original_dbg_layer; + acpi_gbl_trace_method_object = NULL; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + +exit_path: + if (pathname) { + ACPI_FREE(pathname); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_start_trace_opcode + * + * PARAMETERS: op - The parser opcode object + * walk_state - current state, NULL if not yet executing + * a method. + * + * RETURN: None + * + * DESCRIPTION: Start opcode execution trace + * + ******************************************************************************/ + +void +acpi_ex_start_trace_opcode(union acpi_parse_object *op, + struct acpi_walk_state *walk_state) +{ + + ACPI_FUNCTION_NAME(ex_start_trace_opcode); + + if (acpi_ex_interpreter_trace_enabled(NULL) && + (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE, + op->common.aml, op->common.aml_op_name); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_stop_trace_opcode + * + * PARAMETERS: op - The parser opcode object + * walk_state - current state, NULL if not yet executing + * a method. + * + * RETURN: None + * + * DESCRIPTION: Stop opcode execution trace + * + ******************************************************************************/ + +void +acpi_ex_stop_trace_opcode(union acpi_parse_object *op, + struct acpi_walk_state *walk_state) +{ + + ACPI_FUNCTION_NAME(ex_stop_trace_opcode); + + if (acpi_ex_interpreter_trace_enabled(NULL) && + (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE, + op->common.aml, op->common.aml_op_name); + } +} diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 30c3f464fda5..8ae7634bd7d2 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -167,8 +167,8 @@ u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc) if ((acpi_gbl_integer_byte_width == 4) && (obj_desc->integer.value > (u64)ACPI_UINT32_MAX)) { /* - * We are executing in a 32-bit ACPI table. - * Truncate the value to 32 bits by zeroing out the upper 32-bit field + * We are executing in a 32-bit ACPI table. 
Truncate + * the value to 32 bits by zeroing out the upper 32-bit field */ obj_desc->integer.value &= (u64)ACPI_UINT32_MAX; return (TRUE); @@ -323,7 +323,8 @@ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id) if (compressed_id > ACPI_UINT32_MAX) { ACPI_WARNING((AE_INFO, - "Expected EISAID is larger than 32 bits: 0x%8.8X%8.8X, truncating", + "Expected EISAID is larger than 32 bits: " + "0x%8.8X%8.8X, truncating", ACPI_FORMAT_UINT64(compressed_id))); } diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index e5599f610808..d0319a228ef7 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -117,8 +117,8 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) /* Clear wake status (WAK_STS) */ - status = - acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status); + status = acpi_write((u64)ACPI_X_WAKE_STATUS, + &acpi_gbl_FADT.sleep_status); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 73cfa5947ff3..8272f966382a 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -187,9 +187,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) */ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); - status = acpi_hw_write(register_bit, - &gpe_register_info->status_address); - + status = + acpi_hw_write(register_bit, &gpe_register_info->status_address); return (status); } @@ -297,6 +296,7 @@ acpi_hw_gpe_enable_write(u8 enable_mask, acpi_status status; gpe_register_info->enable_mask = enable_mask; + status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); return (status); } diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index 7d21cae6d602..ac5b7f768d4b 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -80,8 +80,8 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) /* Clear wake status */ - status = - acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS); + status = acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, + ACPI_CLEAR_STATUS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 5f97468df8ff..b2e50d8007fe 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -504,11 +504,20 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) * Evaluate the \_Sx namespace object containing the register values * for this state */ - info->relative_pathname = - ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]); + info->relative_pathname = ACPI_CAST_PTR(char, + acpi_gbl_sleep_state_names + [sleep_state]); + status = acpi_ns_evaluate(info); if (ACPI_FAILURE(status)) { - goto cleanup; + if (status == AE_NOT_FOUND) { + + /* The _Sx states are optional, ignore NOT_FOUND */ + + goto final_cleanup; + } + + goto warning_cleanup; } /* Must have a return object */ @@ -517,7 +526,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]", info->relative_pathname)); status = AE_AML_NO_RETURN_VALUE; - goto cleanup; + goto warning_cleanup; } /* Return object must be of type Package */ @@ -526,7 +535,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) ACPI_ERROR((AE_INFO, "Sleep State return object is not a Package")); status = AE_AML_OPERAND_TYPE; - goto 
cleanup1; + goto return_value_cleanup; } /* @@ -570,16 +579,17 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) break; } -cleanup1: +return_value_cleanup: acpi_ut_remove_reference(info->return_object); -cleanup: +warning_cleanup: if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While evaluating Sleep State [%s]", info->relative_pathname)); } +final_cleanup: ACPI_FREE(info); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index d62a61612b3f..1ce4efa1a2bd 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -52,9 +52,9 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ #if (!ACPI_REDUCED_HARDWARE) static acpi_status -acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs, - acpi_physical_address physical_address, - acpi_physical_address physical_address64); +acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs, + acpi_physical_address physical_address, + acpi_physical_address physical_address64); #endif static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); @@ -79,22 +79,20 @@ static struct acpi_sleep_functions acpi_sleep_dispatch[] = { /* * These functions are removed for the ACPI_REDUCED_HARDWARE case: - * acpi_set_firmware_waking_vectors * acpi_set_firmware_waking_vector - * acpi_set_firmware_waking_vector64 * acpi_enter_sleep_state_s4bios */ #if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * - * FUNCTION: acpi_hw_set_firmware_waking_vectors + * FUNCTION: acpi_hw_set_firmware_waking_vector * * PARAMETERS: facs - Pointer to FACS table * physical_address - 32-bit physical address of ACPI real mode - * entry point. + * entry point * physical_address64 - 64-bit physical address of ACPI protected - * mode entry point. + * mode entry point * * RETURN: Status * @@ -103,11 +101,11 @@ static struct acpi_sleep_functions acpi_sleep_dispatch[] = { ******************************************************************************/ static acpi_status -acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs, - acpi_physical_address physical_address, - acpi_physical_address physical_address64) +acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs, + acpi_physical_address physical_address, + acpi_physical_address physical_address64) { - ACPI_FUNCTION_TRACE(acpi_hw_set_firmware_waking_vectors); + ACPI_FUNCTION_TRACE(acpi_hw_set_firmware_waking_vector); /* @@ -140,12 +138,12 @@ acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs, /******************************************************************************* * - * FUNCTION: acpi_set_firmware_waking_vectors + * FUNCTION: acpi_set_firmware_waking_vector * * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode - * entry point. + * entry point * physical_address64 - 64-bit physical address of ACPI protected - * mode entry point. 
+ * mode entry point * * RETURN: Status * @@ -154,79 +152,23 @@ acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs, ******************************************************************************/ acpi_status -acpi_set_firmware_waking_vectors(acpi_physical_address physical_address, - acpi_physical_address physical_address64) +acpi_set_firmware_waking_vector(acpi_physical_address physical_address, + acpi_physical_address physical_address64) { - ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors); + ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); if (acpi_gbl_FACS) { - (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_FACS, - physical_address, - physical_address64); + (void)acpi_hw_set_firmware_waking_vector(acpi_gbl_FACS, + physical_address, + physical_address64); } return_ACPI_STATUS(AE_OK); } -ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vectors) - -/******************************************************************************* - * - * FUNCTION: acpi_set_firmware_waking_vector - * - * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode - * entry point. - * - * RETURN: Status - * - * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS - * - ******************************************************************************/ -acpi_status acpi_set_firmware_waking_vector(u32 physical_address) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); - - status = acpi_set_firmware_waking_vectors((acpi_physical_address) - physical_address, 0); - - return_ACPI_STATUS(status); -} - ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector) -#if ACPI_MACHINE_WIDTH == 64 -/******************************************************************************* - * - * FUNCTION: acpi_set_firmware_waking_vector64 - * - * PARAMETERS: physical_address - 64-bit physical address of ACPI protected - * mode entry point. - * - * RETURN: Status - * - * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if - * it exists in the table. This function is intended for use with - * 64-bit host operating systems. - * - ******************************************************************************/ -acpi_status acpi_set_firmware_waking_vector64(u64 physical_address) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64); - - status = acpi_set_firmware_waking_vectors(0, - (acpi_physical_address) - physical_address); - - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64) -#endif /******************************************************************************* * * FUNCTION: acpi_enter_sleep_state_s4bios @@ -286,6 +228,7 @@ acpi_status acpi_enter_sleep_state_s4bios(void) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + } while (!in_value); return_ACPI_STATUS(AE_OK); diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index da55a1c60da1..f21568ba325b 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -96,9 +96,9 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object, /* Extract each buffer byte to create the integer */ for (i = 0; i < original_object->buffer.length; i++) { - value |= - ((u64)original_object->buffer. 
- pointer[i] << (i * 8)); + value |= ((u64) + original_object->buffer.pointer[i] << (i * + 8)); } break; @@ -153,10 +153,9 @@ acpi_ns_convert_to_string(union acpi_operand_object *original_object, return (AE_NO_MEMORY); } } else { - status = - acpi_ex_convert_to_string(original_object, - &new_object, - ACPI_IMPLICIT_CONVERT_HEX); + status = acpi_ex_convert_to_string(original_object, + &new_object, + ACPI_IMPLICIT_CONVERT_HEX); if (ACPI_FAILURE(status)) { return (status); } @@ -244,9 +243,8 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, /* String-to-Buffer conversion. Simple data copy */ - new_object = - acpi_ut_create_buffer_object(original_object->string. - length); + new_object = acpi_ut_create_buffer_object + (original_object->string.length); if (!new_object) { return (AE_NO_MEMORY); } @@ -308,7 +306,8 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, * * FUNCTION: acpi_ns_convert_to_unicode * - * PARAMETERS: original_object - ASCII String Object to be converted + * PARAMETERS: scope - Namespace node for the method/object + * original_object - ASCII String Object to be converted * return_object - Where the new converted object is returned * * RETURN: Status. AE_OK if conversion was successful. @@ -318,7 +317,8 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, ******************************************************************************/ acpi_status -acpi_ns_convert_to_unicode(union acpi_operand_object *original_object, +acpi_ns_convert_to_unicode(struct acpi_namespace_node * scope, + union acpi_operand_object *original_object, union acpi_operand_object **return_object) { union acpi_operand_object *new_object; @@ -372,7 +372,8 @@ acpi_ns_convert_to_unicode(union acpi_operand_object *original_object, * * FUNCTION: acpi_ns_convert_to_resource * - * PARAMETERS: original_object - Object to be converted + * PARAMETERS: scope - Namespace node for the method/object + * original_object - Object to be converted * return_object - Where the new converted object is returned * * RETURN: Status. AE_OK if conversion was successful @@ -383,7 +384,8 @@ acpi_ns_convert_to_unicode(union acpi_operand_object *original_object, ******************************************************************************/ acpi_status -acpi_ns_convert_to_resource(union acpi_operand_object *original_object, +acpi_ns_convert_to_resource(struct acpi_namespace_node * scope, + union acpi_operand_object *original_object, union acpi_operand_object **return_object) { union acpi_operand_object *new_object; @@ -444,3 +446,78 @@ acpi_ns_convert_to_resource(union acpi_operand_object *original_object, *return_object = new_object; return (AE_OK); } + +/******************************************************************************* + * + * FUNCTION: acpi_ns_convert_to_reference + * + * PARAMETERS: scope - Namespace node for the method/object + * original_object - Object to be converted + * return_object - Where the new converted object is returned + * + * RETURN: Status. AE_OK if conversion was successful + * + * DESCRIPTION: Attempt to convert a Integer object to a object_reference. + * Buffer. 
+ * + ******************************************************************************/ + +acpi_status +acpi_ns_convert_to_reference(struct acpi_namespace_node * scope, + union acpi_operand_object *original_object, + union acpi_operand_object **return_object) +{ + union acpi_operand_object *new_object = NULL; + acpi_status status; + struct acpi_namespace_node *node; + union acpi_generic_state scope_info; + char *name; + + ACPI_FUNCTION_NAME(ns_convert_to_reference); + + /* Convert path into internal presentation */ + + status = + acpi_ns_internalize_name(original_object->string.pointer, &name); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Find the namespace node */ + + scope_info.scope.node = + ACPI_CAST_PTR(struct acpi_namespace_node, scope); + status = + acpi_ns_lookup(&scope_info, name, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, + NULL, &node); + if (ACPI_FAILURE(status)) { + + /* Check if we are resolving a named reference within a package */ + + ACPI_ERROR_NAMESPACE(original_object->string.pointer, status); + goto error_exit; + } + + /* Create and init a new internal ACPI object */ + + new_object = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE); + if (!new_object) { + status = AE_NO_MEMORY; + goto error_exit; + } + new_object->reference.node = node; + new_object->reference.object = node->object; + new_object->reference.class = ACPI_REFCLASS_NAME; + + /* + * Increase reference of the object if needed (the object is likely a + * null for device nodes). + */ + acpi_ut_add_reference(node->object); + +error_exit: + ACPI_FREE(name); + *return_object = new_object; + return (AE_OK); +} diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 37aa5c45ca4b..bc5ff358b2a7 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -539,11 +539,13 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, acpi_os_printf ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n", obj_type); + bytes_to_dump = 32; } else { acpi_os_printf ("(Pointer to ACPI Object type %.2X [%s])\n", obj_type, acpi_ut_get_type_name(obj_type)); + bytes_to_dump = sizeof(union acpi_operand_object); } @@ -573,6 +575,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, */ bytes_to_dump = obj_desc->string.length; obj_desc = (void *)obj_desc->string.pointer; + acpi_os_printf("(Buffer/String pointer %p length %X)\n", obj_desc, bytes_to_dump); ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump); @@ -717,7 +720,7 @@ acpi_ns_dump_one_object_path(acpi_handle obj_handle, return (AE_OK); } - pathname = acpi_ns_get_external_pathname(node); + pathname = acpi_ns_get_normalized_pathname(node, TRUE); path_indent = 1; if (level <= max_level) { diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index 7eba578d36f3..15e0b2ec5d65 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -135,7 +135,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) /* Get the full pathname to the object, for use in warning messages */ - info->full_pathname = acpi_ns_get_external_pathname(info->node); + info->full_pathname = acpi_ns_get_normalized_pathname(info->node, TRUE); if (!info->full_pathname) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index b744a53618eb..ac59929c3ee9 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -582,7 +582,8 @@ acpi_ns_init_one_device(acpi_handle obj_handle, /* Ignore error and move 
on to next device */ - char *scope_name = acpi_ns_get_external_pathname(info->node); + char *scope_name = + acpi_ns_get_normalized_pathname(device_node, TRUE); ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution", scope_name)); diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 14ab83668207..14c953e6fe9e 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -149,6 +149,23 @@ unlock: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Completed Table Object Initialization\n")); + /* + * Execute any module-level code that was detected during the table load + * phase. Although illegal since ACPI 2.0, there are many machines that + * contain this type of code. Each block of detected executable AML code + * outside of any control method is wrapped with a temporary control + * method object and placed on a global list. The methods on this list + * are executed below. + * + * This case executes the module-level code for each table immediately + * after the table has been loaded. This provides compatibility with + * other ACPI implementations. Optionally, the execution can be deferred + * until later, see acpi_initialize_objects. + */ + if (!acpi_gbl_group_module_level_code) { + acpi_ns_exec_module_code_list(); + } + return_ACPI_STATUS(status); } @@ -321,7 +338,6 @@ acpi_status acpi_ns_unload_namespace(acpi_handle handle) /* This function does the real work */ status = acpi_ns_delete_subtree(handle); - return_ACPI_STATUS(status); } #endif diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 8934b4eddb73..521031f9b6c6 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -70,7 +70,6 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); name_buffer = acpi_ns_get_normalized_pathname(node, FALSE); - return_PTR(name_buffer); } @@ -93,7 +92,6 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) ACPI_FUNCTION_ENTRY(); size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE); - return (size); } @@ -217,6 +215,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node, ACPI_PATH_PUT8(full_path, path_size, AML_DUAL_NAME_PREFIX, length); } + ACPI_MOVE_32_TO_32(name, &next_node->name); do_no_trailing = no_trailing; for (i = 0; i < 4; i++) { @@ -228,8 +227,10 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node, ACPI_PATH_PUT8(full_path, path_size, c, length); } } + next_node = next_node->parent; } + ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length); /* Reverse the path string */ @@ -237,6 +238,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node, if (length <= path_size) { left = full_path; right = full_path + length - 1; + while (left < right) { c = *left; *left++ = *right; diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index 3736d43b18b9..43b45a8c2fe4 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -141,8 +141,8 @@ acpi_ns_one_complete_parse(u32 pass_number, /* Parse the AML */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %u parse\n", - pass_number)); + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "*PARSE* pass %u parse\n", pass_number)); status = acpi_ps_parse_aml(walk_state); cleanup: @@ -181,6 +181,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) * performs another complete parse of the AML. 
*/ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); + status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index, start_node); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 9bb251932b45..c05a83be5c11 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -233,8 +233,9 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, /* First element is the (Integer) revision */ - status = acpi_ns_check_object_type(info, elements, - ACPI_RTYPE_INTEGER, 0); + status = + acpi_ns_check_object_type(info, elements, + ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } @@ -252,8 +253,9 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, /* First element is the (Integer) count of subpackages to follow */ - status = acpi_ns_check_object_type(info, elements, - ACPI_RTYPE_INTEGER, 0); + status = + acpi_ns_check_object_type(info, elements, + ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 77d8103d0094..6418863f93d5 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -116,6 +116,11 @@ static const struct acpi_simple_repair_info acpi_object_repair_info[] = { ACPI_NOT_PACKAGE_ELEMENT, acpi_ns_convert_to_resource}, + /* Object reference conversions */ + + {"_DEP", ACPI_RTYPE_STRING, ACPI_ALL_PACKAGE_ELEMENTS, + acpi_ns_convert_to_reference}, + /* Unicode conversions */ {"_MLS", ACPI_RTYPE_STRING, 1, @@ -172,8 +177,8 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, "Missing expected return value")); } - status = - predefined->object_converter(return_object, &new_object); + status = predefined->object_converter(info->node, return_object, + &new_object); if (ACPI_FAILURE(status)) { /* A fatal error occurred during a conversion */ @@ -360,12 +365,15 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct /* Check if we can actually repair this name/type combination */ if ((return_btype & this_name->unexpected_btypes) && - (package_index == this_name->package_index)) { + (this_name->package_index == + ACPI_ALL_PACKAGE_ELEMENTS + || package_index == this_name->package_index)) { return (this_name); } return (NULL); } + this_name++; } @@ -521,6 +529,7 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info, *dest = *source; dest++; } + source++; } @@ -572,8 +581,8 @@ acpi_ns_wrap_with_package(struct acpi_evaluate_info *info, ACPI_FUNCTION_NAME(ns_wrap_with_package); /* - * Create the new outer package and populate it. The new package will - * have a single element, the lone sub-object. + * Create the new outer package and populate it. The new + * package will have a single element, the lone sub-object. 
*/ pkg_obj_desc = acpi_ut_create_package_object(1); if (!pkg_obj_desc) { diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 0515a70f42a4..f6dd2a83ea63 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -225,6 +225,7 @@ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) { return (this_name); } + this_name++; } @@ -301,7 +302,8 @@ acpi_ns_repair_FDE(struct acpi_evaluate_info *info, /* We can only repair if we have exactly 5 BYTEs */ if (return_object->buffer.length != ACPI_FDE_BYTE_BUFFER_SIZE) { - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, info->node_flags, "Incorrect return buffer length %u, expected %u", return_object->buffer.length, @@ -321,8 +323,8 @@ acpi_ns_repair_FDE(struct acpi_evaluate_info *info, /* Expand each byte to a DWORD */ byte_buffer = return_object->buffer.pointer; - dword_buffer = - ACPI_CAST_PTR(u32, buffer_object->buffer.pointer); + dword_buffer = ACPI_CAST_PTR(u32, + buffer_object->buffer.pointer); for (i = 0; i < ACPI_FDE_FIELD_COUNT; i++) { *dword_buffer = (u32) *byte_buffer; @@ -461,7 +463,8 @@ acpi_ns_repair_CST(struct acpi_evaluate_info *info, removing = FALSE; if ((*outer_elements)->package.count == 0) { - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, info->node_flags, "SubPackage[%u] - removing entry due to zero count", i)); @@ -471,7 +474,8 @@ acpi_ns_repair_CST(struct acpi_evaluate_info *info, obj_desc = (*outer_elements)->package.elements[1]; /* Index1 = Type */ if ((u32)obj_desc->integer.value == 0) { - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, info->node_flags, "SubPackage[%u] - removing entry due to invalid Type(0)", i)); @@ -538,8 +542,8 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, } if (return_object->string.length == 0) { - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, - info->node_flags, + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, info->node_flags, "Invalid zero-length _HID or _CID string")); /* Return AE_OK anyway, let driver handle it */ @@ -710,8 +714,9 @@ acpi_ns_repair_PSS(struct acpi_evaluate_info *info, elements = (*outer_elements)->package.elements; obj_desc = elements[1]; /* Index1 = power_dissipation */ - if ((u32) obj_desc->integer.value > previous_value) { - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, + if ((u32)obj_desc->integer.value > previous_value) { + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, info->node_flags, "SubPackage[%u,%u] - suspicious power dissipation values", i - 1, i)); @@ -969,6 +974,7 @@ acpi_ns_remove_element(union acpi_operand_object *obj_desc, u32 index) *dest = *source; dest++; } + source++; } diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index d73904013830..9cc3564de37e 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c @@ -105,7 +105,7 @@ acpi_ns_search_one_scope(u32 target_name, if (ACPI_LV_NAMES & acpi_dbg_level) { char *scope_name; - scope_name = acpi_ns_get_external_pathname(parent_node); + scope_name = acpi_ns_get_normalized_pathname(parent_node, TRUE); if (scope_name) { ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Searching %s (%p) For [%4.4s] (%s)\n", diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index de325ae04ce1..32f1d956eb7f 100644 --- 
a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -173,9 +173,10 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) info->fully_qualified = FALSE; /* - * For the internal name, the required length is 4 bytes per segment, plus - * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null - * (which is not really needed, but no there's harm in putting it there) + * For the internal name, the required length is 4 bytes per segment, + * plus 1 each for root_prefix, multi_name_prefix_op, segment count, + * trailing null (which is not really needed, but no there's harm in + * putting it there) * * strlen() + 1 covers the first name_seg, which has no path separator */ @@ -699,6 +700,7 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node, if (!prefix_node) { *return_node = acpi_gbl_root_node; } + return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 6ee1e52b903d..429f0d27bef0 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -750,8 +750,8 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, /* We have a valid device, invoke the user function */ - status = info->user_function(obj_handle, nesting_level, info->context, - return_value); + status = info->user_function(obj_handle, nesting_level, + info->context, return_value); return (status); } diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 4b4d2f43d406..669e0f1b0967 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -159,7 +159,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) { acpi_status status; struct acpi_namespace_node *node; - char *node_name; + const char *node_name; /* Parameter validation */ @@ -238,7 +238,6 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, struct acpi_pnp_device_id *source, char *string_area) { - /* Create the destination PNP_DEVICE_ID */ dest->string = string_area; @@ -263,11 +262,18 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, * namespace node and possibly by running several standard * control methods (Such as in the case of a device.) * - * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB, - * _CLS, _STA, _ADR, _sx_w, and _sx_d methods. + * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA, + * _CLS, _ADR, _sx_w, and _sx_d methods. * * Note: Allocates the return buffer, must be freed by the caller. * + * Note: This interface is intended to be used during the initial device + * discovery namespace traversal. Therefore, no complex methods can be + * executed, especially those that access operation regions. Therefore, do + * not add any additional methods that could cause problems in this area. + * this was the fate of the _SUB method which was found to cause such + * problems and was removed (11/2015). 
+ * ******************************************************************************/ acpi_status @@ -279,7 +285,6 @@ acpi_get_object_info(acpi_handle handle, struct acpi_pnp_device_id_list *cid_list = NULL; struct acpi_pnp_device_id *hid = NULL; struct acpi_pnp_device_id *uid = NULL; - struct acpi_pnp_device_id *sub = NULL; struct acpi_pnp_device_id *cls = NULL; char *next_id_string; acpi_object_type type; @@ -325,7 +330,7 @@ acpi_get_object_info(acpi_handle handle, if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { /* * Get extra info for ACPI Device/Processor objects only: - * Run the Device _HID, _UID, _SUB, _CID, and _CLS methods. + * Run the Device _HID, _UID, _CLS, and _CID methods. * * Note: none of these methods are required, so they may or may * not be present for this device. The Info->Valid bitfield is used @@ -348,14 +353,6 @@ acpi_get_object_info(acpi_handle handle, valid |= ACPI_VALID_UID; } - /* Execute the Device._SUB method */ - - status = acpi_ut_execute_SUB(node, &sub); - if (ACPI_SUCCESS(status)) { - info_size += sub->length; - valid |= ACPI_VALID_SUB; - } - /* Execute the Device._CID method */ status = acpi_ut_execute_CID(node, &cid_list); @@ -456,9 +453,8 @@ acpi_get_object_info(acpi_handle handle, } /* - * Copy the HID, UID, SUB, and CIDs to the return buffer. - * The variable-length strings are copied to the reserved area - * at the end of the buffer. + * Copy the HID, UID, and CIDs to the return buffer. The variable-length + * strings are copied to the reserved area at the end of the buffer. * * For HID and CID, check if the ID is a PCI Root Bridge. */ @@ -476,11 +472,6 @@ acpi_get_object_info(acpi_handle handle, uid, next_id_string); } - if (sub) { - next_id_string = acpi_ns_copy_device_id(&info->subsystem_id, - sub, next_id_string); - } - if (cid_list) { info->compatible_id_list.count = cid_list->count; info->compatible_id_list.list_size = cid_list->list_size; @@ -522,9 +513,6 @@ cleanup: if (uid) { ACPI_FREE(uid); } - if (sub) { - ACPI_FREE(sub); - } if (cid_list) { ACPI_FREE(cid_list); } @@ -591,6 +579,7 @@ acpi_status acpi_install_method(u8 *buffer) parser_state.aml += acpi_ps_get_opcode_size(opcode); parser_state.pkg_end = acpi_ps_get_next_package_end(&parser_state); path = acpi_ps_get_next_namestring(&parser_state); + method_flags = *parser_state.aml++; aml_start = parser_state.aml; aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start); diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 793383501f81..6e1389babb47 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c @@ -74,10 +74,8 @@ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type) return (AE_BAD_PARAMETER); } - /* - * Special case for the predefined Root Node - * (return type ANY) - */ + /* Special case for the predefined Root Node (return type ANY) */ + if (handle == ACPI_ROOT_OBJECT) { *ret_type = ACPI_TYPE_ANY; return (AE_OK); diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 29d8b7b01dca..f3bcfa20b0ae 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -269,7 +269,8 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, */ if (ACPI_SUCCESS(status) && possible_method_call && (node->type == ACPI_TYPE_METHOD)) { - if (walk_state->opcode == AML_UNLOAD_OP) { + if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) == + ARGP_SUPERNAME) { /* * acpi_ps_get_next_namestring has increased the AML pointer, * so we need to restore the saved AML pointer for 
method call. @@ -696,7 +697,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state * * PARAMETERS: walk_state - Current state * parser_state - Current parser state object - * arg_type - The argument type (AML_*_ARG) + * arg_type - The parser argument type (ARGP_*) * return_arg - Where the next arg is returned * * RETURN: Status, and an op object containing the next argument. @@ -733,6 +734,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, if (!arg) { return_ACPI_STATUS(AE_NO_MEMORY); } + acpi_ps_get_next_simple_arg(parser_state, arg_type, arg); break; @@ -798,6 +800,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, case ARGP_TARGET: case ARGP_SUPERNAME: case ARGP_SIMPLENAME: + case ARGP_NAME_OR_REF: subop = acpi_ps_peek_opcode(parser_state); if (subop == 0 || @@ -814,17 +817,17 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_NO_MEMORY); } - /* To support super_name arg of Unload */ + /* super_name allows argument to be a method call */ - if (walk_state->opcode == AML_UNLOAD_OP) { + if (arg_type == ARGP_SUPERNAME) { status = acpi_ps_get_next_namepath(walk_state, parser_state, arg, - 1); + ACPI_POSSIBLE_METHOD_CALL); /* - * If the super_name arg of Unload is a method call, - * we have restored the AML pointer, just free this Arg + * If the super_name argument is a method call, we have + * already restored the AML pointer, just free this Arg */ if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { @@ -835,7 +838,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, status = acpi_ps_get_next_namepath(walk_state, parser_state, arg, - 0); + ACPI_NOT_METHOD_CALL); } } else { /* Single complex argument, nothing returned */ diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 03ac8c9a67ab..a57f473bac83 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -109,10 +109,10 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */ - status = - acpi_ps_get_next_namepath(walk_state, - &(walk_state->parser_state), op, - 1); + status = acpi_ps_get_next_namepath(walk_state, + &(walk_state->parser_state), + op, + ACPI_POSSIBLE_METHOD_CALL); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -124,8 +124,8 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, /* * Op is not a constant or string, append each argument to the Op */ - while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) - && !walk_state->arg_count) { + while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) && + !walk_state->arg_count) { walk_state->aml = walk_state->parser_state.aml; status = diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index ed90fddf2487..40909ddeebb3 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -185,458 +185,458 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */ /* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER, - AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), + AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), /* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER, - AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), + AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), /* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP, - ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_SIMPLE, - 
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_SIMPLE, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY, - AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP, - ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, AML_CONSTANT), + ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT), /* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP, - ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, AML_CONSTANT), + ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT), /* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP, - ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, AML_CONSTANT), + ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT), /* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP, - ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, AML_CONSTANT), + ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT), /* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP, - ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_NO_OBJ, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_NO_OBJ, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP, - ACPI_TYPE_BUFFER, AML_CLASS_CREATE, - AML_TYPE_CREATE_OBJECT, - AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), + ACPI_TYPE_BUFFER, AML_CLASS_CREATE, + AML_TYPE_CREATE_OBJECT, + AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), /* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP, - ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, - AML_TYPE_CREATE_OBJECT, - AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), + ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, + AML_TYPE_CREATE_OBJECT, + AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), /* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP, - ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_COMPLEX, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED | AML_DEFER), + ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_COMPLEX, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED | AML_DEFER), /* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, 
ARGI_LOCAL4, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LOCAL_VARIABLE, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LOCAL_VARIABLE, 0), /* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_METHOD_ARGUMENT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_METHOD_ARGUMENT, 0), /* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R), /* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, - AML_FLAGS_EXEC_1A_0T_1R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, + AML_FLAGS_EXEC_1A_0T_1R), /* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), /* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 20 */ 
ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_1R, - AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_1R, + AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), /* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_1R, - AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_1R, + AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), /* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_2T_1R, - AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_2T_1R, + AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT), /* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), /* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP, - ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + 
ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY, + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP, - ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY, + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), /* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R), /* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_1R, - AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_1R, + AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), /* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R), /* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, - AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, + AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT), /* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP, - ARGI_CREATE_DWORD_FIELD_OP, - ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, - AML_TYPE_CREATE_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | - AML_DEFER | AML_CREATE), + ARGI_CREATE_DWORD_FIELD_OP, + ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, + AML_TYPE_CREATE_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | + AML_DEFER | AML_CREATE), /* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP, - ARGI_CREATE_WORD_FIELD_OP, - ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, - AML_TYPE_CREATE_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | - AML_DEFER | AML_CREATE), + ARGI_CREATE_WORD_FIELD_OP, + ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, + AML_TYPE_CREATE_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | + AML_DEFER | AML_CREATE), /* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP, - ARGI_CREATE_BYTE_FIELD_OP, - ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, - AML_TYPE_CREATE_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | - AML_DEFER | AML_CREATE), + ARGI_CREATE_BYTE_FIELD_OP, + ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, + AML_TYPE_CREATE_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | + AML_DEFER | AML_CREATE), /* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP, - ARGI_CREATE_BIT_FIELD_OP, - ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, - AML_TYPE_CREATE_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | - AML_DEFER | AML_CREATE), -/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_1R, - AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), + ARGI_CREATE_BIT_FIELD_OP, + ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, + AML_TYPE_CREATE_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | + AML_DEFER | AML_CREATE), +/* 37 */ ACPI_OP("ObjectType", ARGP_OBJECT_TYPE_OP, ARGI_OBJECT_TYPE_OP, + 
ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_1R, + AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), /* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), /* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, + AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), /* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, - AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, + AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), /* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_0T_1R, + AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), /* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_0T_1R, + AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), /* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, + AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), /* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY, - AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), + AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), /* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY, - AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), + AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), /* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY, - AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), + AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), /* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY, - AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), + AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), /* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP, - ACPI_TYPE_ANY, AML_CLASS_CONTROL, - AML_TYPE_CONTROL, AML_HAS_ARGS), + ACPI_TYPE_ANY, AML_CLASS_CONTROL, + AML_TYPE_CONTROL, AML_HAS_ARGS), /* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY, - AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), + AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), /* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP, - ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), + ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), /* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER, - AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), + AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), /* Prefixed opcodes (Two-byte opcodes with a prefix op) */ /* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX, - AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, 
ARGI_EVENT_OP, ACPI_TYPE_EVENT, - AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, - AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, + AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), /* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), /* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP, - ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, - AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | - AML_DEFER | AML_FIELD | AML_CREATE), + ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, + AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | + AML_DEFER | AML_FIELD | AML_CREATE), /* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, - AML_FLAGS_EXEC_1A_1T_0R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, + AML_FLAGS_EXEC_1A_1T_0R), /* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, - AML_FLAGS_EXEC_1A_0T_0R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, + AML_FLAGS_EXEC_1A_0T_0R), /* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, - AML_FLAGS_EXEC_1A_0T_0R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, + AML_FLAGS_EXEC_1A_0T_0R), /* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), /* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), /* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, + AML_FLAGS_EXEC_2A_0T_1R), /* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, - AML_FLAGS_EXEC_1A_0T_0R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, + AML_FLAGS_EXEC_1A_0T_0R), /* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), /* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), /* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP, - ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, - 
AML_TYPE_CONSTANT, 0), + ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, + AML_TYPE_CONSTANT, 0), /* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_CONSTANT, 0), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_CONSTANT, 0), /* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R, - AML_FLAGS_EXEC_3A_0T_0R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R, + AML_FLAGS_EXEC_3A_0T_0R), /* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP, - ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_COMPLEX, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED | AML_DEFER), + ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_COMPLEX, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED | AML_DEFER), /* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, - AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), /* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, - ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_NO_OBJ, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_NO_OBJ, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP, - ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_SIMPLE, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_SIMPLE, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP, - ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_SIMPLE, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_SIMPLE, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP, - ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL, - AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED), + ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL, + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED), /* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, - ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, - ACPI_TYPE_LOCAL_BANK_FIELD, + ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD | AML_DEFER), /* Internal opcodes that map to invalid AML opcodes */ /* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP, - ACPI_TYPE_ANY, AML_CLASS_INTERNAL, - AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_INTERNAL, + AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), /* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP, - ACPI_TYPE_ANY, 
AML_CLASS_INTERNAL, - AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_INTERNAL, + AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), /* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP, - ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY, - AML_CLASS_INTERNAL, AML_TYPE_BOGUS, - AML_HAS_ARGS | AML_CONSTANT), + ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY, + AML_CLASS_INTERNAL, AML_TYPE_BOGUS, + AML_HAS_ARGS | AML_CONSTANT), /* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP, - ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE), + ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE), /* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP, - ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL, - AML_TYPE_METHOD_CALL, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE), + ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL, + AML_TYPE_METHOD_CALL, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE), /* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP, - ACPI_TYPE_ANY, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, 0), + ACPI_TYPE_ANY, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, 0), /* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP, - ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY, - AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), + ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY, + AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), /* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP, - ACPI_TYPE_ANY, AML_CLASS_INTERNAL, - AML_TYPE_BOGUS, - AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), + ACPI_TYPE_ANY, AML_CLASS_INTERNAL, + AML_TYPE_BOGUS, + AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), /* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP, - ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY, - AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), + ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY, + AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), /* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP, - ARGI_STATICSTRING_OP, ACPI_TYPE_ANY, - AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), + ARGI_STATICSTRING_OP, ACPI_TYPE_ANY, + AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), /* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, - AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN, - AML_HAS_ARGS | AML_HAS_RETVAL), + AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN, + AML_HAS_ARGS | AML_HAS_RETVAL), /* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID, - AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS), + AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS), /* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, - AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS), + AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS), /* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, - AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS), + AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS), /* ACPI 2.0 opcodes */ /* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP, - ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, - AML_TYPE_LITERAL, AML_CONSTANT), + ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT), /* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP, ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT, AML_HAS_ARGS | AML_DEFER), /* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP, - ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), + ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY, + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + 
AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), /* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), /* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP, - ARGI_CREATE_QWORD_FIELD_OP, - ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, - AML_TYPE_CREATE_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | - AML_DEFER | AML_CREATE), + ARGI_CREATE_QWORD_FIELD_OP, + ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, + AML_TYPE_CREATE_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | + AML_DEFER | AML_CREATE), /* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP, - ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY, + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_1T_1R, - AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), /* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_2A_1T_1R, - AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_2A_1T_1R, + AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), /* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), /* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R, - AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R, + AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT), /* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP, - ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), + ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), /* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP, - ACPI_TYPE_ANY, AML_CLASS_EXECUTE, - AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), + ACPI_TYPE_ANY, AML_CLASS_EXECUTE, + AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), /* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP, - ARGI_DATA_REGION_OP, ACPI_TYPE_REGION, - AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | - AML_NSNODE | AML_NAMED | AML_DEFER), + ARGI_DATA_REGION_OP, ACPI_TYPE_REGION, + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE | AML_NAMED | AML_DEFER), /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, - ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_NO_OBJ, 
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, + AML_TYPE_NAMED_NO_OBJ, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE), /* ACPI 3.0 opcodes */ /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, + AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, AML_FLAGS_EXEC_0A_0T_1R), /* ACPI 5.0 opcodes */ diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 98001d7f6f80..b729d9b291d0 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -526,8 +526,8 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) } /* - * If the transfer to the new method method call worked, a new walk - * state was created -- get it + * If the transfer to the new method method call worked + *, a new walk state was created -- get it */ walk_state = acpi_ds_get_current_walk_state(thread); continue; @@ -544,8 +544,8 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) /* Check for possible multi-thread reentrancy problem */ if ((status == AE_ALREADY_EXISTS) && - (!(walk_state->method_desc->method. - info_flags & ACPI_METHOD_SERIALIZED))) { + (!(walk_state->method_desc->method.info_flags & + ACPI_METHOD_SERIALIZED))) { /* * Method is not serialized and tried to create an object * twice. The probable cause is that the method cannot diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 71d2877cd2ce..6cb02a2a1468 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -175,8 +175,8 @@ void acpi_ps_free_op(union acpi_parse_object *op) ACPI_FUNCTION_NAME(ps_free_op); if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { - ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n", - op)); + ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, + "Free retval op: %p\n", op)); } if (op->common.flags & ACPI_PARSEOP_GENERIC) { diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index ba5f69171288..f620d4395b66 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -99,6 +99,7 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) if (op == subtree_root) { return_VOID; } + if (next) { op = next; } else { diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index 66d406e8fe36..bdb7e73cdf4a 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c @@ -312,8 +312,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource, /* Validate the Resource Type */ - if ((aml->address.resource_type > 2) - && (aml->address.resource_type < 0xC0)) { + if ((aml->address.resource_type > 2) && + (aml->address.resource_type < 0xC0)) { return (FALSE); } diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index cb739a694931..88fce58cc545 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -143,16 +143,17 @@ acpi_rs_stream_option_length(u32 resource_length, ACPI_FUNCTION_ENTRY(); /* - * The resource_source_index and resource_source are optional elements of some - * Large-type resource descriptors. + * The resource_source_index and resource_source are optional elements of + * some Large-type resource descriptors. */ /* - * If the length of the actual resource descriptor is greater than the ACPI - * spec-defined minimum length, it means that a resource_source_index exists - * and is followed by a (required) null terminated string. 
The string length - * (including the null terminator) is the resource length minus the minimum - * length, minus one byte for the resource_source_index itself. + * If the length of the actual resource descriptor is greater than the + * ACPI spec-defined minimum length, it means that a resource_source_index + * exists and is followed by a (required) null terminated string. The + * string length (including the null terminator) is the resource length + * minus the minimum length, minus one byte for the resource_source_index + * itself. */ if (resource_length > minimum_aml_resource_length) { @@ -277,11 +278,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource, * 16-Bit Address Resource: * Add the size of the optional resource_source info */ - total_size = (acpi_rs_length) - (total_size + - acpi_rs_struct_option_length(&resource->data. - address16. - resource_source)); + total_size = (acpi_rs_length) (total_size + + acpi_rs_struct_option_length + (&resource->data. + address16. + resource_source)); break; case ACPI_RESOURCE_TYPE_ADDRESS32: @@ -289,11 +290,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource, * 32-Bit Address Resource: * Add the size of the optional resource_source info */ - total_size = (acpi_rs_length) - (total_size + - acpi_rs_struct_option_length(&resource->data. - address32. - resource_source)); + total_size = (acpi_rs_length) (total_size + + acpi_rs_struct_option_length + (&resource->data. + address32. + resource_source)); break; case ACPI_RESOURCE_TYPE_ADDRESS64: @@ -301,11 +302,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource, * 64-Bit Address Resource: * Add the size of the optional resource_source info */ - total_size = (acpi_rs_length) - (total_size + - acpi_rs_struct_option_length(&resource->data. - address64. - resource_source)); + total_size = (acpi_rs_length) (total_size + + acpi_rs_struct_option_length + (&resource->data. + address64. + resource_source)); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: @@ -314,26 +315,28 @@ acpi_rs_get_aml_length(struct acpi_resource *resource, * Add the size of each additional optional interrupt beyond the * required 1 (4 bytes for each u32 interrupt number) */ - total_size = (acpi_rs_length) - (total_size + - ((resource->data.extended_irq.interrupt_count - - 1) * 4) + - /* Add the size of the optional resource_source info */ - acpi_rs_struct_option_length(&resource->data. - extended_irq. - resource_source)); + total_size = (acpi_rs_length) (total_size + + ((resource->data. + extended_irq. + interrupt_count - + 1) * 4) + + /* Add the size of the optional resource_source info */ + acpi_rs_struct_option_length + (&resource->data. + extended_irq. + resource_source)); break; case ACPI_RESOURCE_TYPE_GPIO: - total_size = - (acpi_rs_length) (total_size + - (resource->data.gpio. - pin_table_length * 2) + - resource->data.gpio. - resource_source.string_length + - resource->data.gpio. - vendor_length); + total_size = (acpi_rs_length) (total_size + + (resource->data.gpio. + pin_table_length * 2) + + resource->data.gpio. + resource_source. + string_length + + resource->data.gpio. 
+ vendor_length); break; @@ -566,8 +569,8 @@ acpi_rs_get_list_length(u8 * aml_buffer, acpi_gbl_resource_struct_sizes[resource_index] + extra_struct_bytes; } - buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size); + buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size); *size_needed += buffer_size; ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index a5344428f3ae..603e544e3f64 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c @@ -81,8 +81,9 @@ acpi_buffer_to_resource(u8 *aml_buffer, /* Get the required length for the converted resource */ - status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length, - &list_size_needed); + status = + acpi_rs_get_list_length(aml_buffer, aml_buffer_length, + &list_size_needed); if (status == AE_AML_NO_RESOURCE_END_TAG) { status = AE_OK; } @@ -232,8 +233,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, /* Get the required buffer length */ - status = acpi_rs_get_pci_routing_table_length(package_object, - &buffer_size_needed); + status = + acpi_rs_get_pci_routing_table_length(package_object, + &buffer_size_needed); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -270,9 +272,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer); /* - * Fill in the Length field with the information we have at this point. - * The minus four is to subtract the size of the u8 Source[4] member - * because it is added below. + * Fill in the Length field with the information we have at this + * point. The minus four is to subtract the size of the u8 + * Source[4] member because it is added below. */ user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4); @@ -345,11 +347,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, (u8 *) output_buffer->pointer); path_buffer.pointer = user_prt->source; - status = - acpi_ns_handle_to_pathname((acpi_handle) - node, - &path_buffer, - FALSE); + status = acpi_ns_handle_to_pathname((acpi_handle) node, &path_buffer, FALSE); /* +1 to include null terminator */ @@ -371,8 +369,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, case ACPI_TYPE_INTEGER: /* - * If this is a number, then the Source Name is NULL, since the - * entire buffer was zeroed out, we can leave this alone. + * If this is a number, then the Source Name is NULL, since + * the entire buffer was zeroed out, we can leave this alone. 
* * Add to the Length field the length of the u32 NULL */ @@ -451,9 +449,9 @@ acpi_rs_create_aml_resources(struct acpi_buffer *resource_list, /* Get the buffer size needed for the AML byte stream */ - status = acpi_rs_get_aml_length(resource_list->pointer, - resource_list->length, - &aml_size_needed); + status = + acpi_rs_get_aml_length(resource_list->pointer, + resource_list->length, &aml_size_needed); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", (u32)aml_size_needed, acpi_format_exception(status))); diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c index 2a09288e7c57..05cc560699e1 100644 --- a/drivers/acpi/acpica/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c @@ -483,6 +483,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource) static void acpi_rs_out_string(char *title, char *value) { + acpi_os_printf("%27s : %s", title, value); if (!*value) { acpi_os_printf("[NULL NAMESTRING]"); @@ -497,21 +498,25 @@ static void acpi_rs_out_integer8(char *title, u8 value) static void acpi_rs_out_integer16(char *title, u16 value) { + acpi_os_printf("%27s : %4.4X\n", title, value); } static void acpi_rs_out_integer32(char *title, u32 value) { + acpi_os_printf("%27s : %8.8X\n", title, value); } static void acpi_rs_out_integer64(char *title, u64 value) { + acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value)); } static void acpi_rs_out_title(char *title) { + acpi_os_printf("%27s : ", title); } @@ -544,6 +549,7 @@ static void acpi_rs_dump_short_byte_list(u8 length, u8 * data) for (i = 0; i < length; i++) { acpi_os_printf("%X ", data[i]); } + acpi_os_printf("\n"); } diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 50d5be2ee062..286ccb461a20 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c @@ -89,6 +89,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml, /* Get the appropriate conversion info table */ aml_resource = ACPI_CAST_PTR(union aml_resource, aml); + if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) { if (aml_resource->common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) { @@ -225,10 +226,10 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, /* Perform final sanity check on the new AML resource descriptor */ - status = acpi_ut_validate_resource(NULL, - ACPI_CAST_PTR(union - aml_resource, - aml), NULL); + status = + acpi_ut_validate_resource(NULL, + ACPI_CAST_PTR(union aml_resource, + aml), NULL); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index ac37852e0821..b112c7b1abbf 100644 --- a/drivers/acpi/acpica/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c @@ -189,8 +189,8 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, item_count = ACPI_GET8(source); ACPI_SET8(destination, item_count); - resource->length = resource->length + - (info->value * item_count); + resource->length = + resource->length + (info->value * item_count); break; case ACPI_RSC_COUNT_GPIO_RES: @@ -445,8 +445,8 @@ exit: /* Round the resource struct length up to the next boundary (32 or 64) */ - resource->length = - (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length); + resource->length = (u32) + ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length); } return_ACPI_STATUS(AE_OK); } @@ -550,9 +550,8 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, item_count = ACPI_GET8(source); ACPI_SET8(destination, item_count); - aml_length = - (u16) (aml_length + - (info->value 
* (item_count - 1))); + aml_length = (u16) + (aml_length + (info->value * (item_count - 1))); break; case ACPI_RSC_COUNT16: @@ -723,11 +722,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, /* * 16-bit encoded bitmask (IRQ macro) */ - temp16 = acpi_rs_encode_bitmask(source, - *ACPI_ADD_PTR(u8, - resource, - info-> - value)); + temp16 = + acpi_rs_encode_bitmask(source, + *ACPI_ADD_PTR(u8, resource, + info->value)); ACPI_MOVE_16_TO_16(destination, &temp16); break; diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 9486992edbb8..33e558c9434f 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c @@ -221,14 +221,13 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length, ACPI_MOVE_16_TO_16(&aml->large_header.resource_length, &resource_length); } else { - /* Small descriptor -- bits 2:0 of byte 0 contain the length */ - + /* + * Small descriptor -- bits 2:0 of byte 0 contain the length + * Clear any existing length, preserving descriptor type bits + */ aml->small_header.descriptor_type = (u8) - - /* Clear any existing length, preserving descriptor type bits */ - ((aml->small_header. - descriptor_type & ~ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK) - + ((aml->small_header.descriptor_type & + ~ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK) | resource_length); } } @@ -333,8 +332,8 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length, aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length); /* - * resource_source is present if the length of the descriptor is longer than - * the minimum length. + * resource_source is present if the length of the descriptor is longer + * than the minimum length. * * Note: Some resource descriptors will have an additional null, so * we add 1 to the minimum length. 
@@ -366,6 +365,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length, total_length = (u32)strlen(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1; + total_length = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(total_length); memset(resource_source->string_ptr, 0, total_length); @@ -438,8 +438,8 @@ acpi_rs_set_resource_source(union aml_resource * aml, * Add the length of the string (+ 1 for null terminator) to the * final descriptor length */ - descriptor_length += - ((acpi_rsdesc_size) resource_source->string_length + 1); + descriptor_length += ((acpi_rsdesc_size) + resource_source->string_length + 1); } /* Return the new total length of the AML descriptor */ @@ -478,8 +478,9 @@ acpi_rs_get_prt_method_data(struct acpi_namespace_node * node, /* Execute the method, no parameters */ - status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT, - ACPI_BTYPE_PACKAGE, &obj_desc); + status = + acpi_ut_evaluate_object(node, METHOD_NAME__PRT, ACPI_BTYPE_PACKAGE, + &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -527,8 +528,9 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node, /* Execute the method, no parameters */ - status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS, - ACPI_BTYPE_BUFFER, &obj_desc); + status = + acpi_ut_evaluate_object(node, METHOD_NAME__CRS, ACPI_BTYPE_BUFFER, + &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -577,8 +579,9 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node, /* Execute the method, no parameters */ - status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS, - ACPI_BTYPE_BUFFER, &obj_desc); + status = + acpi_ut_evaluate_object(node, METHOD_NAME__PRS, ACPI_BTYPE_BUFFER, + &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -627,8 +630,9 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node, /* Execute the method, no parameters */ - status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI, - ACPI_BTYPE_BUFFER, &obj_desc); + status = + acpi_ut_evaluate_object(node, METHOD_NAME__AEI, ACPI_BTYPE_BUFFER, + &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index 1e8cd5723326..308bfd6bff5f 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c @@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsxface") /* Local macros for 16,32-bit to 64-bit conversion */ #define ACPI_COPY_FIELD(out, in, field) ((out)->field = (in)->field) -#define ACPI_COPY_ADDRESS(out, in) \ +#define ACPI_COPY_ADDRESS(out, in) \ ACPI_COPY_FIELD(out, in, resource_type); \ ACPI_COPY_FIELD(out, in, producer_consumer); \ ACPI_COPY_FIELD(out, in, decode); \ diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 5c9d5abf1588..4a8152777767 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -407,6 +407,7 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature) table_desc->signature.ascii : "????", ACPI_FORMAT_UINT64(table_desc-> address))); + goto invalidate_and_exit; } } diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 6319b42420c6..bd87801acedf 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -337,8 +337,8 @@ acpi_tb_install_standard_table(acpi_physical_address address, * need to be unregistered when they are unloaded, and slots in the * root table list should be reused when empty. */ - if (acpi_gbl_root_table_list.tables[i]. 
- flags & ACPI_TABLE_IS_LOADED) { + if (acpi_gbl_root_table_list.tables[i].flags & + ACPI_TABLE_IS_LOADED) { /* Table is still loaded, this is an error */ diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index 709d5112fc16..d0d12596cfc9 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -76,6 +76,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length) if (!isprint((int)*string)) { *string = '?'; } + string++; length--; } diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index d8ddef38c947..7c1b5f8a5cbf 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -121,6 +121,7 @@ void acpi_tb_check_dsdt_header(void) ACPI_BIOS_ERROR((AE_INFO, "The DSDT has been corrupted or replaced - " "old, new headers below")); + acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header); acpi_tb_print_table_header(0, acpi_gbl_DSDT); @@ -379,7 +380,6 @@ next_table: } acpi_os_unmap_memory(table, length); - return_ACPI_STATUS(AE_OK); } @@ -389,7 +389,7 @@ next_table: * * PARAMETERS: signature - Sig string to be validated * - * RETURN: TRUE if signature is correct length and has valid characters + * RETURN: TRUE if signature is has 4 valid ACPI characters * * DESCRIPTION: Validate an ACPI table signature. * @@ -399,12 +399,6 @@ u8 acpi_is_valid_signature(char *signature) { u32 i; - /* Validate the signature length */ - - if (strlen(signature) != ACPI_NAME_SIZE) { - return (FALSE); - } - /* Validate each character in the signature */ for (i = 0; i < ACPI_NAME_SIZE; i++) { diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 55ee14ca9418..ca2f1366b498 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -191,6 +191,7 @@ acpi_status acpi_tb_load_namespace(void) "(%4.4s:%8.8s) while loading table", table->signature.ascii, table->pointer->oem_table_id)); + tables_failed++; ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, @@ -206,7 +207,7 @@ acpi_status acpi_tb_load_namespace(void) if (!tables_failed) { ACPI_INFO((AE_INFO, - "%u ACPI AML tables successfully acquired and loaded", + "%u ACPI AML tables successfully acquired and loaded\n", tables_loaded)); } else { ACPI_ERROR((AE_INFO, diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index 911ea8e7fe87..38a29e235b74 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -239,8 +239,9 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id, overlap_count++; if (warn) { /* Optional warning message */ pathname = - acpi_ns_get_external_pathname(range_info-> - region_node); + acpi_ns_get_normalized_pathname(range_info-> + region_node, + TRUE); ACPI_WARNING((AE_INFO, "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)", diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 257221d452c8..ade8acf3f3a5 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -257,9 +257,9 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type, ACPI_FUNCTION_ENTRY(); this_index = state->pkg.index; - target_object = (union acpi_object *) - &((union acpi_object *)(state->pkg.dest_object))->package. 
- elements[this_index]; + target_object = (union acpi_object *)&((union acpi_object *) + (state->pkg.dest_object))-> + package.elements[this_index]; switch (object_type) { case ACPI_COPY_TYPE_SIMPLE: @@ -348,15 +348,15 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object, * Free space begins right after the first package */ info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); - info.free_space = - buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); + info.free_space = buffer + + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.object_space = 0; info.num_packages = 1; external_object->type = internal_object->common.type; external_object->package.count = internal_object->package.count; - external_object->package.elements = ACPI_CAST_PTR(union acpi_object, - info.free_space); + external_object->package.elements = + ACPI_CAST_PTR(union acpi_object, info.free_space); /* * Leave room for an array of ACPI_OBJECTS in the buffer @@ -593,8 +593,8 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, package_elements = package_object->package.elements; /* - * Recursive implementation. Probably ok, since nested external packages - * as parameters should be very rare. + * Recursive implementation. Probably ok, since nested external + * packages as parameters should be very rare. */ for (i = 0; i < external_object->package.count; i++) { status = @@ -649,9 +649,8 @@ acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object, /* * Build a simple object (no nested objects) */ - status = - acpi_ut_copy_esimple_to_isimple(external_object, - internal_object); + status = acpi_ut_copy_esimple_to_isimple(external_object, + internal_object); } return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index ecaaaffc0788..3533135dbd4d 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -114,7 +114,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { "PCC" /* 0x0A */ }; -char *acpi_ut_get_region_name(u8 space_id) +const char *acpi_ut_get_region_name(u8 space_id) { if (space_id >= ACPI_USER_REGION_BEGIN) { @@ -127,7 +127,7 @@ char *acpi_ut_get_region_name(u8 space_id) return ("InvalidSpaceId"); } - return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id])); + return (acpi_gbl_region_types[space_id]); } /******************************************************************************* @@ -152,14 +152,14 @@ static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = { "RealTimeClock", }; -char *acpi_ut_get_event_name(u32 event_id) +const char *acpi_ut_get_event_name(u32 event_id) { if (event_id > ACPI_EVENT_MAX) { return ("InvalidEventID"); } - return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id])); + return (acpi_gbl_event_types[event_id]); } /******************************************************************************* @@ -180,7 +180,8 @@ char *acpi_ut_get_event_name(u32 event_id) * * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching; * when stored in a table it really means that we have thus far seen no - * evidence to indicate what type is actually going to be stored for this entry. + * evidence to indicate what type is actually going to be stored for this + & entry. 
*/ static const char acpi_gbl_bad_type[] = "UNDEFINED"; @@ -220,17 +221,17 @@ static const char *acpi_gbl_ns_type_names[] = { /* 30 */ "Invalid" }; -char *acpi_ut_get_type_name(acpi_object_type type) +const char *acpi_ut_get_type_name(acpi_object_type type) { if (type > ACPI_TYPE_INVALID) { - return (ACPI_CAST_PTR(char, acpi_gbl_bad_type)); + return (acpi_gbl_bad_type); } - return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type])); + return (acpi_gbl_ns_type_names[type]); } -char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc) +const char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc) { ACPI_FUNCTION_TRACE(ut_get_object_type_name); @@ -267,7 +268,7 @@ char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc) * ******************************************************************************/ -char *acpi_ut_get_node_name(void *object) +const char *acpi_ut_get_node_name(void *object) { struct acpi_namespace_node *node = (struct acpi_namespace_node *)object; @@ -333,7 +334,7 @@ static const char *acpi_gbl_desc_type_names[] = { /* 15 */ "Node" }; -char *acpi_ut_get_descriptor_name(void *object) +const char *acpi_ut_get_descriptor_name(void *object) { if (!object) { @@ -344,10 +345,7 @@ char *acpi_ut_get_descriptor_name(void *object) return ("Not a Descriptor"); } - return (ACPI_CAST_PTR(char, - acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE - (object)])); - + return (acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE(object)]); } /******************************************************************************* @@ -415,7 +413,7 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object) /* Names for internal mutex objects, used for debug output */ -static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { +static const char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { "ACPI_MTX_Interpreter", "ACPI_MTX_Namespace", "ACPI_MTX_Tables", @@ -424,7 +422,7 @@ static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { "ACPI_MTX_Memory", }; -char *acpi_ut_get_mutex_name(u32 mutex_id) +const char *acpi_ut_get_mutex_name(u32 mutex_id) { if (mutex_id > ACPI_MAX_MUTEX) { diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 1638312e3d8f..1afd7427a90c 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -209,6 +209,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) acpi_ut_delete_object_desc(object->method.mutex); object->method.mutex = NULL; } + if (object->method.node) { object->method.node = NULL; } @@ -515,8 +516,8 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } /* - * All sub-objects must have their reference count incremented also. - * Different object types have different subobjects. + * All sub-objects must have their reference count incremented + * also. Different object types have different subobjects. 
*/ switch (object->common.type) { case ACPI_TYPE_DEVICE: diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 9ef80f2828e3..f93bb90ea72a 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -217,8 +217,9 @@ acpi_ut_namespace_error(const char *module_name, } else { /* Convert path to external format */ - status = acpi_ns_externalize_name(ACPI_UINT32_MAX, - internal_name, NULL, &name); + status = + acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_name, + NULL, &name); /* Print target name */ @@ -271,9 +272,8 @@ acpi_ut_method_error(const char *module_name, acpi_os_printf(ACPI_MSG_ERROR); if (path) { - status = - acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH, - &node); + status = acpi_ns_get_node(prefix_node, path, + ACPI_NS_NO_UPSEARCH, &node); if (ACPI_FAILURE(status)) { acpi_os_printf("[Could not get node by pathname]"); } diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c deleted file mode 100644 index d435b7b7eb94..000000000000 --- a/drivers/acpi/acpica/utfileio.c +++ /dev/null @@ -1,334 +0,0 @@ -/******************************************************************************* - * - * Module Name: utfileio - simple file I/O routines - * - ******************************************************************************/ - -/* - * Copyright (C) 2000 - 2015, Intel Corp. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification. - * 2. Redistributions in binary form must reproduce at minimum a disclaimer - * substantially similar to the "NO WARRANTY" disclaimer below - * ("Disclaimer") and any redistribution must be conditioned upon - * including a substantially similar Disclaimer requirement for further - * binary redistribution. - * 3. Neither the names of the above-listed copyright holders nor the names - * of any contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * NO WARRANTY - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGES. 
- */ - -#include <acpi/acpi.h> -#include "accommon.h" -#include "actables.h" -#include "acapps.h" -#include "errno.h" - -#ifdef ACPI_ASL_COMPILER -#include "aslcompiler.h" -#endif - -#define _COMPONENT ACPI_CA_DEBUGGER -ACPI_MODULE_NAME("utfileio") - -#ifdef ACPI_APPLICATION -/* Local prototypes */ -static acpi_status -acpi_ut_check_text_mode_corruption(u8 *table, - u32 table_length, u32 file_length); - -static acpi_status -acpi_ut_read_table(FILE * fp, - struct acpi_table_header **table, u32 *table_length); - -/******************************************************************************* - * - * FUNCTION: acpi_ut_check_text_mode_corruption - * - * PARAMETERS: table - Table buffer - * table_length - Length of table from the table header - * file_length - Length of the file that contains the table - * - * RETURN: Status - * - * DESCRIPTION: Check table for text mode file corruption where all linefeed - * characters (LF) have been replaced by carriage return linefeed - * pairs (CR/LF). - * - ******************************************************************************/ - -static acpi_status -acpi_ut_check_text_mode_corruption(u8 *table, u32 table_length, u32 file_length) -{ - u32 i; - u32 pairs = 0; - - if (table_length != file_length) { - ACPI_WARNING((AE_INFO, - "File length (0x%X) is not the same as the table length (0x%X)", - file_length, table_length)); - } - - /* Scan entire table to determine if each LF has been prefixed with a CR */ - - for (i = 1; i < file_length; i++) { - if (table[i] == 0x0A) { - if (table[i - 1] != 0x0D) { - - /* The LF does not have a preceding CR, table not corrupted */ - - return (AE_OK); - } else { - /* Found a CR/LF pair */ - - pairs++; - } - i++; - } - } - - if (!pairs) { - return (AE_OK); - } - - /* - * Entire table scanned, each CR is part of a CR/LF pair -- - * meaning that the table was treated as a text file somewhere. - * - * NOTE: We can't "fix" the table, because any existing CR/LF pairs in the - * original table are left untouched by the text conversion process -- - * meaning that we cannot simply replace CR/LF pairs with LFs. 
- */ - acpi_os_printf("Table has been corrupted by text mode conversion\n"); - acpi_os_printf("All LFs (%u) were changed to CR/LF pairs\n", pairs); - acpi_os_printf("Table cannot be repaired!\n"); - return (AE_BAD_VALUE); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_read_table - * - * PARAMETERS: fp - File that contains table - * table - Return value, buffer with table - * table_length - Return value, length of table - * - * RETURN: Status - * - * DESCRIPTION: Load the DSDT from the file pointer - * - ******************************************************************************/ - -static acpi_status -acpi_ut_read_table(FILE * fp, - struct acpi_table_header **table, u32 *table_length) -{ - struct acpi_table_header table_header; - u32 actual; - acpi_status status; - u32 file_size; - u8 standard_header = TRUE; - s32 count; - - /* Get the file size */ - - file_size = cm_get_file_size(fp); - if (file_size == ACPI_UINT32_MAX) { - return (AE_ERROR); - } - - if (file_size < 4) { - return (AE_BAD_HEADER); - } - - /* Read the signature */ - - fseek(fp, 0, SEEK_SET); - - count = fread(&table_header, 1, sizeof(struct acpi_table_header), fp); - if (count != sizeof(struct acpi_table_header)) { - acpi_os_printf("Could not read the table header\n"); - return (AE_BAD_HEADER); - } - - /* The RSDP table does not have standard ACPI header */ - - if (ACPI_VALIDATE_RSDP_SIG(table_header.signature)) { - *table_length = file_size; - standard_header = FALSE; - } else { - -#if 0 - /* Validate the table header/length */ - - status = acpi_tb_validate_table_header(&table_header); - if (ACPI_FAILURE(status)) { - acpi_os_printf("Table header is invalid!\n"); - return (status); - } -#endif - - /* File size must be at least as long as the Header-specified length */ - - if (table_header.length > file_size) { - acpi_os_printf - ("TableHeader length [0x%X] greater than the input file size [0x%X]\n", - table_header.length, file_size); - -#ifdef ACPI_ASL_COMPILER - acpi_os_printf("File is corrupt or is ASCII text -- " - "it must be a binary file\n"); -#endif - return (AE_BAD_HEADER); - } -#ifdef ACPI_OBSOLETE_CODE - /* We only support a limited number of table types */ - - if (!ACPI_COMPARE_NAME - ((char *)table_header.signature, ACPI_SIG_DSDT) - && !ACPI_COMPARE_NAME((char *)table_header.signature, - ACPI_SIG_PSDT) - && !ACPI_COMPARE_NAME((char *)table_header.signature, - ACPI_SIG_SSDT)) { - acpi_os_printf - ("Table signature [%4.4s] is invalid or not supported\n", - (char *)table_header.signature); - ACPI_DUMP_BUFFER(&table_header, - sizeof(struct acpi_table_header)); - return (AE_ERROR); - } -#endif - - *table_length = table_header.length; - } - - /* Allocate a buffer for the table */ - - *table = acpi_os_allocate((size_t) file_size); - if (!*table) { - acpi_os_printf - ("Could not allocate memory for ACPI table %4.4s (size=0x%X)\n", - table_header.signature, *table_length); - return (AE_NO_MEMORY); - } - - /* Get the rest of the table */ - - fseek(fp, 0, SEEK_SET); - actual = fread(*table, 1, (size_t) file_size, fp); - if (actual == file_size) { - if (standard_header) { - - /* Now validate the checksum */ - - status = acpi_tb_verify_checksum((void *)*table, - ACPI_CAST_PTR(struct - acpi_table_header, - *table)-> - length); - - if (status == AE_BAD_CHECKSUM) { - status = - acpi_ut_check_text_mode_corruption((u8 *) - *table, - file_size, - (*table)-> - length); - return (status); - } - } - return (AE_OK); - } - - if (actual > 0) { - acpi_os_printf("Warning 
- reading table, asked for %X got %X\n", - file_size, actual); - return (AE_OK); - } - - acpi_os_printf("Error - could not read the table file\n"); - acpi_os_free(*table); - *table = NULL; - *table_length = 0; - return (AE_ERROR); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_read_table_from_file - * - * PARAMETERS: filename - File where table is located - * table - Where a pointer to the table is returned - * - * RETURN: Status - * - * DESCRIPTION: Get an ACPI table from a file - * - ******************************************************************************/ - -acpi_status -acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table) -{ - FILE *file; - u32 file_size; - u32 table_length; - acpi_status status = AE_ERROR; - - /* Open the file, get current size */ - - file = fopen(filename, "rb"); - if (!file) { - perror("Could not open input file"); - - if (errno == ENOENT) { - return (AE_NOT_EXIST); - } - - return (status); - } - - file_size = cm_get_file_size(file); - if (file_size == ACPI_UINT32_MAX) { - goto exit; - } - - /* Get the entire file */ - - fprintf(stderr, - "Reading ACPI table from file %12s - Length %.8u (0x%06X)\n", - filename, file_size, file_size); - - status = acpi_ut_read_table(file, table, &table_length); - if (ACPI_FAILURE(status)) { - acpi_os_printf("Could not get table from the file\n"); - } - -exit: - fclose(file); - return (status); -} - -#endif diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index fda8b3def81c..8ad086ed1a06 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -48,7 +48,7 @@ ACPI_MODULE_NAME("uthex") /* Hex to ASCII conversion table */ -static char acpi_gbl_hex_to_ascii[] = { +static const char acpi_gbl_hex_to_ascii[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 7956df1e263c..05ee76eec314 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -127,73 +127,6 @@ cleanup: /******************************************************************************* * - * FUNCTION: acpi_ut_execute_SUB - * - * PARAMETERS: device_node - Node for the device - * return_id - Where the _SUB is returned - * - * RETURN: Status - * - * DESCRIPTION: Executes the _SUB control method that returns the subsystem - * ID of the device. The _SUB value is always a string containing - * either a valid PNP or ACPI ID. 
- * - * NOTE: Internal function, no parameter validation - * - ******************************************************************************/ - -acpi_status -acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, - struct acpi_pnp_device_id **return_id) -{ - union acpi_operand_object *obj_desc; - struct acpi_pnp_device_id *sub; - u32 length; - acpi_status status; - - ACPI_FUNCTION_TRACE(ut_execute_SUB); - - status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB, - ACPI_BTYPE_STRING, &obj_desc); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* Get the size of the String to be returned, includes null terminator */ - - length = obj_desc->string.length + 1; - - /* Allocate a buffer for the SUB */ - - sub = - ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + - (acpi_size) length); - if (!sub) { - status = AE_NO_MEMORY; - goto cleanup; - } - - /* Area for the string starts after PNP_DEVICE_ID struct */ - - sub->string = - ACPI_ADD_PTR(char, sub, sizeof(struct acpi_pnp_device_id)); - - /* Simply copy existing string */ - - strcpy(sub->string, obj_desc->string.pointer); - sub->length = length; - *return_id = sub; - -cleanup: - - /* On exit, we must delete the return object */ - - acpi_ut_remove_reference(obj_desc); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * * FUNCTION: acpi_ut_execute_UID * * PARAMETERS: device_node - Node for the device diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index ccd0745f011e..fd82a122785e 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void) acpi_gbl_next_owner_id_offset = 0; acpi_gbl_debugger_configuration = DEBUGGER_THREADING; acpi_gbl_osi_mutex = NULL; - acpi_gbl_reg_methods_executed = FALSE; acpi_gbl_max_loop_iterations = 0xFFFF; /* Hardware oriented */ diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index f9ff100f0159..58b5d4236429 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c @@ -111,6 +111,7 @@ acpi_ut_short_divide(u64 dividend, */ ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor, quotient.part.hi, remainder32); + ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor, quotient.part.lo, remainder32); @@ -179,6 +180,7 @@ acpi_ut_divide(u64 in_dividend, */ ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo, quotient.part.hi, partial1); + ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo, quotient.part.lo, remainder.part.lo); } @@ -206,12 +208,12 @@ acpi_ut_divide(u64 in_dividend, ACPI_DIV_64_BY_32(normalized_dividend.part.hi, normalized_dividend.part.lo, - normalized_divisor.part.lo, - quotient.part.lo, partial1); + normalized_divisor.part.lo, quotient.part.lo, + partial1); /* - * The quotient is always 32 bits, and simply requires adjustment. - * The 64-bit remainder must be generated. + * The quotient is always 32 bits, and simply requires + * adjustment. The 64-bit remainder must be generated. 
*/ partial1 = quotient.part.lo * divisor.part.hi; partial2.full = (u64) quotient.part.lo * divisor.part.lo; diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index bd4443bdcbad..eab1cfeb52cc 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -264,8 +264,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, */ if ((!this_source_obj) || (ACPI_GET_DESCRIPTOR_TYPE(this_source_obj) != - ACPI_DESC_TYPE_OPERAND) - || (this_source_obj->common.type != ACPI_TYPE_PACKAGE)) { + ACPI_DESC_TYPE_OPERAND) || + (this_source_obj->common.type != ACPI_TYPE_PACKAGE)) { status = walk_callback(ACPI_COPY_TYPE_SIMPLE, this_source_obj, state, context); @@ -318,9 +318,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, * The callback above returned a new target package object. */ acpi_ut_push_generic_state(&state_list, state); - state = acpi_ut_create_pkg_state(this_source_obj, - state->pkg. - this_target_obj, 0); + state = + acpi_ut_create_pkg_state(this_source_obj, + state->pkg.this_target_obj, + 0); if (!state) { /* Free any stacked Update State objects */ diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index ce406e39b669..038ff849ad20 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -111,17 +111,6 @@ acpi_status acpi_ut_mutex_initialize(void) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } -#ifdef ACPI_DEBUGGER - - /* Debugger Support */ - - status = acpi_os_create_mutex(&acpi_gbl_db_command_ready); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - status = acpi_os_create_mutex(&acpi_gbl_db_command_complete); -#endif return_ACPI_STATUS(status); } @@ -162,12 +151,6 @@ void acpi_ut_mutex_terminate(void) /* Delete the reader/writer lock */ acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock); - -#ifdef ACPI_DEBUGGER - acpi_os_delete_mutex(acpi_gbl_db_command_ready); - acpi_os_delete_mutex(acpi_gbl_db_command_complete); -#endif - return_VOID; } @@ -290,8 +273,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) (u32)this_thread_id, acpi_ut_get_mutex_name(mutex_id))); - status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, - ACPI_WAIT_FOREVER); + status = + acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, + ACPI_WAIT_FOREVER); if (ACPI_SUCCESS(status)) { ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u acquired Mutex [%s]\n", diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c index 1d5f6b17b766..9c3cadc27fb8 100644 --- a/drivers/acpi/acpica/utnonansi.c +++ b/drivers/acpi/acpica/utnonansi.c @@ -282,8 +282,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) /* Divide the digit into the correct position */ - (void)acpi_ut_short_divide((dividend - (u64)this_digit), - base, "ient, NULL); + (void)acpi_ut_short_divide((dividend - (u64)this_digit), base, + "ient, NULL); if (return_value > quotient) { if (to_integer_op) { diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 7d83efe1ea29..787eccf6a1d5 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -112,9 +112,9 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char /* These types require a secondary object */ - second_object = acpi_ut_allocate_object_desc_dbg(module_name, - line_number, - component_id); + second_object = + acpi_ut_allocate_object_desc_dbg(module_name, line_number, + component_id); if (!second_object) { 
acpi_ut_delete_object_desc(object); return_PTR(NULL); @@ -253,7 +253,8 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size) buffer = ACPI_ALLOCATE_ZEROED(buffer_size); if (!buffer) { ACPI_ERROR((AE_INFO, "Could not allocate size %u", - (u32) buffer_size)); + (u32)buffer_size)); + acpi_ut_remove_reference(buffer_desc); return_PTR(NULL); } @@ -305,7 +306,8 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size) string = ACPI_ALLOCATE_ZEROED(string_size + 1); if (!string) { ACPI_ERROR((AE_INFO, "Could not allocate size %u", - (u32) string_size)); + (u32)string_size)); + acpi_ut_remove_reference(string_desc); return_PTR(NULL); } @@ -649,8 +651,9 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object, info.object_space = 0; info.num_packages = 1; - status = acpi_ut_walk_package_tree(internal_object, NULL, - acpi_ut_get_element_length, &info); + status = + acpi_ut_walk_package_tree(internal_object, NULL, + acpi_ut_get_element_length, &info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -660,7 +663,8 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object, * just add the length of the package objects themselves. * Round up to the next machine word. */ - info.length += ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) * + info.length += + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) * (acpi_size) info.num_packages; /* Return the total package length */ @@ -692,8 +696,8 @@ acpi_ut_get_object_size(union acpi_operand_object *internal_object, ACPI_FUNCTION_ENTRY(); if ((ACPI_GET_DESCRIPTOR_TYPE(internal_object) == - ACPI_DESC_TYPE_OPERAND) - && (internal_object->common.type == ACPI_TYPE_PACKAGE)) { + ACPI_DESC_TYPE_OPERAND) && + (internal_object->common.type == ACPI_TYPE_PACKAGE)) { status = acpi_ut_get_package_object_size(internal_object, obj_length); diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 8f3d203aed79..0809d73193e1 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -269,9 +269,10 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name) previous_interface = next_interface = acpi_gbl_supported_interfaces; while (next_interface) { if (!strcmp(interface_name, next_interface->name)) { - - /* Found: name is in either the static list or was added at runtime */ - + /* + * Found: name is in either the static list + * or was added at runtime + */ if (next_interface->flags & ACPI_OSI_DYNAMIC) { /* Interface was added dynamically, remove and free it */ @@ -288,8 +289,8 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name) ACPI_FREE(next_interface); } else { /* - * Interface is in static list. If marked invalid, then it - * does not actually exist. Else, mark it invalid. + * Interface is in static list. If marked invalid, then + * it does not actually exist. Else, mark it invalid. 
*/ if (next_interface->flags & ACPI_OSI_INVALID) { return (AE_NOT_EXIST); diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c index 2959217067cb..ebb811c43c89 100644 --- a/drivers/acpi/acpica/utownerid.c +++ b/drivers/acpi/acpica/utownerid.c @@ -73,8 +73,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) /* Guard against multiple allocations of ID to the same location */ if (*owner_id) { - ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists", - *owner_id)); + ACPI_ERROR((AE_INFO, + "Owner ID [0x%2.2X] already exists", *owner_id)); return_ACPI_STATUS(AE_ALREADY_EXISTS); } @@ -87,8 +87,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) /* * Find a free owner ID, cycle through all possible IDs on repeated - * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have - * to be scanned twice. + * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index + * may have to be scanned twice. */ for (i = 0, j = acpi_gbl_last_owner_id_index; i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) { @@ -141,8 +141,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) * they are released when a table is unloaded or a method completes * execution. * - * If this error happens, there may be very deep nesting of invoked control - * methods, or there may be a bug where the IDs are not released. + * If this error happens, there may be very deep nesting of invoked + * control methods, or there may be a bug where the IDs are not released. */ status = AE_OWNER_ID_LIMIT; ACPI_ERROR((AE_INFO, diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 97898ed71b4b..9f8e415bf0af 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -225,8 +225,10 @@ const union acpi_predefined_info *acpi_ut_match_resource_name(char *name) { const union acpi_predefined_info *this_name; - /* Quick check for a predefined name, first character must be underscore */ - + /* + * Quick check for a predefined name, first character must + * be underscore + */ if (name[0] != '_') { return (NULL); } diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index b26297c5de49..01f04da779c5 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -314,8 +314,9 @@ static char *acpi_ut_format_number(char *string, if (need_prefix) { string = acpi_ut_bound_string_output(string, end, '0'); if (base == 16) { - string = acpi_ut_bound_string_output(string, end, - upper ? 'X' : 'x'); + string = + acpi_ut_bound_string_output(string, end, + upper ? 
'X' : 'x'); } } if (!(type & ACPI_FORMAT_LEFT)) { @@ -400,6 +401,7 @@ acpi_ut_vsnprintf(char *string, } else { break; } + } while (1); /* Process width */ @@ -429,6 +431,7 @@ acpi_ut_vsnprintf(char *string, ++format; precision = va_arg(args, int); } + if (precision < 0) { precision = 0; } @@ -488,10 +491,12 @@ acpi_ut_vsnprintf(char *string, ' '); } } + for (i = 0; i < length; ++i) { pos = acpi_ut_bound_string_output(pos, end, *s); ++s; } + while (length < width--) { pos = acpi_ut_bound_string_output(pos, end, ' '); @@ -529,9 +534,9 @@ acpi_ut_vsnprintf(char *string, } p = va_arg(args, void *); - pos = acpi_ut_format_number(pos, end, - ACPI_TO_INTEGER(p), 16, - width, precision, type); + pos = + acpi_ut_format_number(pos, end, ACPI_TO_INTEGER(p), + 16, width, precision, type); continue; default: diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index b3505dbc715e..d50b41c4daa7 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -441,8 +441,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, acpi_ut_validate_resource(walk_state, aml, &resource_index); if (ACPI_FAILURE(status)) { /* - * Exit on failure. Cannot continue because the descriptor length - * may be bogus also. + * Exit on failure. Cannot continue because the descriptor + * length may be bogus also. */ return_ACPI_STATUS(status); } @@ -568,8 +568,8 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state, } /* - * Check validity of the resource type, via acpi_gbl_resource_types. Zero - * indicates an invalid resource. + * Check validity of the resource type, via acpi_gbl_resource_types. + * Zero indicates an invalid resource. */ if (!acpi_gbl_resource_types[resource_index]) { goto invalid_resource; diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index f201171c5dda..0050e00997ed 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c @@ -246,6 +246,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object, state->pkg.dest_object = external_object; state->pkg.index = index; state->pkg.num_packages = 1; + return (state); } @@ -279,6 +280,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void) state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL; state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING; + return (state); } @@ -304,5 +306,6 @@ void acpi_ut_delete_generic_state(union acpi_generic_state *state) if (state) { (void)acpi_os_release_object(acpi_gbl_state_cache, state); } + return; } diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index 4ddd105d9741..958b2f7b552d 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c @@ -135,6 +135,7 @@ void acpi_ut_print_string(char *string, u16 max_length) break; } } + acpi_os_printf("\""); if (i == max_length && string[i]) { @@ -239,6 +240,14 @@ void acpi_ut_repair_name(char *name) ACPI_FUNCTION_NAME(ut_repair_name); + /* + * Special case for the root node. This can happen if we get an + * error during the execution of module-level code. 
+ */ + if (ACPI_COMPARE_NAME(name, "\\___")) { + return; + } + ACPI_MOVE_NAME(&original_name, name); /* Check each character in the name */ diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 9a7dc8196a5d..ea698e98442e 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -150,9 +150,9 @@ void *acpi_ut_allocate_and_track(acpi_size size, return (NULL); } - status = acpi_ut_track_allocation(allocation, size, - ACPI_MEM_MALLOC, component, module, - line); + status = + acpi_ut_track_allocation(allocation, size, ACPI_MEM_MALLOC, + component, module, line); if (ACPI_FAILURE(status)) { acpi_os_free(allocation); return (NULL); @@ -161,6 +161,7 @@ void *acpi_ut_allocate_and_track(acpi_size size, acpi_gbl_global_list->total_allocated++; acpi_gbl_global_list->total_size += (u32)size; acpi_gbl_global_list->current_total_size += (u32)size; + if (acpi_gbl_global_list->current_total_size > acpi_gbl_global_list->max_occupied) { acpi_gbl_global_list->max_occupied = @@ -223,6 +224,7 @@ void *acpi_ut_allocate_zeroed_and_track(acpi_size size, acpi_gbl_global_list->total_allocated++; acpi_gbl_global_list->total_size += (u32)size; acpi_gbl_global_list->current_total_size += (u32)size; + if (acpi_gbl_global_list->current_total_size > acpi_gbl_global_list->max_occupied) { acpi_gbl_global_list->max_occupied = @@ -269,8 +271,8 @@ acpi_ut_free_and_track(void *allocation, acpi_gbl_global_list->total_freed++; acpi_gbl_global_list->current_total_size -= debug_block->size; - status = acpi_ut_remove_allocation(debug_block, - component, module, line); + status = + acpi_ut_remove_allocation(debug_block, component, module, line); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not free memory")); } @@ -525,35 +527,35 @@ void acpi_ut_dump_allocation_info(void) /* ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, - ("%30s: %4d (%3d Kb)\n", "Current allocations", - mem_list->current_count, - ROUND_UP_TO_1K (mem_list->current_size))); + ("%30s: %4d (%3d Kb)\n", "Current allocations", + mem_list->current_count, + ROUND_UP_TO_1K (mem_list->current_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, - ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", - mem_list->max_concurrent_count, - ROUND_UP_TO_1K (mem_list->max_concurrent_size))); + ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", + mem_list->max_concurrent_count, + ROUND_UP_TO_1K (mem_list->max_concurrent_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, - ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", - running_object_count, - ROUND_UP_TO_1K (running_object_size))); + ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", + running_object_count, + ROUND_UP_TO_1K (running_object_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, - ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", - running_alloc_count, - ROUND_UP_TO_1K (running_alloc_size))); + ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", + running_alloc_count, + ROUND_UP_TO_1K (running_alloc_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, - ("%30s: %4d (%3d Kb)\n", "Current Nodes", - acpi_gbl_current_node_count, - ROUND_UP_TO_1K (acpi_gbl_current_node_size))); + ("%30s: %4d (%3d Kb)\n", "Current Nodes", + acpi_gbl_current_node_count, + ROUND_UP_TO_1K (acpi_gbl_current_node_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, - ("%30s: %4d (%3d Kb)\n", "Max Nodes", - acpi_gbl_max_concurrent_node_count, - ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * - sizeof 
(struct acpi_namespace_node))))); + ("%30s: %4d (%3d Kb)\n", "Max Nodes", + acpi_gbl_max_concurrent_node_count, + ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * + sizeof (struct acpi_namespace_node))))); */ return_VOID; } diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index f9c8f9ce1f0f..9f3f0a1591f6 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -154,7 +154,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer) * Populate the return buffer */ info_ptr = (struct acpi_system_info *)out_buffer->pointer; - info_ptr->acpi_ca_version = ACPI_CA_VERSION; /* System flags (ACPI capabilities) */ @@ -216,7 +215,6 @@ acpi_status acpi_get_statistics(struct acpi_statistics *stats) /* Other counters */ stats->method_count = acpi_method_count; - return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 98d578753101..f6cbaf451dbf 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -117,6 +117,7 @@ acpi_exception(const char *module_name, acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ", acpi_format_exception(status)); } + va_start(arg_list, format); acpi_os_vprintf(format, arg_list); ACPI_MSG_SUFFIX; diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index a7137ec28447..e38facd3e32f 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -147,6 +147,28 @@ acpi_status __init acpi_enable_subsystem(u32 flags) ACPI_FUNCTION_TRACE(acpi_enable_subsystem); + /* + * The early initialization phase is complete. The namespace is loaded, + * and we can now support address spaces other than Memory, I/O, and + * PCI_Config. + */ + acpi_gbl_early_initialization = FALSE; + + /* + * Install the default operation region handlers. These are the + * handlers that are defined by the ACPI specification to be + * "always accessible" -- namely, system_memory, system_IO, and + * PCI_Config. This also means that no _REG methods need to be + * run for these address spaces. We need to have these handlers + * installed before any AML code can be executed, especially any + * module-level code (11/2015). + */ + status = acpi_ev_install_region_handlers(); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "During Region initialization")); + return_ACPI_STATUS(status); + } #if (!ACPI_REDUCED_HARDWARE) /* Enable ACPI mode */ @@ -175,23 +197,7 @@ acpi_status __init acpi_enable_subsystem(u32 flags) return_ACPI_STATUS(status); } } -#endif /* !ACPI_REDUCED_HARDWARE */ - - /* - * Install the default op_region handlers. These are installed unless - * other handlers have already been installed via the - * install_address_space_handler interface. - */ - if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Installing default address space handlers\n")); - status = acpi_ev_install_region_handlers(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } -#if (!ACPI_REDUCED_HARDWARE) /* * Initialize ACPI Event handling (Fixed and General Purpose) * @@ -261,6 +267,7 @@ acpi_status __init acpi_initialize_objects(u32 flags) * initialized, even if they contain executable AML (see the call to * acpi_ns_initialize_objects below). 
*/ + acpi_gbl_reg_methods_enabled = TRUE; if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[Init] Executing _REG OpRegion methods\n")); @@ -285,8 +292,14 @@ acpi_status __init acpi_initialize_objects(u32 flags) * outside of any control method is wrapped with a temporary control * method object and placed on a global list. The methods on this list * are executed below. + * + * This case executes the module-level code for all tables only after + * all of the tables have been loaded. It is a legacy option and is + * not compatible with other ACPI implementations. See acpi_ns_load_table. */ - acpi_ns_exec_module_code_list(); + if (acpi_gbl_group_module_level_code) { + acpi_ns_exec_module_code_list(); + } /* * Initialize the objects that remain uninitialized. This runs the diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c index f2606af3364c..95d6123a7010 100644 --- a/drivers/acpi/acpica/utxfmutex.c +++ b/drivers/acpi/acpica/utxfmutex.c @@ -89,9 +89,9 @@ acpi_ut_get_mutex_object(acpi_handle handle, mutex_node = handle; if (pathname != NULL) { - status = acpi_get_handle(handle, pathname, - ACPI_CAST_PTR(acpi_handle, - &mutex_node)); + status = + acpi_get_handle(handle, pathname, + ACPI_CAST_PTR(acpi_handle, &mutex_node)); if (ACPI_FAILURE(status)) { return (status); } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index a212cefae524..891c42d1cd65 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -180,14 +180,15 @@ static void acpi_print_osc_error(acpi_handle handle, int i; if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) - printk(KERN_DEBUG "%s\n", error); + printk(KERN_DEBUG "%s: %s\n", context->uuid_str, error); else { - printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error); + printk(KERN_DEBUG "%s (%s): %s\n", + (char *)buffer.pointer, context->uuid_str, error); kfree(buffer.pointer); } - printk(KERN_DEBUG"_OSC request data:"); + printk(KERN_DEBUG "_OSC request data:"); for (i = 0; i < context->cap.length; i += sizeof(u32)) - printk("%x ", *((u32 *)(context->cap.pointer + i))); + printk(" %x", *((u32 *)(context->cap.pointer + i))); printk("\n"); } @@ -1094,6 +1095,7 @@ static int __init acpi_init(void) acpi_debugfs_init(); acpi_sleep_proc_init(); acpi_wakeup_device_init(); + acpi_debugger_init(); return 0; } diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 707cf6213bc2..b9afb47db7ed 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -104,7 +104,7 @@ static void acpi_expose_nondev_subnodes(struct kobject *kobj, init_completion(&dn->kobj_done); ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype, - kobj, dn->name); + kobj, "%s", dn->name); if (ret) acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret); else diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c index fa4585a6914e..ee9e0f27b2bf 100644 --- a/drivers/acpi/gsi.c +++ b/drivers/acpi/gsi.c @@ -17,25 +17,6 @@ enum acpi_irq_model_id acpi_irq_model; static struct fwnode_handle *acpi_gsi_domain_id; -static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity) -{ - switch (polarity) { - case ACPI_ACTIVE_LOW: - return trigger == ACPI_EDGE_SENSITIVE ? - IRQ_TYPE_EDGE_FALLING : - IRQ_TYPE_LEVEL_LOW; - case ACPI_ACTIVE_HIGH: - return trigger == ACPI_EDGE_SENSITIVE ? 
- IRQ_TYPE_EDGE_RISING : - IRQ_TYPE_LEVEL_HIGH; - case ACPI_ACTIVE_BOTH: - if (trigger == ACPI_EDGE_SENSITIVE) - return IRQ_TYPE_EDGE_BOTH; - default: - return IRQ_TYPE_NONE; - } -} - /** * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI * @gsi: GSI IRQ number to map @@ -82,7 +63,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, fwspec.fwnode = acpi_gsi_domain_id; fwspec.param[0] = gsi; - fwspec.param[1] = acpi_gsi_get_irq_type(trigger, polarity); + fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param_count = 2; return irq_create_fwspec_mapping(&fwspec); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 11d87bf67e73..1e6833a5cd44 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -86,6 +86,14 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) +extern struct list_head acpi_bus_id_list; + +struct acpi_device_bus_id { + char bus_id[15]; + unsigned int instance_no; + struct list_head node; +}; + int acpi_device_add(struct acpi_device *device, void (*release)(struct device *)); void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index e7ed39bab97d..aa45d4802707 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) if (!dev->driver) { /* dev->driver may be null if we're being removed */ dev_dbg(dev, "%s: no driver found for dev\n", __func__); - return; + goto out_unlock; } if (!acpi_desc) { diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 32d684af0ec7..67da6fb72274 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -220,6 +220,7 @@ void acpi_os_printf(const char *fmt, ...) acpi_os_vprintf(fmt, args); va_end(args); } +EXPORT_SYMBOL(acpi_os_printf); void acpi_os_vprintf(const char *fmt, va_list args) { @@ -234,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args) printk(KERN_CONT "%s", buffer); } #else - printk(KERN_CONT "%s", buffer); + if (acpi_debugger_write_log(buffer) < 0) + printk(KERN_CONT "%s", buffer); #endif } @@ -364,6 +366,19 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) iounmap(vaddr); } +/** + * acpi_os_map_iomem - Get a virtual address for a given physical address range. + * @phys: Start of the physical address range to map. + * @size: Size of the physical address range to map. + * + * Look up the given physical address range in the list of existing ACPI memory + * mappings. If found, get a reference to it and return a pointer to it (its + * virtual address). If not found, map it, add it to that list and return a + * pointer to it. + * + * During early init (when acpi_gbl_permanent_mmap has not been set yet) this + * routine simply calls __acpi_map_table() to get the job done. + */ void __iomem *__init_refok acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) { @@ -439,6 +454,20 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map) } } +/** + * acpi_os_unmap_iomem - Drop a memory mapping reference. + * @virt: Start of the address range to drop a reference to. + * @size: Size of the address range to drop a reference to. + * + * Look up the given virtual address range in the list of existing ACPI memory + * mappings, drop a reference to it and unmap it if there are no more active + * references to it. 
+ * + * During early init (when acpi_gbl_permanent_mmap has not been set yet) this + * routine simply calls __acpi_unmap_table() to get the job done. Since + * __acpi_unmap_table() is an __init function, the __ref annotation is needed + * here. + */ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) { struct acpi_ioremap *map; @@ -1101,6 +1130,200 @@ static void acpi_os_execute_deferred(struct work_struct *work) kfree(dpc); } +#ifdef CONFIG_ACPI_DEBUGGER +static struct acpi_debugger acpi_debugger; +static bool acpi_debugger_initialized; + +int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops) +{ + int ret = 0; + + mutex_lock(&acpi_debugger.lock); + if (acpi_debugger.ops) { + ret = -EBUSY; + goto err_lock; + } + + acpi_debugger.owner = owner; + acpi_debugger.ops = ops; + +err_lock: + mutex_unlock(&acpi_debugger.lock); + return ret; +} +EXPORT_SYMBOL(acpi_register_debugger); + +void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) +{ + mutex_lock(&acpi_debugger.lock); + if (ops == acpi_debugger.ops) { + acpi_debugger.ops = NULL; + acpi_debugger.owner = NULL; + } + mutex_unlock(&acpi_debugger.lock); +} +EXPORT_SYMBOL(acpi_unregister_debugger); + +int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) +{ + int ret; + int (*func)(acpi_osd_exec_callback, void *); + struct module *owner; + + if (!acpi_debugger_initialized) + return -ENODEV; + mutex_lock(&acpi_debugger.lock); + if (!acpi_debugger.ops) { + ret = -ENODEV; + goto err_lock; + } + if (!try_module_get(acpi_debugger.owner)) { + ret = -ENODEV; + goto err_lock; + } + func = acpi_debugger.ops->create_thread; + owner = acpi_debugger.owner; + mutex_unlock(&acpi_debugger.lock); + + ret = func(function, context); + + mutex_lock(&acpi_debugger.lock); + module_put(owner); +err_lock: + mutex_unlock(&acpi_debugger.lock); + return ret; +} + +ssize_t acpi_debugger_write_log(const char *msg) +{ + ssize_t ret; + ssize_t (*func)(const char *); + struct module *owner; + + if (!acpi_debugger_initialized) + return -ENODEV; + mutex_lock(&acpi_debugger.lock); + if (!acpi_debugger.ops) { + ret = -ENODEV; + goto err_lock; + } + if (!try_module_get(acpi_debugger.owner)) { + ret = -ENODEV; + goto err_lock; + } + func = acpi_debugger.ops->write_log; + owner = acpi_debugger.owner; + mutex_unlock(&acpi_debugger.lock); + + ret = func(msg); + + mutex_lock(&acpi_debugger.lock); + module_put(owner); +err_lock: + mutex_unlock(&acpi_debugger.lock); + return ret; +} + +ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length) +{ + ssize_t ret; + ssize_t (*func)(char *, size_t); + struct module *owner; + + if (!acpi_debugger_initialized) + return -ENODEV; + mutex_lock(&acpi_debugger.lock); + if (!acpi_debugger.ops) { + ret = -ENODEV; + goto err_lock; + } + if (!try_module_get(acpi_debugger.owner)) { + ret = -ENODEV; + goto err_lock; + } + func = acpi_debugger.ops->read_cmd; + owner = acpi_debugger.owner; + mutex_unlock(&acpi_debugger.lock); + + ret = func(buffer, buffer_length); + + mutex_lock(&acpi_debugger.lock); + module_put(owner); +err_lock: + mutex_unlock(&acpi_debugger.lock); + return ret; +} + +int acpi_debugger_wait_command_ready(void) +{ + int ret; + int (*func)(bool, char *, size_t); + struct module *owner; + + if (!acpi_debugger_initialized) + return -ENODEV; + mutex_lock(&acpi_debugger.lock); + if (!acpi_debugger.ops) { + ret = -ENODEV; + goto err_lock; + } + if (!try_module_get(acpi_debugger.owner)) { + ret = -ENODEV; + goto err_lock; + } + func = 
acpi_debugger.ops->wait_command_ready; + owner = acpi_debugger.owner; + mutex_unlock(&acpi_debugger.lock); + + ret = func(acpi_gbl_method_executing, + acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE); + + mutex_lock(&acpi_debugger.lock); + module_put(owner); +err_lock: + mutex_unlock(&acpi_debugger.lock); + return ret; +} + +int acpi_debugger_notify_command_complete(void) +{ + int ret; + int (*func)(void); + struct module *owner; + + if (!acpi_debugger_initialized) + return -ENODEV; + mutex_lock(&acpi_debugger.lock); + if (!acpi_debugger.ops) { + ret = -ENODEV; + goto err_lock; + } + if (!try_module_get(acpi_debugger.owner)) { + ret = -ENODEV; + goto err_lock; + } + func = acpi_debugger.ops->notify_command_complete; + owner = acpi_debugger.owner; + mutex_unlock(&acpi_debugger.lock); + + ret = func(); + + mutex_lock(&acpi_debugger.lock); + module_put(owner); +err_lock: + mutex_unlock(&acpi_debugger.lock); + return ret; +} + +int __init acpi_debugger_init(void) +{ + mutex_init(&acpi_debugger.lock); + acpi_debugger_initialized = true; + return 0; +} +#endif + /******************************************************************************* * * FUNCTION: acpi_os_execute @@ -1127,6 +1350,15 @@ acpi_status acpi_os_execute(acpi_execute_type type, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); + if (type == OSL_DEBUGGER_MAIN_THREAD) { + ret = acpi_debugger_create_thread(function, context); + if (ret) { + pr_err("Call to kthread_create() failed.\n"); + status = AE_ERROR; + } + goto out_thread; + } + /* * Allocate/initialize DPC structure. Note that this memory will be * freed by the callee. The kernel handles the work_struct list in a @@ -1151,11 +1383,17 @@ acpi_status acpi_os_execute(acpi_execute_type type, if (type == OSL_NOTIFY_HANDLER) { queue = kacpi_notify_wq; INIT_WORK(&dpc->work, acpi_os_execute_deferred); - } else { + } else if (type == OSL_GPE_HANDLER) { queue = kacpid_wq; INIT_WORK(&dpc->work, acpi_os_execute_deferred); + } else { + pr_err("Unsupported os_execute type %d.\n", type); + status = AE_ERROR; } + if (ACPI_FAILURE(status)) + goto err_workqueue; + /* * On some machines, a software-initiated SMI causes corruption unless * the SMI runs on CPU 0. An SMI can be initiated by any AML, but @@ -1164,13 +1402,15 @@ acpi_status acpi_os_execute(acpi_execute_type type, * queueing on CPU 0. 
*/ ret = queue_work_on(0, queue, &dpc->work); - if (!ret) { printk(KERN_ERR PREFIX "Call to queue_work() failed.\n"); status = AE_ERROR; - kfree(dpc); } +err_workqueue: + if (ACPI_FAILURE(status)) + kfree(dpc); +out_thread: return status; } EXPORT_SYMBOL(acpi_os_execute); @@ -1358,10 +1598,39 @@ acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read) chars = strlen(buffer) - 1; buffer[chars] = '\0'; } +#else + int ret; + + ret = acpi_debugger_read_cmd(buffer, buffer_length); + if (ret < 0) + return AE_ERROR; + if (bytes_read) + *bytes_read = ret; #endif return AE_OK; } +EXPORT_SYMBOL(acpi_os_get_line); + +acpi_status acpi_os_wait_command_ready(void) +{ + int ret; + + ret = acpi_debugger_wait_command_ready(); + if (ret < 0) + return AE_ERROR; + return AE_OK; +} + +acpi_status acpi_os_notify_command_complete(void) +{ + int ret; + + ret = acpi_debugger_notify_command_complete(); + if (ret < 0) + return AE_ERROR; + return AE_OK; +} acpi_status acpi_os_signal(u32 function, void *info) { diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index c9336751e5e3..d30184c7f3bc 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -131,9 +131,6 @@ static void do_prt_fixups(struct acpi_prt_entry *entry, quirk = &prt_quirks[i]; /* All current quirks involve link devices, not GSIs */ - if (!prt->source) - continue; - if (dmi_check_system(quirk->system) && entry->id.segment == quirk->segment && entry->id.bus == quirk->bus && diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 7c8408b946ca..fa2863567eed 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -4,6 +4,7 @@ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> + * Copyright (c) 2015, The Linux Foundation. All rights reserved. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -67,12 +68,12 @@ static struct acpi_scan_handler pci_link_handler = { * later even the link is disable. Instead, we just repick the active irq */ struct acpi_pci_link_irq { - u8 active; /* Current IRQ */ + u32 active; /* Current IRQ */ u8 triggering; /* All IRQs */ u8 polarity; /* All IRQs */ u8 resource_type; u8 possible_count; - u8 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; + u32 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; u8 initialized:1; u8 reserved:7; }; @@ -437,7 +438,6 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) * enabled system. 
*/ -#define ACPI_MAX_IRQS 256 #define ACPI_MAX_ISA_IRQ 16 #define PIRQ_PENALTY_PCI_AVAILABLE (0) @@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) #define PIRQ_PENALTY_ISA_USED (16*16*16*16*16) #define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16) -static int acpi_irq_penalty[ACPI_MAX_IRQS] = { +static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = { PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */ @@ -464,9 +464,68 @@ static int acpi_irq_penalty[ACPI_MAX_IRQS] = { PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */ PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */ PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */ - /* >IRQ15 */ }; +struct irq_penalty_info { + int irq; + int penalty; + struct list_head node; +}; + +static LIST_HEAD(acpi_irq_penalty_list); + +static int acpi_irq_get_penalty(int irq) +{ + struct irq_penalty_info *irq_info; + + if (irq < ACPI_MAX_ISA_IRQ) + return acpi_irq_isa_penalty[irq]; + + list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) { + if (irq_info->irq == irq) + return irq_info->penalty; + } + + return 0; +} + +static int acpi_irq_set_penalty(int irq, int new_penalty) +{ + struct irq_penalty_info *irq_info; + + /* see if this is a ISA IRQ */ + if (irq < ACPI_MAX_ISA_IRQ) { + acpi_irq_isa_penalty[irq] = new_penalty; + return 0; + } + + /* next, try to locate from the dynamic list */ + list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) { + if (irq_info->irq == irq) { + irq_info->penalty = new_penalty; + return 0; + } + } + + /* nope, let's allocate a slot for this IRQ */ + irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL); + if (!irq_info) + return -ENOMEM; + + irq_info->irq = irq; + irq_info->penalty = new_penalty; + list_add_tail(&irq_info->node, &acpi_irq_penalty_list); + + return 0; +} + +static void acpi_irq_add_penalty(int irq, int penalty) +{ + int curpen = acpi_irq_get_penalty(irq); + + acpi_irq_set_penalty(irq, curpen + penalty); +} + int __init acpi_irq_penalty_init(void) { struct acpi_pci_link *link; @@ -487,15 +546,16 @@ int __init acpi_irq_penalty_init(void) link->irq.possible_count; for (i = 0; i < link->irq.possible_count; i++) { - if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) - acpi_irq_penalty[link->irq. - possible[i]] += - penalty; + if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) { + int irqpos = link->irq.possible[i]; + + acpi_irq_add_penalty(irqpos, penalty); + } } } else if (link->irq.active) { - acpi_irq_penalty[link->irq.active] += - PIRQ_PENALTY_PCI_POSSIBLE; + acpi_irq_add_penalty(link->irq.active, + PIRQ_PENALTY_PCI_POSSIBLE); } } @@ -547,12 +607,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) * the use of IRQs 9, 10, 11, and >15. */ for (i = (link->irq.possible_count - 1); i >= 0; i--) { - if (acpi_irq_penalty[irq] > - acpi_irq_penalty[link->irq.possible[i]]) + if (acpi_irq_get_penalty(irq) > + acpi_irq_get_penalty(link->irq.possible[i])) irq = link->irq.possible[i]; } } - if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { + if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) { printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. 
" "Try pci=noacpi or acpi=off\n", acpi_device_name(link->device), @@ -568,7 +628,8 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) acpi_device_bid(link->device)); return -ENODEV; } else { - acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING; + acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING); + printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", acpi_device_name(link->device), acpi_device_bid(link->device), link->irq.active); @@ -778,7 +839,7 @@ static void acpi_pci_link_remove(struct acpi_device *device) } /* - * modify acpi_irq_penalty[] from cmdline + * modify penalty from cmdline */ static int __init acpi_irq_penalty_update(char *str, int used) { @@ -796,13 +857,10 @@ static int __init acpi_irq_penalty_update(char *str, int used) if (irq < 0) continue; - if (irq >= ARRAY_SIZE(acpi_irq_penalty)) - continue; - if (used) - acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; + acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED); else - acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE; + acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE); if (retval != 2) /* no next number */ break; @@ -819,18 +877,15 @@ static int __init acpi_irq_penalty_update(char *str, int used) */ void acpi_penalize_isa_irq(int irq, int active) { - if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { - if (active) - acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; - else - acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; - } + if (irq >= 0) + acpi_irq_add_penalty(irq, active ? + PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); } bool acpi_isa_irq_available(int irq) { - return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || - acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); + return irq >= 0 && + (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); } /* @@ -840,13 +895,18 @@ bool acpi_isa_irq_available(int irq) */ void acpi_penalize_sci_irq(int irq, int trigger, int polarity) { - if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { - if (trigger != ACPI_MADT_TRIGGER_LEVEL || - polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) - acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS; - else - acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; - } + int penalty; + + if (irq < 0) + return; + + if (trigger != ACPI_MADT_TRIGGER_LEVEL || + polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) + penalty = PIRQ_PENALTY_ISA_ALWAYS; + else + penalty = PIRQ_PENALTY_PCI_USING; + + acpi_irq_add_penalty(irq, penalty); } /* diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index f4e02ae93f58..11154a330f07 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr, goto err_remove_sysfs_thermal; } - sysfs_remove_link(&pr->cdev->device.kobj, "device"); + return 0; + err_remove_sysfs_thermal: sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); err_thermal_unregister: diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 88f4306744c0..2aee41655ce9 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -346,7 +346,7 @@ void acpi_free_properties(struct acpi_device *adev) * * Return: %0 if property with @name has been found (success), * %-EINVAL if the arguments are invalid, - * %-ENODATA if the property doesn't exist, + * %-EINVAL if the property doesn't exist, * %-EPROTO if the property value type doesn't match @type. 
*/ static int acpi_data_get_property(struct acpi_device_data *data, @@ -360,7 +360,7 @@ static int acpi_data_get_property(struct acpi_device_data *data, return -EINVAL; if (!data->pointer || !data->properties) - return -ENODATA; + return -EINVAL; properties = data->properties; for (i = 0; i < properties->package.count; i++) { @@ -375,13 +375,13 @@ static int acpi_data_get_property(struct acpi_device_data *data, if (!strcmp(name, propname->string.pointer)) { if (type != ACPI_TYPE_ANY && propvalue->type != type) return -EPROTO; - else if (obj) + if (obj) *obj = propvalue; return 0; } } - return -ENODATA; + return -EINVAL; } /** @@ -439,7 +439,7 @@ int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, * * Return: %0 if array property (package) with @name has been found (success), * %-EINVAL if the arguments are invalid, - * %-ENODATA if the property doesn't exist, + * %-EINVAL if the property doesn't exist, * %-EPROTO if the property is not a package or the type of its elements * doesn't match @type. */ diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index cdc5c2599beb..d02fd53042a5 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -23,6 +23,7 @@ #include <linux/export.h> #include <linux/ioport.h> #include <linux/slab.h> +#include <linux/irq.h> #ifdef CONFIG_X86 #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) @@ -336,6 +337,31 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable) } EXPORT_SYMBOL_GPL(acpi_dev_irq_flags); +/** + * acpi_dev_get_irq_type - Determine irq type. + * @triggering: Triggering type as provided by ACPI. + * @polarity: Interrupt polarity as provided by ACPI. + */ +unsigned int acpi_dev_get_irq_type(int triggering, int polarity) +{ + switch (polarity) { + case ACPI_ACTIVE_LOW: + return triggering == ACPI_EDGE_SENSITIVE ? + IRQ_TYPE_EDGE_FALLING : + IRQ_TYPE_LEVEL_LOW; + case ACPI_ACTIVE_HIGH: + return triggering == ACPI_EDGE_SENSITIVE ? 
+ IRQ_TYPE_EDGE_RISING : + IRQ_TYPE_LEVEL_HIGH; + case ACPI_ACTIVE_BOTH: + if (triggering == ACPI_EDGE_SENSITIVE) + return IRQ_TYPE_EDGE_BOTH; + default: + return IRQ_TYPE_NONE; + } +} +EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type); + static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) { res->start = gsi; diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index cb3dedb1beae..ad0b13ad4bbb 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -417,11 +417,11 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery) if ((value & 0xf000) != sel) { value &= 0x0fff; value |= sel; - ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, + ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&value, 2); - if (ret) - goto end; + if (ret) + goto end; } } ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_BATTERY, diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 78d5f02a073b..407a3760e8de 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -39,7 +39,7 @@ static const char *dummy_hid = "device"; static LIST_HEAD(acpi_dep_list); static DEFINE_MUTEX(acpi_dep_list_lock); -static LIST_HEAD(acpi_bus_id_list); +LIST_HEAD(acpi_bus_id_list); static DEFINE_MUTEX(acpi_scan_lock); static LIST_HEAD(acpi_scan_handlers_list); DEFINE_MUTEX(acpi_device_lock); @@ -52,12 +52,6 @@ struct acpi_dep_data { acpi_handle slave; }; -struct acpi_device_bus_id{ - char bus_id[15]; - unsigned int instance_no; - struct list_head node; -}; - void acpi_scan_lock_acquire(void) { mutex_lock(&acpi_scan_lock); @@ -471,10 +465,24 @@ static void acpi_device_release(struct device *dev) static void acpi_device_del(struct acpi_device *device) { + struct acpi_device_bus_id *acpi_device_bus_id; + mutex_lock(&acpi_device_lock); if (device->parent) list_del(&device->node); + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) + if (!strcmp(acpi_device_bus_id->bus_id, + acpi_device_hid(device))) { + if (acpi_device_bus_id->instance_no > 0) + acpi_device_bus_id->instance_no--; + else { + list_del(&acpi_device_bus_id->node); + kfree(acpi_device_bus_id); + } + break; + } + list_del(&device->wakeup_list); mutex_unlock(&acpi_device_lock); @@ -1461,7 +1469,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type, *type = ACPI_BUS_TYPE_DEVICE; status = acpi_bus_get_status_handle(handle, sta); if (ACPI_FAILURE(status)) - return -ENODEV; + *sta = 0; break; case ACPI_TYPE_PROCESSOR: *type = ACPI_BUS_TYPE_PROCESSOR; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 0d94621dc856..9cb975200cac 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -61,7 +61,7 @@ static int acpi_sleep_prepare(u32 acpi_state) if (acpi_state == ACPI_STATE_S3) { if (!acpi_wakeup_address) return -EFAULT; - acpi_set_firmware_waking_vector(acpi_wakeup_address); + acpi_set_waking_vector(acpi_wakeup_address); } ACPI_FLUSH_CPU_CACHE(); @@ -410,7 +410,7 @@ static void acpi_pm_finish(void) acpi_leave_sleep_state(acpi_state); /* reset firmware waking vector */ - acpi_set_firmware_waking_vector((acpi_physical_address) 0); + acpi_set_waking_vector(0); acpi_target_sleep_state = ACPI_STATE_S0; diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index c797ffa568d5..a9cc34e663f9 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -6,3 +6,9 @@ extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; extern void acpi_resume_power_resources(void); + +static inline acpi_status acpi_set_waking_vector(u32 wakeup_address) +{ 
+ return acpi_set_firmware_waking_vector( + (acpi_physical_address)wakeup_address, 0); +} diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 475c9079bf85..f2f9873bb5c3 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -29,6 +29,7 @@ #include <linux/dynamic_debug.h> #include "internal.h" +#include "sleep.h" #define _COMPONENT ACPI_BUS_COMPONENT ACPI_MODULE_NAME("utils"); @@ -709,6 +710,36 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs) } EXPORT_SYMBOL(acpi_check_dsm); +/** + * acpi_dev_present - Detect presence of a given ACPI device in the system. + * @hid: Hardware ID of the device. + * + * Return %true if the device was present at the moment of invocation. + * Note that if the device is pluggable, it may since have disappeared. + * + * For this function to work, acpi_bus_scan() must have been executed + * which happens in the subsys_initcall() subsection. Hence, do not + * call from a subsys_initcall() or earlier (use acpi_get_devices() + * instead). Calling from module_init() is fine (which is synonymous + * with device_initcall()). + */ +bool acpi_dev_present(const char *hid) +{ + struct acpi_device_bus_id *acpi_device_bus_id; + bool found = false; + + mutex_lock(&acpi_device_lock); + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) + if (!strcmp(acpi_device_bus_id->bus_id, hid)) { + found = true; + break; + } + mutex_unlock(&acpi_device_lock); + + return found; +} +EXPORT_SYMBOL(acpi_dev_present); + /* * acpi_backlight= handling, this is done here rather then in video_detect.c * because __setup cannot be used in modules. diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index daaf1c4e1e0f..90e2d54be526 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -250,6 +250,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), }, }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=108971 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 530U4E/540U4E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"), + }, + }, /* Non win8 machines which need native backlight nevertheless */ { @@ -279,6 +288,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"), }, }, + { + .callback = video_detect_force_native, + .ident = "Dell Vostro V131", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), + }, + }, { }, }; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 6aaa3f81755b..861643ea91b5 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -100,7 +100,7 @@ config SATA_AHCI_PLATFORM config AHCI_BRCMSTB tristate "Broadcom STB AHCI SATA support" - depends on ARCH_BRCMSTB + depends on ARCH_BRCMSTB || BMIPS_GENERIC help This option enables support for the AHCI SATA3 controller found on STB SoC's. diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index cdfbcc54821f..594fcabd22cd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1306,15 +1306,13 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host) #endif /* - * ahci_init_msix() only implements single MSI-X support, not multiple - * MSI-X per-port interrupts. This is needed for host controllers that only - * have MSI-X support implemented, but no MSI or intx. 
+ * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer + * to single msi. */ static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports, - struct ahci_host_priv *hpriv) + struct ahci_host_priv *hpriv, unsigned long flags) { - int rc, nvec; - struct msix_entry entry = {}; + int nvec, i, rc; /* Do not init MSI-X if MSI is disabled for the device */ if (hpriv->flags & AHCI_HFLAG_NO_MSI) @@ -1324,22 +1322,39 @@ static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports, if (nvec < 0) return nvec; - if (!nvec) { + /* + * Proper MSI-X implementations will have a vector per-port. + * Barring that, we prefer single-MSI over single-MSIX. If this + * check fails (not enough MSI-X vectors for all ports) we will + * be called again with the flag clear iff ahci_init_msi() + * fails. + */ + if (flags & AHCI_HFLAG_MULTI_MSIX) { + if (nvec < n_ports) + return -ENODEV; + nvec = n_ports; + } else if (nvec) { + nvec = 1; + } else { + /* + * Emit dev_err() since this was the non-legacy irq + * method of last resort. + */ rc = -ENODEV; goto fail; } - /* - * There can be more than one vector (e.g. for error detection or - * hdd hotplug). Only the first vector (entry.entry = 0) is used. - */ - rc = pci_enable_msix_exact(pdev, &entry, 1); + for (i = 0; i < nvec; i++) + hpriv->msix[i].entry = i; + rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec); if (rc < 0) goto fail; - hpriv->irq = entry.vector; + if (nvec > 1) + hpriv->flags |= AHCI_HFLAG_MULTI_MSIX; + hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */ - return 1; + return nvec; fail: dev_err(&pdev->dev, "failed to enable MSI-X with error %d, # of vectors: %d\n", @@ -1403,20 +1418,25 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, { int nvec; + /* + * Try to enable per-port MSI-X. If the host is not capable + * fall back to single MSI before finally attempting single + * MSI-X. + */ + nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX); + if (nvec >= 0) + return nvec; + nvec = ahci_init_msi(pdev, n_ports, hpriv); if (nvec >= 0) return nvec; - /* - * Currently, MSI-X support only implements single IRQ mode and - * exists for controllers which can't do other types of IRQ. Only - * set it up if MSI fails. 
- */ - nvec = ahci_init_msix(pdev, n_ports, hpriv); + /* try single-msix */ + nvec = ahci_init_msix(pdev, n_ports, hpriv, 0); if (nvec >= 0) return nvec; - /* lagacy intx interrupts */ + /* legacy intx interrupts */ pci_intx(pdev, 1); hpriv->irq = pdev->irq; @@ -1578,7 +1598,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!host) return -ENOMEM; host->private_data = hpriv; - + hpriv->msix = devm_kzalloc(&pdev->dev, + sizeof(struct msix_entry) * n_ports, GFP_KERNEL); + if (!hpriv->msix) + return -ENOMEM; ahci_init_interrupts(pdev, n_ports, hpriv); if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 45586c1dbbdc..a4faa438889c 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -35,6 +35,7 @@ #ifndef _AHCI_H #define _AHCI_H +#include <linux/pci.h> #include <linux/clk.h> #include <linux/libata.h> #include <linux/phy/phy.h> @@ -237,11 +238,18 @@ enum { AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on port start (wait until error-handling stage) */ - AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as Edge Triggered */ +#ifdef CONFIG_PCI_MSI + AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ + AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ +#else + /* compile out MSI infrastructure */ + AHCI_HFLAG_MULTI_MSI = 0, + AHCI_HFLAG_MULTI_MSIX = 0, +#endif /* ap->flags bits */ @@ -308,7 +316,6 @@ struct ahci_port_priv { unsigned int ncq_saw_d2h:1; unsigned int ncq_saw_dmas:1; unsigned int ncq_saw_sdb:1; - atomic_t intr_status; /* interrupts to handle */ spinlock_t lock; /* protects parent ata_port */ u32 intr_mask; /* interrupts to enable */ bool fbs_supported; /* set iff FBS is supported */ @@ -343,6 +350,7 @@ struct ahci_host_priv { * the PHY position in this array. 
*/ struct phy **phys; + struct msix_entry *msix; /* Optional MSI-X support */ unsigned nports; /* Number of ports */ void *plat_data; /* Other platform data */ unsigned int irq; /* interrupt line */ @@ -354,6 +362,21 @@ struct ahci_host_priv { void (*start_engine)(struct ata_port *ap); }; +#ifdef CONFIG_PCI_MSI +static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port) +{ + if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX) + return hpriv->msix[port].vector; + else + return hpriv->irq + port; +} +#else +static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port) +{ + return hpriv->irq; +} +#endif + extern int ahci_ignore_sss; extern struct device_attribute *ahci_shost_attrs[]; diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c index 14b7305d2ba0..b36cae2fd04b 100644 --- a/drivers/ata/ahci_brcmstb.c +++ b/drivers/ata/ahci_brcmstb.c @@ -52,8 +52,10 @@ #define SATA_TOP_CTRL_2_PHY_GLOBAL_RESET BIT(14) #define SATA_TOP_CTRL_PHY_OFFS 0x8 #define SATA_TOP_MAX_PHYS 2 -#define SATA_TOP_CTRL_SATA_TP_OUT 0x1c -#define SATA_TOP_CTRL_CLIENT_INIT_CTRL 0x20 + +#define SATA_FIRST_PORT_CTRL 0x700 +#define SATA_NEXT_PORT_CTRL_OFFSET 0x80 +#define SATA_PORT_PCTRL6(reg_base) (reg_base + 0x18) /* On big-endian MIPS, buses are reversed to big endian, so switch them back */ #if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN) @@ -69,14 +71,21 @@ (DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \ (MMIO_ENDIAN << MMIO_ENDIAN_SHIFT)) +enum brcm_ahci_quirks { + BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), + BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), +}; + struct brcm_ahci_priv { struct device *dev; void __iomem *top_ctrl; u32 port_mask; + u32 quirks; }; static const struct ata_port_info ahci_brcm_port_info = { - .flags = AHCI_FLAG_COMMON, + .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, + .link_flags = ATA_LFLAG_NO_DB_DELAY, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_platform_ops, @@ -107,6 +116,34 @@ static inline void brcm_sata_writereg(u32 val, void __iomem *addr) writel_relaxed(val, addr); } +static void brcm_sata_alpm_init(struct ahci_host_priv *hpriv) +{ + struct brcm_ahci_priv *priv = hpriv->plat_data; + u32 bus_ctrl, port_ctrl, host_caps; + int i; + + /* Enable support for ALPM */ + bus_ctrl = brcm_sata_readreg(priv->top_ctrl + + SATA_TOP_CTRL_BUS_CTRL); + brcm_sata_writereg(bus_ctrl | OVERRIDE_HWINIT, + priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL); + host_caps = readl(hpriv->mmio + HOST_CAP); + writel(host_caps | HOST_CAP_ALPM, hpriv->mmio); + brcm_sata_writereg(bus_ctrl, priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL); + + /* + * Adjust timeout to allow PLL sufficient time to lock while waking + * up from slumber mode. 
+ */ + for (i = 0, port_ctrl = SATA_FIRST_PORT_CTRL; + i < SATA_TOP_MAX_PHYS; + i++, port_ctrl += SATA_NEXT_PORT_CTRL_OFFSET) { + if (priv->port_mask & BIT(i)) + writel(0xff1003fc, + hpriv->mmio + SATA_PORT_PCTRL6(port_ctrl)); + } +} + static void brcm_sata_phy_enable(struct brcm_ahci_priv *priv, int port) { void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL + @@ -114,6 +151,9 @@ static void brcm_sata_phy_enable(struct brcm_ahci_priv *priv, int port) void __iomem *p; u32 reg; + if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE) + return; + /* clear PHY_DEFAULT_POWER_STATE */ p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1; reg = brcm_sata_readreg(p); @@ -143,6 +183,9 @@ static void brcm_sata_phy_disable(struct brcm_ahci_priv *priv, int port) void __iomem *p; u32 reg; + if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE) + return; + /* power-off the PHY digital logic */ p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2; reg = brcm_sata_readreg(p); @@ -230,6 +273,7 @@ static int brcm_ahci_resume(struct device *dev) brcm_sata_init(priv); brcm_sata_phys_enable(priv); + brcm_sata_alpm_init(hpriv); return ahci_platform_resume(dev); } #endif @@ -256,6 +300,11 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); + if (of_device_is_compatible(dev->of_node, "brcm,bcm7425-ahci")) { + priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; + priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + } + brcm_sata_init(priv); priv->port_mask = brcm_ahci_get_portmask(pdev, priv); @@ -269,10 +318,15 @@ static int brcm_ahci_probe(struct platform_device *pdev) return PTR_ERR(hpriv); hpriv->plat_data = priv; + brcm_sata_alpm_init(hpriv); + ret = ahci_platform_enable_resources(hpriv); if (ret) return ret; + if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) + hpriv->flags |= AHCI_HFLAG_NO_NCQ; + ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, &ahci_platform_sht); if (ret) @@ -300,6 +354,7 @@ static int brcm_ahci_remove(struct platform_device *pdev) } static const struct of_device_id ahci_of_match[] = { + {.compatible = "brcm,bcm7425-ahci"}, {.compatible = "brcm,bcm7445-ahci"}, {}, }; diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index d0f9de96e4ea..7bdee9bd8786 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -34,14 +34,20 @@ /* port register default value */ #define AHCI_PORT_PHY_1_CFG 0xa003fffe -#define AHCI_PORT_PHY_2_CFG 0x28183411 -#define AHCI_PORT_PHY_3_CFG 0x0e081004 -#define AHCI_PORT_PHY_4_CFG 0x00480811 -#define AHCI_PORT_PHY_5_CFG 0x192c96a4 -#define AHCI_PORT_TRANS_CFG 0x08000025 +#define AHCI_PORT_TRANS_CFG 0x08000029 + +/* for ls1021a */ +#define LS1021A_PORT_PHY2 0x28183414 +#define LS1021A_PORT_PHY3 0x0e080e06 +#define LS1021A_PORT_PHY4 0x064a080b +#define LS1021A_PORT_PHY5 0x2aa86470 #define SATA_ECC_DISABLE 0x00020000 +/* for ls1043a */ +#define LS1043A_PORT_PHY2 0x28184d1f +#define LS1043A_PORT_PHY3 0x0e081509 + enum ahci_qoriq_type { AHCI_LS1021A, AHCI_LS1043A, @@ -151,16 +157,23 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) case AHCI_LS1021A: writel(SATA_ECC_DISABLE, qpriv->ecc_addr); writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); - writel(AHCI_PORT_PHY_2_CFG, reg_base + PORT_PHY2); - writel(AHCI_PORT_PHY_3_CFG, reg_base + PORT_PHY3); - writel(AHCI_PORT_PHY_4_CFG, reg_base + PORT_PHY4); - writel(AHCI_PORT_PHY_5_CFG, reg_base + PORT_PHY5); + writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2); + writel(LS1021A_PORT_PHY3, reg_base + PORT_PHY3); + writel(LS1021A_PORT_PHY4, reg_base + 
PORT_PHY4); + writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); break; case AHCI_LS1043A: + writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); + writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2); + writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3); + writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + break; + case AHCI_LS2080A: writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); + writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); break; } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 4665512dae44..d61740e78d6d 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -43,6 +43,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> +#include <linux/pci.h> #include "ahci.h" #include "libata.h" @@ -1804,41 +1805,24 @@ static void ahci_port_intr(struct ata_port *ap) ahci_handle_port_interrupt(ap, port_mmio, status); } -static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance) +static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) { struct ata_port *ap = dev_instance; - struct ahci_port_priv *pp = ap->private_data; void __iomem *port_mmio = ahci_port_base(ap); u32 status; - status = atomic_xchg(&pp->intr_status, 0); - if (!status) - return IRQ_NONE; - - spin_lock_bh(ap->lock); - ahci_handle_port_interrupt(ap, port_mmio, status); - spin_unlock_bh(ap->lock); - - return IRQ_HANDLED; -} - -static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance) -{ - struct ata_port *ap = dev_instance; - void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_port_priv *pp = ap->private_data; - u32 status; - VPRINTK("ENTER\n"); status = readl(port_mmio + PORT_IRQ_STAT); writel(status, port_mmio + PORT_IRQ_STAT); - atomic_or(status, &pp->intr_status); + spin_lock(ap->lock); + ahci_handle_port_interrupt(ap, port_mmio, status); + spin_unlock(ap->lock); VPRINTK("EXIT\n"); - return IRQ_WAKE_THREAD; + return IRQ_HANDLED; } static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) @@ -2479,9 +2463,10 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv, } EXPORT_SYMBOL_GPL(ahci_set_em_messages); -static int ahci_host_activate_multi_irqs(struct ata_host *host, int irq, +static int ahci_host_activate_multi_irqs(struct ata_host *host, struct scsi_host_template *sht) { + struct ahci_host_priv *hpriv = host->private_data; int i, rc; rc = ata_host_start(host); @@ -2493,6 +2478,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host, int irq, */ for (i = 0; i < host->n_ports; i++) { struct ahci_port_priv *pp = host->ports[i]->private_data; + int irq = ahci_irq_vector(hpriv, i); /* Do not receive interrupts sent by dummy ports */ if (!pp) { @@ -2500,14 +2486,14 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host, int irq, continue; } - rc = devm_request_threaded_irq(host->dev, irq + i, - ahci_multi_irqs_intr, - ahci_port_thread_fn, 0, - pp->irq_desc, host->ports[i]); + rc = devm_request_irq(host->dev, irq, ahci_multi_irqs_intr_hard, + 0, pp->irq_desc, host->ports[i]); + if (rc) return rc; - ata_port_desc(host->ports[i], "irq %d", irq + i); + ata_port_desc(host->ports[i], "irq %d", irq); } + return ata_host_register(host, sht); } @@ -2528,8 +2514,8 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht) int irq = hpriv->irq; int rc; - if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) - rc = ahci_host_activate_multi_irqs(host, irq, sht); + if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) + rc = 
ahci_host_activate_multi_irqs(host, sht); else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ) rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr, IRQF_SHARED, sht); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b79cb10e289e..cbb74719d2c1 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -50,6 +50,7 @@ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/timer.h> +#include <linux/time.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/suspend.h> @@ -3597,7 +3598,8 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, * immediately after resuming. Delay 200ms before * debouncing. */ - ata_msleep(link->ap, 200); + if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) + ata_msleep(link->ap, 200); /* is SControl restored correctly? */ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) @@ -6223,6 +6225,7 @@ int ata_host_activate(struct ata_host *host, int irq, struct scsi_host_template *sht) { int i, rc; + char *irq_desc; rc = ata_host_start(host); if (rc) @@ -6234,8 +6237,14 @@ int ata_host_activate(struct ata_host *host, int irq, return ata_host_register(host, sht); } + irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]", + dev_driver_string(host->dev), + dev_name(host->dev)); + if (!irq_desc) + return -ENOMEM; + rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, - dev_name(host->dev), host); + irq_desc, host); if (rc) return rc; @@ -6697,7 +6706,12 @@ void ata_msleep(struct ata_port *ap, unsigned int msecs) if (owns_eh) ata_eh_release(ap); - msleep(msecs); + if (msecs < 20) { + unsigned long usecs = msecs * USEC_PER_MSEC; + usleep_range(usecs, usecs + 50); + } else { + msleep(msecs); + } if (owns_eh) ata_eh_acquire(ap); diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 8804127b108c..f72d601e300a 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -854,17 +854,14 @@ static struct of_device_id sata_rcar_match[] = { .compatible = "renesas,sata-r8a7793", .data = (void *)RCAR_GEN2_SATA }, + { + .compatible = "renesas,sata-r8a7795", + .data = (void *)RCAR_GEN2_SATA + }, { }, }; MODULE_DEVICE_TABLE(of, sata_rcar_match); -static const struct platform_device_id sata_rcar_id_table[] = { - { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */ - { "sata-r8a7779", RCAR_GEN1_SATA }, - { }, -}; -MODULE_DEVICE_TABLE(platform, sata_rcar_id_table); - static int sata_rcar_probe(struct platform_device *pdev) { const struct of_device_id *of_id; @@ -884,11 +881,10 @@ static int sata_rcar_probe(struct platform_device *pdev) return -ENOMEM; of_id = of_match_device(sata_rcar_match, &pdev->dev); - if (of_id) - priv->type = (enum sata_rcar_type)of_id->data; - else - priv->type = platform_get_device_id(pdev)->driver_data; + if (!of_id) + return -ENODEV; + priv->type = (enum sata_rcar_type)of_id->data; priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "failed to get access to sata clock\n"); @@ -1018,7 +1014,6 @@ static const struct dev_pm_ops sata_rcar_pm_ops = { static struct platform_driver sata_rcar_driver = { .probe = sata_rcar_probe, .remove = sata_rcar_remove, - .id_table = sata_rcar_id_table, .driver = { .name = DRV_NAME, .of_match_table = sata_rcar_match, diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index fab504fd9cfd..48301cb3a316 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -1396,6 +1396,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) addr 
= 0; length = size * 1024 * 1024; buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL); + if (!buf) + return 1; while (addr < length) { pdc20621_put_to_dimm(host, buf, addr, ECC_ERASE_BUF_SZ); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 3d7fb6516f74..6ac2b2b1e8de 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -347,8 +347,8 @@ static char *next_string(struct sk_buff *skb) */ static int process_status(struct solos_card *card, int port, struct sk_buff *skb) { - char *str, *end, *state_str, *snr, *attn; - int ver, rate_up, rate_down; + char *str, *state_str, *snr, *attn; + int ver, rate_up, rate_down, err; if (!card->atmdev[port]) return -ENODEV; @@ -357,7 +357,11 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb if (!str) return -EIO; - ver = simple_strtol(str, NULL, 10); + err = kstrtoint(str, 10, &ver); + if (err) { + dev_warn(&card->dev->dev, "Unexpected status interrupt version\n"); + return err; + } if (ver < 1) { dev_warn(&card->dev->dev, "Unexpected status interrupt version %d\n", ver); @@ -373,16 +377,16 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb return 0; } - rate_down = simple_strtol(str, &end, 10); - if (*end) - return -EIO; + err = kstrtoint(str, 10, &rate_down); + if (err) + return err; str = next_string(skb); if (!str) return -EIO; - rate_up = simple_strtol(str, &end, 10); - if (*end) - return -EIO; + err = kstrtoint(str, 10, &rate_up); + if (err) + return err; state_str = next_string(skb); if (!state_str) @@ -417,7 +421,7 @@ static int process_command(struct solos_card *card, int port, struct sk_buff *sk struct solos_param *prm; unsigned long flags; int cmdpid; - int found = 0; + int found = 0, err; if (skb->len < 7) return 0; @@ -428,7 +432,9 @@ static int process_command(struct solos_card *card, int port, struct sk_buff *sk skb->data[6] != '\n') return 0; - cmdpid = simple_strtol(&skb->data[1], NULL, 10); + err = kstrtoint(&skb->data[1], 10, &cmdpid); + if (err) + return err; spin_lock_irqsave(&card->param_queue_lock, flags); list_for_each_entry(prm, &card->param_queue, list) { @@ -519,7 +525,7 @@ struct geos_gpio_attr { static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); struct solos_card *card = pci_get_drvdata(pdev); uint32_t data32; @@ -545,7 +551,7 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); struct solos_card *card = pci_get_drvdata(pdev); uint32_t data32; @@ -559,7 +565,7 @@ static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr, static ssize_t hardware_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); struct solos_card *card = pci_get_drvdata(pdev); uint32_t data32; diff --git a/drivers/base/base.h b/drivers/base/base.h index 1782f3aa386e..e05db388bd1c 100644 --- 
a/drivers/base/base.h +++ b/drivers/base/base.h @@ -131,6 +131,8 @@ extern void device_remove_groups(struct device *dev, extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); +extern void device_block_probing(void); +extern void device_unblock_probing(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/component.c b/drivers/base/component.c index f748430bb654..89f5cf68d80a 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -18,18 +18,24 @@ #include <linux/mutex.h> #include <linux/slab.h> +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + struct component_match { size_t alloc; size_t num; - struct { - void *data; - int (*fn)(struct device *, void *); - } compare[0]; + struct component_match_array *compare; }; struct master { struct list_head node; - struct list_head components; bool bound; const struct component_master_ops *ops; @@ -39,7 +45,6 @@ struct master { struct component { struct list_head node; - struct list_head master_node; struct master *master; bool bound; @@ -63,48 +68,21 @@ static struct master *__master_find(struct device *dev, return NULL; } -/* Attach an unattached component to a master. */ -static void component_attach_master(struct master *master, struct component *c) -{ - c->master = master; - - list_add_tail(&c->master_node, &master->components); -} - -/* Detach a component from a master. */ -static void component_detach_master(struct master *master, struct component *c) -{ - list_del(&c->master_node); - - c->master = NULL; -} - -/* - * Add a component to a master, finding the component via the compare - * function and compare data. This is safe to call for duplicate matches - * and will not result in the same component being added multiple times. - */ -int component_master_add_child(struct master *master, +static struct component *find_component(struct master *master, int (*compare)(struct device *, void *), void *compare_data) { struct component *c; - int ret = -ENXIO; list_for_each_entry(c, &component_list, node) { if (c->master && c->master != master) continue; - if (compare(c->dev, compare_data)) { - if (!c->master) - component_attach_master(master, c); - ret = 0; - break; - } + if (compare(c->dev, compare_data)) + return c; } - return ret; + return NULL; } -EXPORT_SYMBOL_GPL(component_master_add_child); static int find_components(struct master *master) { @@ -112,39 +90,44 @@ static int find_components(struct master *master) size_t i; int ret = 0; - if (!match) { - /* - * Search the list of components, looking for components that - * belong to this master, and attach them to the master. - */ - return master->ops->add_components(master->dev, master); - } - /* * Scan the array of match functions and attach * any components which are found to this master. 
*/ for (i = 0; i < match->num; i++) { - ret = component_master_add_child(master, - match->compare[i].fn, - match->compare[i].data); - if (ret) + struct component_match_array *mc = &match->compare[i]; + struct component *c; + + dev_dbg(master->dev, "Looking for component %zu\n", i); + + if (match->compare[i].component) + continue; + + c = find_component(master, mc->compare, mc->data); + if (!c) { + ret = -ENXIO; break; + } + + dev_dbg(master->dev, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master); + + /* Attach this component to the master */ + match->compare[i].duplicate = !!c->master; + match->compare[i].component = c; + c->master = master; } return ret; } -/* Detach all attached components from this master */ -static void master_remove_components(struct master *master) +/* Detach component from associated master */ +static void remove_component(struct master *master, struct component *c) { - while (!list_empty(&master->components)) { - struct component *c = list_first_entry(&master->components, - struct component, master_node); - - WARN_ON(c->master != master); + size_t i; - component_detach_master(master, c); - } + /* Detach the component from this master. */ + for (i = 0; i < master->match->num; i++) + if (master->match->compare[i].component == c) + master->match->compare[i].component = NULL; } /* @@ -159,44 +142,32 @@ static int try_to_bring_up_master(struct master *master, { int ret; - if (master->bound) - return 0; + dev_dbg(master->dev, "trying to bring up master\n"); - /* - * Search the list of components, looking for components that - * belong to this master, and attach them to the master. - */ if (find_components(master)) { - /* Failed to find all components */ - ret = 0; - goto out; + dev_dbg(master->dev, "master has incomplete components\n"); + return 0; } if (component && component->master != master) { - ret = 0; - goto out; + dev_dbg(master->dev, "master is not for this component (%s)\n", + dev_name(component->dev)); + return 0; } - if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) + return -ENOMEM; /* Found all components */ ret = master->ops->bind(master->dev); if (ret < 0) { devres_release_group(master->dev, NULL); dev_info(master->dev, "master bind failed: %d\n", ret); - goto out; + return ret; } master->bound = true; return 1; - -out: - master_remove_components(master); - - return ret; } static int try_to_bring_up_masters(struct component *component) @@ -205,9 +176,11 @@ static int try_to_bring_up_masters(struct component *component) int ret = 0; list_for_each_entry(m, &masters, node) { - ret = try_to_bring_up_master(m, component); - if (ret != 0) - break; + if (!m->bound) { + ret = try_to_bring_up_master(m, component); + if (ret != 0) + break; + } } return ret; @@ -220,45 +193,57 @@ static void take_down_master(struct master *master) devres_release_group(master->dev, NULL); master->bound = false; } +} - master_remove_components(master); +static void component_match_release(struct device *master, + struct component_match *match) +{ + unsigned int i; + + for (i = 0; i < match->num; i++) { + struct component_match_array *mc = &match->compare[i]; + + if (mc->release) + mc->release(master, mc->data); + } } -static size_t component_match_size(size_t num) +static void devm_component_match_release(struct device *dev, void *res) { - return offsetof(struct component_match, compare[num]); + component_match_release(dev, res); } -static struct component_match 
*component_match_realloc(struct device *dev, +static int component_match_realloc(struct device *dev, struct component_match *match, size_t num) { - struct component_match *new; + struct component_match_array *new; - if (match && match->alloc == num) - return match; + if (match->alloc == num) + return 0; - new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL); + new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL); if (!new) - return ERR_PTR(-ENOMEM); + return -ENOMEM; - if (match) { - memcpy(new, match, component_match_size(min(match->num, num))); - devm_kfree(dev, match); - } else { - new->num = 0; + if (match->compare) { + memcpy(new, match->compare, sizeof(*new) * + min(match->num, num)); + devm_kfree(dev, match->compare); } + match->compare = new; + match->alloc = num; - new->alloc = num; - - return new; + return 0; } /* - * Add a component to be matched. + * Add a component to be matched, with a release function. * * The match array is first created or extended if necessary. */ -void component_match_add(struct device *dev, struct component_match **matchptr, +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data) { struct component_match *match = *matchptr; @@ -266,22 +251,37 @@ void component_match_add(struct device *dev, struct component_match **matchptr, if (IS_ERR(match)) return; - if (!match || match->num == match->alloc) { - size_t new_size = match ? match->alloc + 16 : 15; + if (!match) { + match = devres_alloc(devm_component_match_release, + sizeof(*match), GFP_KERNEL); + if (!match) { + *matchptr = ERR_PTR(-ENOMEM); + return; + } - match = component_match_realloc(dev, match, new_size); + devres_add(master, match); *matchptr = match; + } + + if (match->num == match->alloc) { + size_t new_size = match ? match->alloc + 16 : 15; + int ret; - if (IS_ERR(match)) + ret = component_match_realloc(master, match, new_size); + if (ret) { + *matchptr = ERR_PTR(ret); return; + } } - match->compare[match->num].fn = compare; + match->compare[match->num].compare = compare; + match->compare[match->num].release = release; match->compare[match->num].data = compare_data; + match->compare[match->num].component = NULL; match->num++; } -EXPORT_SYMBOL(component_match_add); +EXPORT_SYMBOL(component_match_add_release); int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, @@ -290,15 +290,10 @@ int component_master_add_with_match(struct device *dev, struct master *master; int ret; - if (ops->add_components && match) - return -EINVAL; - - if (match) { - /* Reallocate the match array for its true size */ - match = component_match_realloc(dev, match, match->num); - if (IS_ERR(match)) - return PTR_ERR(match); - } + /* Reallocate the match array for its true size */ + ret = component_match_realloc(dev, match, match->num); + if (ret) + return ret; master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) @@ -307,7 +302,6 @@ int component_master_add_with_match(struct device *dev, master->dev = dev; master->ops = ops; master->match = match; - INIT_LIST_HEAD(&master->components); /* Add to the list of available masters. 
*/ mutex_lock(&component_mutex); @@ -326,24 +320,28 @@ int component_master_add_with_match(struct device *dev, } EXPORT_SYMBOL_GPL(component_master_add_with_match); -int component_master_add(struct device *dev, - const struct component_master_ops *ops) -{ - return component_master_add_with_match(dev, ops, NULL); -} -EXPORT_SYMBOL_GPL(component_master_add); - void component_master_del(struct device *dev, const struct component_master_ops *ops) { struct master *master; + int i; mutex_lock(&component_mutex); master = __master_find(dev, ops); if (master) { + struct component_match *match = master->match; + take_down_master(master); list_del(&master->node); + + if (match) { + for (i = 0; i < match->num; i++) { + struct component *c = match->compare[i].component; + if (c) + c->master = NULL; + } + } kfree(master); } mutex_unlock(&component_mutex); @@ -366,6 +364,7 @@ void component_unbind_all(struct device *master_dev, void *data) { struct master *master; struct component *c; + size_t i; WARN_ON(!mutex_is_locked(&component_mutex)); @@ -373,8 +372,12 @@ void component_unbind_all(struct device *master_dev, void *data) if (!master) return; - list_for_each_entry_reverse(c, &master->components, master_node) - component_unbind(c, master, data); + /* Unbind components in reverse order */ + for (i = master->match->num; i--; ) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + component_unbind(c, master, data); + } } EXPORT_SYMBOL_GPL(component_unbind_all); @@ -434,6 +437,7 @@ int component_bind_all(struct device *master_dev, void *data) { struct master *master; struct component *c; + size_t i; int ret = 0; WARN_ON(!mutex_is_locked(&component_mutex)); @@ -442,16 +446,21 @@ int component_bind_all(struct device *master_dev, void *data) if (!master) return -EINVAL; - list_for_each_entry(c, &master->components, master_node) { - ret = component_bind(c, master, data); - if (ret) - break; - } + /* Bind components in match order */ + for (i = 0; i < master->match->num; i++) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + ret = component_bind(c, master, data); + if (ret) + break; + } if (ret != 0) { - list_for_each_entry_continue_reverse(c, &master->components, - master_node) - component_unbind(c, master, data); + for (; i--; ) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + component_unbind(c, master, data); + } } return ret; @@ -499,8 +508,10 @@ void component_del(struct device *dev, const struct component_ops *ops) break; } - if (component && component->master) + if (component && component->master) { take_down_master(component->master); + remove_component(component->master, component); + } mutex_unlock(&component_mutex); diff --git a/drivers/base/core.c b/drivers/base/core.c index b7d56c5ea3c6..0a8bdade53f2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2261,7 +2261,10 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) if (fwnode_is_primary(fn)) fn = fn->secondary; - fwnode->secondary = fn; + if (fn) { + WARN_ON(fwnode->secondary); + fwnode->secondary = fn; + } dev->fwnode = fwnode; } else { dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
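
The component.c rework above replaces the per-master component list with a match array: a master driver now describes the components it needs up front with component_match_add_release() and hands the array to component_master_add_with_match(), which binds everything in match order once all entries are found. A minimal sketch of that flow, assuming the child drivers register themselves with component_add() as usual and that matching is done by OF node; all my_* names are hypothetical:

        #include <linux/component.h>
        #include <linux/platform_device.h>
        #include <linux/of.h>
        #include <linux/err.h>

        /* Compare callback: match a registered component by its OF node. */
        static int my_compare_of(struct device *dev, void *data)
        {
                return dev->of_node == data;
        }

        /* Release callback invoked when the match array itself is released. */
        static void my_release_of(struct device *dev, void *data)
        {
                of_node_put(data);
        }

        static int my_master_bind(struct device *dev)
        {
                /* All matched components are present; bind them in match order. */
                return component_bind_all(dev, NULL);
        }

        static void my_master_unbind(struct device *dev)
        {
                component_unbind_all(dev, NULL);
        }

        static const struct component_master_ops my_master_ops = {
                .bind   = my_master_bind,
                .unbind = my_master_unbind,
        };

        static int my_master_probe(struct platform_device *pdev)
        {
                struct component_match *match = NULL;
                struct device_node *child;

                /* Build the match array; each entry keeps a node reference
                 * that my_release_of() drops when the match is released. */
                for_each_available_child_of_node(pdev->dev.of_node, child)
                        component_match_add_release(&pdev->dev, &match,
                                                    my_release_of,
                                                    my_compare_of,
                                                    of_node_get(child));

                if (IS_ERR_OR_NULL(match))
                        return match ? PTR_ERR(match) : -ENODEV;

                return component_master_add_with_match(&pdev->dev,
                                                       &my_master_ops, match);
        }

Duplicate matches (two entries resolving to the same component) are tolerated: the duplicate flag recorded in find_components() keeps component_bind_all()/component_unbind_all() from binding the same component twice.
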
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a641cf3ccad6..7399be790b5d 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -55,6 +55,13 @@ static struct workqueue_struct *deferred_wq; static atomic_t deferred_trigger_count = ATOMIC_INIT(0); /* + * In some cases, like suspend to RAM or hibernation, It might be reasonable + * to prohibit probing of devices as it could be unsafe. + * Once defer_all_probes is true all drivers probes will be forcibly deferred. + */ +static bool defer_all_probes; + +/* * deferred_probe_work_func() - Retry probing devices in the active list. */ static void deferred_probe_work_func(struct work_struct *work) @@ -172,6 +179,30 @@ static void driver_deferred_probe_trigger(void) } /** + * device_block_probing() - Block/defere device's probes + * + * It will disable probing of devices and defer their probes instead. + */ +void device_block_probing(void) +{ + defer_all_probes = true; + /* sync with probes to avoid races. */ + wait_for_device_probe(); +} + +/** + * device_unblock_probing() - Unblock/enable device's probes + * + * It will restore normal behavior and trigger re-probing of deferred + * devices. + */ +void device_unblock_probing(void) +{ + defer_all_probes = false; + driver_deferred_probe_trigger(); +} + +/** * deferred_probe_initcall() - Enable probing of deferred devices * * We don't want to get in the way when the bulk of drivers are getting probed. @@ -268,6 +299,9 @@ int device_bind_driver(struct device *dev) ret = driver_sysfs_add(dev); if (!ret) driver_bound(dev); + else if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DRIVER_NOT_BOUND, dev); return ret; } EXPORT_SYMBOL_GPL(device_bind_driver); @@ -277,9 +311,20 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); static int really_probe(struct device *dev, struct device_driver *drv) { - int ret = 0; + int ret = -EPROBE_DEFER; int local_trigger_count = atomic_read(&deferred_trigger_count); + if (defer_all_probes) { + /* + * Value of defer_all_probes can be set only by + * device_defer_all_probes_enable() which, in turn, will call + * wait_for_device_probe() right after that to avoid any races. 
+ */ + dev_dbg(dev, "Driver %s force probe deferral\n", drv->name); + driver_deferred_probe_add(dev); + return ret; + } + atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", drv->bus->name, __func__, drv->name, dev_name(dev)); @@ -290,7 +335,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) /* If using pinctrl, bind pins now before probing */ ret = pinctrl_bind_pins(dev); if (ret) - goto probe_failed; + goto pinctrl_bind_failed; if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", @@ -334,12 +379,17 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto done; probe_failed: + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DRIVER_NOT_BOUND, dev); +pinctrl_bind_failed: devres_release_all(dev); driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); switch (ret) { case -EPROBE_DEFER: @@ -393,6 +443,10 @@ int driver_probe_done(void) */ void wait_for_device_probe(void) { + /* wait for the deferred probe workqueue to finish */ + if (driver_deferred_probe_enable) + flush_workqueue(deferred_wq); + /* wait for the known devices to complete their probing */ wait_event(probe_waitqueue, atomic_read(&probe_count) == 0); async_synchronize_full(); @@ -695,13 +749,13 @@ static void __device_release_driver(struct device *dev) dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); klist_remove(&dev->p->knode_driver); if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBOUND_DRIVER, dev); - } } diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 5df4575b5ba7..47c43386786b 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -24,13 +24,17 @@ #include <linux/msi.h> #include <linux/slab.h> -#define DEV_ID_SHIFT 24 +#define DEV_ID_SHIFT 21 +#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) /* * Internal data structure containing a (made up, but unique) devid * and the callback to write the MSI message. 
*/ struct platform_msi_priv_data { + struct device *dev; + void *host_data; + msi_alloc_info_t arg; irq_write_msi_msg_t write_msg; int devid; }; @@ -110,39 +114,49 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info) chip->irq_write_msi_msg = platform_msi_write_msg; } -static void platform_msi_free_descs(struct device *dev) +static void platform_msi_free_descs(struct device *dev, int base, int nvec) { struct msi_desc *desc, *tmp; list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { - list_del(&desc->list); - free_msi_entry(desc); + if (desc->platform.msi_index >= base && + desc->platform.msi_index < (base + nvec)) { + list_del(&desc->list); + free_msi_entry(desc); + } } } -static int platform_msi_alloc_descs(struct device *dev, int nvec, - struct platform_msi_priv_data *data) +static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq, + int nvec, + struct platform_msi_priv_data *data) { - int i; + struct msi_desc *desc; + int i, base = 0; - for (i = 0; i < nvec; i++) { - struct msi_desc *desc; + if (!list_empty(dev_to_msi_list(dev))) { + desc = list_last_entry(dev_to_msi_list(dev), + struct msi_desc, list); + base = desc->platform.msi_index + 1; + } + for (i = 0; i < nvec; i++) { desc = alloc_msi_entry(dev); if (!desc) break; desc->platform.msi_priv_data = data; - desc->platform.msi_index = i; + desc->platform.msi_index = base + i; desc->nvec_used = 1; + desc->irq = virq ? virq + i : 0; list_add_tail(&desc->list, dev_to_msi_list(dev)); } if (i != nvec) { /* Clean up the mess */ - platform_msi_free_descs(dev); + platform_msi_free_descs(dev, base, nvec); return -ENOMEM; } @@ -150,6 +164,13 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec, return 0; } +static int platform_msi_alloc_descs(struct device *dev, int nvec, + struct platform_msi_priv_data *data) + +{ + return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data); +} + /** * platform_msi_create_irq_domain - Create a platform MSI interrupt domain * @fwnode: Optional fwnode of the interrupt controller @@ -180,56 +201,75 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, return domain; } -/** - * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev - * @dev: The device for which to allocate interrupts - * @nvec: The number of interrupts to allocate - * @write_msi_msg: Callback to write an interrupt message for @dev - * - * Returns: - * Zero for success, or an error code in case of failure - */ -int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, - irq_write_msi_msg_t write_msi_msg) +static struct platform_msi_priv_data * +platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) { - struct platform_msi_priv_data *priv_data; - int err; - + struct platform_msi_priv_data *datap; /* * Limit the number of interrupts to 256 per device. Should we * need to bump this up, DEV_ID_SHIFT should be adjusted * accordingly (which would impact the max number of MSI * capable devices). */ - if (!dev->msi_domain || !write_msi_msg || !nvec || - nvec > (1 << (32 - DEV_ID_SHIFT))) - return -EINVAL; + if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS) + return ERR_PTR(-EINVAL); if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { dev_err(dev, "Incompatible msi_domain, giving up\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } /* Already had a helping of MSI? Greed... 
*/ if (!list_empty(dev_to_msi_list(dev))) - return -EBUSY; + return ERR_PTR(-EBUSY); + + datap = kzalloc(sizeof(*datap), GFP_KERNEL); + if (!datap) + return ERR_PTR(-ENOMEM); + + datap->devid = ida_simple_get(&platform_msi_devid_ida, + 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); + if (datap->devid < 0) { + int err = datap->devid; + kfree(datap); + return ERR_PTR(err); + } - priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); - if (!priv_data) - return -ENOMEM; + datap->write_msg = write_msi_msg; + datap->dev = dev; - priv_data->devid = ida_simple_get(&platform_msi_devid_ida, - 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); - if (priv_data->devid < 0) { - err = priv_data->devid; - goto out_free_data; - } + return datap; +} + +static void platform_msi_free_priv_data(struct platform_msi_priv_data *data) +{ + ida_simple_remove(&platform_msi_devid_ida, data->devid); + kfree(data); +} + +/** + * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + */ +int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct platform_msi_priv_data *priv_data; + int err; - priv_data->write_msg = write_msi_msg; + priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (IS_ERR(priv_data)) + return PTR_ERR(priv_data); err = platform_msi_alloc_descs(dev, nvec, priv_data); if (err) - goto out_free_id; + goto out_free_priv_data; err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); if (err) @@ -238,11 +278,9 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, return 0; out_free_desc: - platform_msi_free_descs(dev); -out_free_id: - ida_simple_remove(&platform_msi_devid_ida, priv_data->devid); -out_free_data: - kfree(priv_data); + platform_msi_free_descs(dev, 0, nvec); +out_free_priv_data: + platform_msi_free_priv_data(priv_data); return err; } @@ -253,18 +291,126 @@ out_free_data: */ void platform_msi_domain_free_irqs(struct device *dev) { - struct msi_desc *desc; + if (!list_empty(dev_to_msi_list(dev))) { + struct msi_desc *desc; + + desc = first_msi_entry(dev); + platform_msi_free_priv_data(desc->platform.msi_priv_data); + } + + msi_domain_free_irqs(dev->msi_domain, dev); + platform_msi_free_descs(dev, 0, MAX_DEV_MSIS); +} + +/** + * platform_msi_get_host_data - Query the private data associated with + * a platform-msi domain + * @domain: The platform-msi domain + * + * Returns the private data provided when calling + * platform_msi_create_device_domain. 
+ */ +void *platform_msi_get_host_data(struct irq_domain *domain) +{ + struct platform_msi_priv_data *data = domain->host_data; + return data->host_data; +} + +/** + * platform_msi_create_device_domain - Create a platform-msi domain + * + * @dev: The device generating the MSIs + * @nvec: The number of MSIs that need to be allocated + * @write_msi_msg: Callback to write an interrupt message for @dev + * @ops: The hierarchy domain operations to use + * @host_data: Private data associated to this domain + * + * Returns an irqdomain for @nvec interrupts + */ +struct irq_domain * +platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct platform_msi_priv_data *data; + struct irq_domain *domain; + int err; + + data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (IS_ERR(data)) + return NULL; + + data->host_data = host_data; + domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec, + of_node_to_fwnode(dev->of_node), + ops, data); + if (!domain) + goto free_priv; - desc = first_msi_entry(dev); - if (desc) { - struct platform_msi_priv_data *data; + err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg); + if (err) + goto free_domain; + + return domain; - data = desc->platform.msi_priv_data; +free_domain: + irq_domain_remove(domain); +free_priv: + platform_msi_free_priv_data(data); + return NULL; +} + +/** + * platform_msi_domain_free - Free interrupts associated with a platform-msi + * domain + * + * @domain: The platform-msi domain + * @virq: The base irq from which to perform the free operation + * @nvec: How many interrupts to free from @virq + */ +void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nvec) +{ + struct platform_msi_priv_data *data = domain->host_data; + struct msi_desc *desc; + for_each_msi_entry(desc, data->dev) { + if (WARN_ON(!desc->irq || desc->nvec_used != 1)) + return; + if (!(desc->irq >= virq && desc->irq < (virq + nvec))) + continue; - ida_simple_remove(&platform_msi_devid_ida, data->devid); - kfree(data); + irq_domain_free_irqs_common(domain, desc->irq, 1); } +} - msi_domain_free_irqs(dev->msi_domain, dev); - platform_msi_free_descs(dev); +/** + * platform_msi_domain_alloc - Allocate interrupts associated with + * a platform-msi domain + * + * @domain: The platform-msi domain + * @virq: The base irq from which to perform the allocate operation + * @nvec: How many interrupts to free from @virq + * + * Return 0 on success, or an error code on failure. Must be called + * with irq_domain_mutex held (which can only be done as part of a + * top-level interrupt allocation). 
+ */ +int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct platform_msi_priv_data *data = domain->host_data; + int err; + + err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data); + if (err) + return err; + + err = msi_domain_populate_irqs(domain->parent, data->dev, + virq, nr_irqs, &data->arg); + if (err) + platform_msi_domain_free(domain, virq, nr_irqs); + + return err; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 1dd6d3bf1098..8dcbb266643b 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -26,6 +26,7 @@ #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/limits.h> +#include <linux/property.h> #include "base.h" #include "power/power.h" @@ -117,6 +118,26 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) EXPORT_SYMBOL_GPL(platform_get_irq); /** + * platform_irq_count - Count the number of IRQs a platform device uses + * @dev: platform device + * + * Return: Number of IRQs a platform device uses or EPROBE_DEFER + */ +int platform_irq_count(struct platform_device *dev) +{ + int ret, nr = 0; + + while ((ret = platform_get_irq(dev, nr)) >= 0) + nr++; + + if (ret == -EPROBE_DEFER) + return ret; + + return nr; +} +EXPORT_SYMBOL_GPL(platform_irq_count); + +/** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type @@ -299,6 +320,22 @@ int platform_device_add_data(struct platform_device *pdev, const void *data, EXPORT_SYMBOL_GPL(platform_device_add_data); /** + * platform_device_add_properties - add built-in properties to a platform device + * @pdev: platform device to add properties to + * @pset: properties to add + * + * The function will take deep copy of the properties in @pset and attach + * the copy to the platform device. The memory associated with properties + * will be freed when the platform device is released. + */ +int platform_device_add_properties(struct platform_device *pdev, + const struct property_set *pset) +{ + return device_add_property_set(&pdev->dev, pset); +} +EXPORT_SYMBOL_GPL(platform_device_add_properties); + +/** * platform_device_add - add a platform device to device hierarchy * @pdev: platform device we're adding * @@ -409,6 +446,8 @@ void platform_device_del(struct platform_device *pdev) if (r->parent) release_resource(r); } + + device_remove_property_set(&pdev->dev); } } EXPORT_SYMBOL_GPL(platform_device_del); @@ -487,6 +526,12 @@ struct platform_device *platform_device_register_full( if (ret) goto err; + if (pdevinfo->pset) { + ret = platform_device_add_properties(pdev, pdevinfo->pset); + if (ret) + goto err; + } + ret = platform_device_add(pdev); if (ret) { err: diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 60ee5591ee8f..c39b8617280f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -473,6 +473,7 @@ static int pm_clk_notify(struct notifier_block *nb, enable_clock(dev, NULL); } break; + case BUS_NOTIFY_DRIVER_NOT_BOUND: case BUS_NOTIFY_UNBOUND_DRIVER: if (clknb->con_ids[0]) { for (con_id = clknb->con_ids; *con_id; con_id++) diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index f32b802b98f4..f48e33385b3e 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -112,7 +112,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach); /** * dev_pm_domain_detach - Detach a device from its PM domain. - * @dev: Device to attach. 
+ * @dev: Device to detach. * @power_off: Used to indicate whether we should power off the device. * * This functions will reverse the actions from dev_pm_domain_attach() and thus diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 167418e73445..b80379012840 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) struct generic_pm_domain *genpd; bool (*stop_ok)(struct device *__dev); struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); ktime_t time_start; s64 elapsed_ns; int ret; @@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; + /* + * A runtime PM centric subsystem/driver may re-use the runtime PM + * callbacks for other purposes than runtime PM. In those scenarios + * runtime PM is disabled. Under these circumstances, we shall skip + * validating/measuring the PM QoS latency. + */ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; - if (stop_ok && !stop_ok(dev)) + if (runtime_pm && stop_ok && !stop_ok(dev)) return -EBUSY; /* Measure suspend latency. */ - time_start = ktime_get(); + if (runtime_pm) + time_start = ktime_get(); ret = genpd_save_dev(genpd, dev); if (ret) @@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev) } /* Update suspend latency value if the measured time exceeds it. */ - elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); - if (elapsed_ns > td->suspend_latency_ns) { - td->suspend_latency_ns = elapsed_ns; - dev_dbg(dev, "suspend latency exceeded, %lld ns\n", - elapsed_ns); - genpd->max_off_time_changed = true; - td->constraint_changed = true; + if (runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->suspend_latency_ns) { + td->suspend_latency_ns = elapsed_ns; + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } } /* @@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); ktime_t time_start; s64 elapsed_ns; int ret; @@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev) out: /* Measure resume latency. */ - if (timed) + if (timed && runtime_pm) time_start = ktime_get(); genpd_start_dev(genpd, dev); genpd_restore_dev(genpd, dev); /* Update resume latency value if the measured time exceeds it. */ - if (timed) { + if (timed && runtime_pm) { elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); if (elapsed_ns > td->resume_latency_ns) { td->resume_latency_ns = elapsed_ns; @@ -1252,6 +1263,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, return ret; } +EXPORT_SYMBOL_GPL(__pm_genpd_add_device); /** * pm_genpd_remove_device - Remove a device from an I/O PM domain. @@ -1302,6 +1314,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, return ret; } +EXPORT_SYMBOL_GPL(pm_genpd_remove_device); /** * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 
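
With __pm_genpd_add_device() and pm_genpd_remove_device() now exported, a genpd provider built as a module can attach its devices to a power domain directly. A rough sketch under stated assumptions: the third argument of __pm_genpd_add_device() is taken to be the optional timing data (NULL for defaults) and pm_genpd_init() is assumed to take the governor and initial-power-state arguments shown; the my_* names are hypothetical:

        #include <linux/pm_domain.h>
        #include <linux/platform_device.h>

        /* Hypothetical domain; .power_on/.power_off callbacks omitted here. */
        static struct generic_pm_domain my_genpd = {
                .name = "my_pd",
        };

        static int my_pd_probe(struct platform_device *pdev)
        {
                /* NULL governor, domain assumed initially powered on. */
                pm_genpd_init(&my_genpd, NULL, false);

                /* NULL timing data keeps genpd's default latency bookkeeping. */
                return __pm_genpd_add_device(&my_genpd, &pdev->dev, NULL);
        }

        static int my_pd_remove(struct platform_device *pdev)
        {
                return pm_genpd_remove_device(&my_genpd, &pdev->dev);
        }

Note that with the change above, a device whose runtime PM is disabled no longer has its suspend/resume latency measured or validated against the stop_ok governor check in pm_genpd_runtime_suspend()/pm_genpd_runtime_resume().
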
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 1710c26ba097..9d626ac08d9c 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -963,6 +963,9 @@ void dpm_complete(pm_message_t state) } list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); + + /* Allow device probing and trigger re-probing of deferred devices */ + device_unblock_probing(); trace_suspend_resume(TPS("dpm_complete"), state.event, false); } @@ -1624,6 +1627,20 @@ int dpm_prepare(pm_message_t state) trace_suspend_resume(TPS("dpm_prepare"), state.event, true); might_sleep(); + /* + * Give a chance for the known devices to complete their probes, before + * disable probing of devices. This sync point is important at least + * at boot time + hibernation restore. + */ + wait_for_device_probe(); + /* + * It is unsafe if probing of devices will happen during suspend or + * hibernation and system behavior will be unpredictable in this case. + * So, let's prohibit device's probing here and defer their probes + * instead. The normal behavior will be restored in dpm_complete(). + */ + device_block_probing(); + mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.next); diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile index 33c1e18c41a4..19837ef04d8e 100644 --- a/drivers/base/power/opp/Makefile +++ b/drivers/base/power/opp/Makefile @@ -1,2 +1,3 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG obj-y += core.o cpu.o +obj-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index b8e76f75073b..cf351d3dab1c 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -463,6 +463,7 @@ static void _kfree_list_dev_rcu(struct rcu_head *head) static void _remove_list_dev(struct device_list_opp *list_dev, struct device_opp *dev_opp) { + opp_debug_unregister(list_dev, dev_opp); list_del(&list_dev->node); call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, _kfree_list_dev_rcu); @@ -472,6 +473,7 @@ struct device_list_opp *_add_list_dev(const struct device *dev, struct device_opp *dev_opp) { struct device_list_opp *list_dev; + int ret; list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); if (!list_dev) @@ -481,6 +483,12 @@ struct device_list_opp *_add_list_dev(const struct device *dev, list_dev->dev = dev; list_add_rcu(&list_dev->node, &dev_opp->dev_list); + /* Create debugfs entries for the dev_opp */ + ret = opp_debug_register(list_dev, dev_opp); + if (ret) + dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", + __func__, ret); + return list_dev; } @@ -551,6 +559,12 @@ static void _remove_device_opp(struct device_opp *dev_opp) if (!list_empty(&dev_opp->opp_list)) return; + if (dev_opp->supported_hw) + return; + + if (dev_opp->prop_name) + return; + list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, node); @@ -596,6 +610,7 @@ static void _opp_remove(struct device_opp *dev_opp, */ if (notify) srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); + opp_debug_remove_one(opp); list_del_rcu(&opp->node); call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); @@ -673,6 +688,7 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, { struct dev_pm_opp *opp; struct list_head *head = &dev_opp->opp_list; + int ret; /* * Insert new OPP in order of increasing frequency and discard if @@ -703,6 +719,11 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, new_opp->dev_opp = 
dev_opp; list_add_rcu(&new_opp->node, head); + ret = opp_debug_create_one(new_opp, dev_opp); + if (ret) + dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", + __func__, ret); + return 0; } @@ -776,35 +797,49 @@ unlock: } /* TODO: Support multiple regulators */ -static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) +static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, + struct device_opp *dev_opp) { u32 microvolt[3] = {0}; u32 val; int count, ret; + struct property *prop = NULL; + char name[NAME_MAX]; + + /* Search for "opp-microvolt-<name>" */ + if (dev_opp->prop_name) { + snprintf(name, sizeof(name), "opp-microvolt-%s", + dev_opp->prop_name); + prop = of_find_property(opp->np, name, NULL); + } - /* Missing property isn't a problem, but an invalid entry is */ - if (!of_find_property(opp->np, "opp-microvolt", NULL)) - return 0; + if (!prop) { + /* Search for "opp-microvolt" */ + sprintf(name, "opp-microvolt"); + prop = of_find_property(opp->np, name, NULL); - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + /* Missing property isn't a problem, but an invalid entry is */ + if (!prop) + return 0; + } + + count = of_property_count_u32_elems(opp->np, name); if (count < 0) { - dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", - __func__, count); + dev_err(dev, "%s: Invalid %s property (%d)\n", + __func__, name, count); return count; } /* There can be one or three elements here */ if (count != 1 && count != 3) { - dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", - __func__, count); + dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", + __func__, name, count); return -EINVAL; } - ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, - count); + ret = of_property_read_u32_array(opp->np, name, microvolt, count); if (ret) { - dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, - ret); + dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); return -EINVAL; } @@ -812,13 +847,272 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) opp->u_volt_min = microvolt[1]; opp->u_volt_max = microvolt[2]; - if (!of_property_read_u32(opp->np, "opp-microamp", &val)) + /* Search for "opp-microamp-<name>" */ + prop = NULL; + if (dev_opp->prop_name) { + snprintf(name, sizeof(name), "opp-microamp-%s", + dev_opp->prop_name); + prop = of_find_property(opp->np, name, NULL); + } + + if (!prop) { + /* Search for "opp-microamp" */ + sprintf(name, "opp-microamp"); + prop = of_find_property(opp->np, name, NULL); + } + + if (prop && !of_property_read_u32(opp->np, name, &val)) opp->u_amp = val; return 0; } /** + * dev_pm_opp_set_supported_hw() - Set supported platforms + * @dev: Device for which supported-hw has to be set. + * @versions: Array of hierarchy of versions to match. + * @count: Number of elements in the array. + * + * This is required only for the V2 bindings, and it enables a platform to + * specify the hierarchy of versions it supports. OPP layer will then enable + * OPPs, which are available for those versions, based on its 'opp-supported-hw' + * property. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
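A minimal usage sketch for dev_pm_opp_set_supported_hw() as documented above (not part of the diff; the version values and the device are hypothetical). The hierarchy is registered before the OPP table is parsed so that entries whose "opp-supported-hw" masks do not match are skipped:

static const u32 example_versions[] = { 2, 1 };

static int example_opp_init(struct device *cpu_dev)
{
        int ret;

        ret = dev_pm_opp_set_supported_hw(cpu_dev, example_versions,
                                          ARRAY_SIZE(example_versions));
        if (ret)
                return ret;

        ret = dev_pm_opp_of_add_table(cpu_dev);
        if (ret)
                dev_pm_opp_put_supported_hw(cpu_dev);

        return ret;
}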
+ */ +int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, + unsigned int count) +{ + struct device_opp *dev_opp; + int ret = 0; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = _add_device_opp(dev); + if (!dev_opp) { + ret = -ENOMEM; + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + /* Do we already have a version hierarchy associated with dev_opp? */ + if (dev_opp->supported_hw) { + dev_err(dev, "%s: Already have supported hardware list\n", + __func__); + ret = -EBUSY; + goto err; + } + + dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions), + GFP_KERNEL); + if (!dev_opp->supported_hw) { + ret = -ENOMEM; + goto err; + } + + dev_opp->supported_hw_count = count; + mutex_unlock(&dev_opp_list_lock); + return 0; + +err: + _remove_device_opp(dev_opp); +unlock: + mutex_unlock(&dev_opp_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); + +/** + * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw + * @dev: Device for which supported-hw has to be set. + * + * This is required only for the V2 bindings, and is called for a matching + * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure + * will not be freed. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_put_supported_hw(struct device *dev) +{ + struct device_opp *dev_opp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp)); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + if (!dev_opp->supported_hw) { + dev_err(dev, "%s: Doesn't have supported hardware list\n", + __func__); + goto unlock; + } + + kfree(dev_opp->supported_hw); + dev_opp->supported_hw = NULL; + dev_opp->supported_hw_count = 0; + + /* Try freeing device_opp if this was the last blocking resource */ + _remove_device_opp(dev_opp); + +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); + +/** + * dev_pm_opp_set_prop_name() - Set prop-extn name + * @dev: Device for which the regulator has to be set. + * @name: name to postfix to properties. + * + * This is required only for the V2 bindings, and it enables a platform to + * specify the extn to be used for certain property names. The properties to + * which the extension will apply are opp-microvolt and opp-microamp. OPP core + * should postfix the property name with -<name> while looking for them. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
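A matching sketch for dev_pm_opp_set_prop_name() (not part of the diff; the "speed0" name and the device are hypothetical). With this prop-name set, the parser above looks for "opp-microvolt-speed0" / "opp-microamp-speed0" before falling back to the plain property names:

static int example_prop_name_init(struct device *cpu_dev)
{
        int ret;

        ret = dev_pm_opp_set_prop_name(cpu_dev, "speed0");
        if (ret)
                return ret;

        ret = dev_pm_opp_of_add_table(cpu_dev);
        if (ret)
                dev_pm_opp_put_prop_name(cpu_dev);

        return ret;
}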
+ */ +int dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + struct device_opp *dev_opp; + int ret = 0; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = _add_device_opp(dev); + if (!dev_opp) { + ret = -ENOMEM; + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + /* Do we already have a prop-name associated with dev_opp? */ + if (dev_opp->prop_name) { + dev_err(dev, "%s: Already have prop-name %s\n", __func__, + dev_opp->prop_name); + ret = -EBUSY; + goto err; + } + + dev_opp->prop_name = kstrdup(name, GFP_KERNEL); + if (!dev_opp->prop_name) { + ret = -ENOMEM; + goto err; + } + + mutex_unlock(&dev_opp_list_lock); + return 0; + +err: + _remove_device_opp(dev_opp); +unlock: + mutex_unlock(&dev_opp_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); + +/** + * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name + * @dev: Device for which the regulator has to be set. + * + * This is required only for the V2 bindings, and is called for a matching + * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure + * will not be freed. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_put_prop_name(struct device *dev) +{ + struct device_opp *dev_opp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp)); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + if (!dev_opp->prop_name) { + dev_err(dev, "%s: Doesn't have a prop-name\n", __func__); + goto unlock; + } + + kfree(dev_opp->prop_name); + dev_opp->prop_name = NULL; + + /* Try freeing device_opp if this was the last blocking resource */ + _remove_device_opp(dev_opp); + +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); + +static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp, + struct device_node *np) +{ + unsigned int count = dev_opp->supported_hw_count; + u32 version; + int ret; + + if (!dev_opp->supported_hw) + return true; + + while (count--) { + ret = of_property_read_u32_index(np, "opp-supported-hw", count, + &version); + if (ret) { + dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", + __func__, count, ret); + return false; + } + + /* Both of these are bitwise masks of the versions */ + if (!(version & dev_opp->supported_hw[count])) + return false; + } + + return true; +} + +/** * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) * @dev: device for which we do this operation * @np: device node @@ -864,6 +1158,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) goto free_opp; } + /* Check if the OPP supports hardware's hierarchy of versions or not */ + if (!_opp_is_supported(dev, dev_opp, np)) { + dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); + goto free_opp; + } + /* * Rate is 
defined as an unsigned long in clk API, and so casting * explicitly to its type. Must be fixed once rate is 64 bit @@ -879,7 +1179,7 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) if (!of_property_read_u32(np, "clock-latency-ns", &val)) new_opp->clock_latency_ns = val; - ret = opp_parse_supplies(new_opp, dev); + ret = opp_parse_supplies(new_opp, dev, dev_opp); if (ret) goto free_opp; @@ -889,12 +1189,14 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) /* OPP to select on device suspend */ if (of_property_read_bool(np, "opp-suspend")) { - if (dev_opp->suspend_opp) + if (dev_opp->suspend_opp) { dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", __func__, dev_opp->suspend_opp->rate, new_opp->rate); - else + } else { + new_opp->suspend = true; dev_opp->suspend_opp = new_opp; + } } if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c index 7b445e88a0d5..9f0c15570f64 100644 --- a/drivers/base/power/opp/cpu.c +++ b/drivers/base/power/opp/cpu.c @@ -214,7 +214,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); /* * Works only for OPP v2 bindings. * - * cpumask should be already set to mask of cpu_dev->id. * Returns -ENOENT if operating-points-v2 bindings aren't supported. */ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) @@ -230,6 +229,8 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask return -ENOENT; } + cpumask_set_cpu(cpu_dev->id, cpumask); + /* OPPs are shared ? */ if (!of_property_read_bool(np, "opp-shared")) goto put_cpu_node; diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c new file mode 100644 index 000000000000..ddfe4773e922 --- /dev/null +++ b/drivers/base/power/opp/debugfs.c @@ -0,0 +1,219 @@ +/* + * Generic OPP debugfs interface + * + * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
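One consequence of the cpu.c hunk above, shown as a short sketch (not part of the diff; cpu_dev and shared_cpus are hypothetical): callers no longer need to seed the mask with their own CPU before asking for the sharing information:

cpumask_clear(shared_cpus);
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared_cpus);
/* on success, shared_cpus holds cpu_dev->id plus every CPU sharing its OPP table */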
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/limits.h> + +#include "opp.h" + +static struct dentry *rootdir; + +static void opp_set_dev_name(const struct device *dev, char *name) +{ + if (dev->parent) + snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent), + dev_name(dev)); + else + snprintf(name, NAME_MAX, "%s", dev_name(dev)); +} + +void opp_debug_remove_one(struct dev_pm_opp *opp) +{ + debugfs_remove_recursive(opp->dentry); +} + +int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp) +{ + struct dentry *pdentry = dev_opp->dentry; + struct dentry *d; + char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */ + + /* Rate is unique to each OPP, use it to give opp-name */ + snprintf(name, sizeof(name), "opp:%lu", opp->rate); + + /* Create per-opp directory */ + d = debugfs_create_dir(name, pdentry); + if (!d) + return -ENOMEM; + + if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available)) + return -ENOMEM; + + if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic)) + return -ENOMEM; + + if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo)) + return -ENOMEM; + + if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend)) + return -ENOMEM; + + if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp)) + return -ENOMEM; + + if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, + &opp->clock_latency_ns)) + return -ENOMEM; + + opp->dentry = d; + return 0; +} + +static int device_opp_debug_create_dir(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + const struct device *dev = list_dev->dev; + struct dentry *d; + + opp_set_dev_name(dev, dev_opp->dentry_name); + + /* Create device specific directory */ + d = debugfs_create_dir(dev_opp->dentry_name, rootdir); + if (!d) { + dev_err(dev, "%s: Failed to create debugfs dir\n", __func__); + return -ENOMEM; + } + + list_dev->dentry = d; + dev_opp->dentry = d; + + return 0; +} + +static int device_opp_debug_create_link(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + const struct device *dev = list_dev->dev; + char name[NAME_MAX]; + struct dentry *d; + + opp_set_dev_name(list_dev->dev, name); + + /* Create device specific directory link */ + d = debugfs_create_symlink(name, rootdir, dev_opp->dentry_name); + if (!d) { + dev_err(dev, "%s: Failed to create link\n", __func__); + return -ENOMEM; + } + + list_dev->dentry = d; + + return 0; +} + +/** + * opp_debug_register - add a device opp node to the debugfs 'opp' directory + * @list_dev: list-dev pointer for device + * @dev_opp: the device-opp being added + * + * Dynamically adds device specific directory in debugfs 'opp' directory. If the + * device-opp is shared with other devices, then links will be created for all + * devices except the first. + * + * Return: 0 on success, otherwise negative error. 
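For orientation (not part of the diff; device names and rates are illustrative), the debugfs code above produces a layout along these lines, with the real directory named after the first registered device and symlinks for other devices that share the same OPP table:

/sys/kernel/debug/opp/
        cpu0/
                opp:1200000000/
                        available  dynamic  turbo  suspend
                        rate_hz  u_volt_target  u_volt_min  u_volt_max
                        u_amp  clock_latency_ns
        cpu1 -> cpu0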
+ */ +int opp_debug_register(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + if (!rootdir) { + pr_debug("%s: Uninitialized rootdir\n", __func__); + return -EINVAL; + } + + if (dev_opp->dentry) + return device_opp_debug_create_link(list_dev, dev_opp); + + return device_opp_debug_create_dir(list_dev, dev_opp); +} + +static void opp_migrate_dentry(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *new_dev; + const struct device *dev; + struct dentry *dentry; + + /* Look for next list-dev */ + list_for_each_entry(new_dev, &dev_opp->dev_list, node) + if (new_dev != list_dev) + break; + + /* new_dev is guaranteed to be valid here */ + dev = new_dev->dev; + debugfs_remove_recursive(new_dev->dentry); + + opp_set_dev_name(dev, dev_opp->dentry_name); + + dentry = debugfs_rename(rootdir, list_dev->dentry, rootdir, + dev_opp->dentry_name); + if (!dentry) { + dev_err(dev, "%s: Failed to rename link from: %s to %s\n", + __func__, dev_name(list_dev->dev), dev_name(dev)); + return; + } + + new_dev->dentry = dentry; + dev_opp->dentry = dentry; +} + +/** + * opp_debug_unregister - remove a device opp node from debugfs opp directory + * @list_dev: list-dev pointer for device + * @dev_opp: the device-opp being removed + * + * Dynamically removes device specific directory from debugfs 'opp' directory. + */ +void opp_debug_unregister(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + if (list_dev->dentry == dev_opp->dentry) { + /* Move the real dentry object under another device */ + if (!list_is_singular(&dev_opp->dev_list)) { + opp_migrate_dentry(list_dev, dev_opp); + goto out; + } + dev_opp->dentry = NULL; + } + + debugfs_remove_recursive(list_dev->dentry); + +out: + list_dev->dentry = NULL; +} + +static int __init opp_debug_init(void) +{ + /* Create /sys/kernel/debug/opp directory */ + rootdir = debugfs_create_dir("opp", NULL); + if (!rootdir) { + pr_err("%s: Failed to create root directory\n", __func__); + return -ENOMEM; + } + + return 0; +} +core_initcall(opp_debug_init); diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index 7366b2aa8997..690638ef36ee 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -17,6 +17,7 @@ #include <linux/device.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/limits.h> #include <linux/pm_opp.h> #include <linux/rculist.h> #include <linux/rcupdate.h> @@ -50,9 +51,10 @@ extern struct mutex dev_opp_list_lock; * are protected by the dev_opp_list_lock for integrity. * IMPORTANT: the opp nodes should be maintained in increasing * order. - * @dynamic: not-created from static DT entries. * @available: true/false - marks if this OPP as available or not + * @dynamic: not-created from static DT entries. * @turbo: true if turbo (boost) OPP + * @suspend: true if suspend OPP * @rate: Frequency in hertz * @u_volt: Target voltage in microvolts corresponding to this OPP * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP @@ -63,6 +65,7 @@ extern struct mutex dev_opp_list_lock; * @dev_opp: points back to the device_opp struct this opp belongs to * @rcu_head: RCU callback head used for deferred freeing * @np: OPP's device node. + * @dentry: debugfs dentry pointer (per opp) * * This structure stores the OPP information for a given device. 
*/ @@ -72,6 +75,7 @@ struct dev_pm_opp { bool available; bool dynamic; bool turbo; + bool suspend; unsigned long rate; unsigned long u_volt; @@ -84,6 +88,10 @@ struct dev_pm_opp { struct rcu_head rcu_head; struct device_node *np; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif }; /** @@ -91,6 +99,7 @@ struct dev_pm_opp { * @node: list node * @dev: device to which the struct object belongs * @rcu_head: RCU callback head used for deferred freeing + * @dentry: debugfs dentry pointer (per device) * * This is an internal data structure maintaining the list of devices that are * managed by 'struct device_opp'. @@ -99,6 +108,10 @@ struct device_list_opp { struct list_head node; const struct device *dev; struct rcu_head rcu_head; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif }; /** @@ -113,7 +126,14 @@ struct device_list_opp { * @dev_list: list of devices that share these OPPs * @opp_list: list of opps * @np: struct device_node pointer for opp's DT node. + * @clock_latency_ns_max: Max clock latency in nanoseconds. * @shared_opp: OPP is shared between multiple devices. + * @suspend_opp: Pointer to OPP to be used during device suspend. + * @supported_hw: Array of version number to support. + * @supported_hw_count: Number of elements in supported_hw array. + * @prop_name: A name to postfix to many DT properties, while parsing them. + * @dentry: debugfs dentry pointer of the real device directory (not links). + * @dentry_name: Name of the real dentry. * * This is an internal data structure maintaining the link to opps attached to * a device. This structure is not meant to be shared to users as it is @@ -135,6 +155,15 @@ struct device_opp { unsigned long clock_latency_ns_max; bool shared_opp; struct dev_pm_opp *suspend_opp; + + unsigned int *supported_hw; + unsigned int supported_hw_count; + const char *prop_name; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; + char dentry_name[NAME_MAX]; +#endif }; /* Routines internal to opp core */ @@ -143,4 +172,26 @@ struct device_list_opp *_add_list_dev(const struct device *dev, struct device_opp *dev_opp); struct device_node *_of_get_opp_desc_node(struct device *dev); +#ifdef CONFIG_DEBUG_FS +void opp_debug_remove_one(struct dev_pm_opp *opp); +int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp); +int opp_debug_register(struct device_list_opp *list_dev, + struct device_opp *dev_opp); +void opp_debug_unregister(struct device_list_opp *list_dev, + struct device_opp *dev_opp); +#else +static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} + +static inline int opp_debug_create_one(struct dev_pm_opp *opp, + struct device_opp *dev_opp) +{ return 0; } +static inline int opp_debug_register(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ return 0; } + +static inline void opp_debug_unregister(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ } +#endif /* DEBUG_FS */ + #endif /* __DRIVER_OPP_H__ */ diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 998fa6b23084..8b06193d4a5e 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -18,6 +18,7 @@ static inline void pm_runtime_early_init(struct device *dev) } extern void pm_runtime_init(struct device *dev); +extern void pm_runtime_reinit(struct device *dev); extern void pm_runtime_remove(struct device *dev); struct wake_irq { @@ -84,6 +85,7 @@ static inline void pm_runtime_early_init(struct device *dev) } static inline void pm_runtime_init(struct device *dev) {} +static inline void 
pm_runtime_reinit(struct device *dev) {} static inline void pm_runtime_remove(struct device *dev) {} static inline int dpm_sysfs_add(struct device *dev) { return 0; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index e1a10a03df8e..4c7055009bd6 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -966,6 +966,30 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** + * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter. + * @dev: Device to handle. + * + * Return -EINVAL if runtime PM is disabled for the device. + * + * If that's not the case and if the device's runtime PM status is RPM_ACTIVE + * and the runtime PM usage counter is nonzero, increment the counter and + * return 1. Otherwise return 0 without changing the counter. + */ +int pm_runtime_get_if_in_use(struct device *dev) +{ + unsigned long flags; + int retval; + + spin_lock_irqsave(&dev->power.lock, flags); + retval = dev->power.disable_depth > 0 ? -EINVAL : + dev->power.runtime_status == RPM_ACTIVE + && atomic_inc_not_zero(&dev->power.usage_count); + spin_unlock_irqrestore(&dev->power.lock, flags); + return retval; +} +EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); + +/** * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. * @status: New runtime PM status of the device. @@ -1390,18 +1414,32 @@ void pm_runtime_init(struct device *dev) } /** + * pm_runtime_reinit - Re-initialize runtime PM fields in given device object. + * @dev: Device object to re-initialize. + */ +void pm_runtime_reinit(struct device *dev) +{ + if (!pm_runtime_enabled(dev)) { + if (dev->power.runtime_status == RPM_ACTIVE) + pm_runtime_set_suspended(dev); + if (dev->power.irq_safe) { + spin_lock_irq(&dev->power.lock); + dev->power.irq_safe = 0; + spin_unlock_irq(&dev->power.lock); + if (dev->parent) + pm_runtime_put(dev->parent); + } + } +} + +/** * pm_runtime_remove - Prepare for removing a device from device hierarchy. * @dev: Device object being removed from device hierarchy. */ void pm_runtime_remove(struct device *dev) { __pm_runtime_disable(dev, false); - - /* Change the status back to 'suspended' to match the initial status. */ - if (dev->power.runtime_status == RPM_ACTIVE) - pm_runtime_set_suspended(dev); - if (dev->power.irq_safe && dev->parent) - pm_runtime_put(dev->parent); + pm_runtime_reinit(dev); } /** diff --git a/drivers/base/property.c b/drivers/base/property.c index 1325ff225cc4..c359351d50f1 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -19,32 +19,14 @@ #include <linux/etherdevice.h> #include <linux/phy.h> -/** - * device_add_property_set - Add a collection of properties to a device object. - * @dev: Device to add properties to. - * @pset: Collection of properties to add. - * - * Associate a collection of device properties represented by @pset with @dev - * as its secondary firmware node. 
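A minimal sketch of the intended calling pattern for pm_runtime_get_if_in_use() added above (not part of the diff; the function and device are hypothetical). A positive return means the usage counter was taken and the device is guaranteed to stay RPM_ACTIVE until the matching put:

static void example_poke_hw(struct device *dev)
{
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return;         /* suspended, suspending, or runtime PM disabled */

        /* ... safe to touch registers here ... */

        pm_runtime_put(dev);
}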
- */ -void device_add_property_set(struct device *dev, struct property_set *pset) -{ - if (!pset) - return; - - pset->fwnode.type = FWNODE_PDATA; - set_secondary_fwnode(dev, &pset->fwnode); -} -EXPORT_SYMBOL_GPL(device_add_property_set); - -static inline bool is_pset(struct fwnode_handle *fwnode) +static inline bool is_pset_node(struct fwnode_handle *fwnode) { return fwnode && fwnode->type == FWNODE_PDATA; } -static inline struct property_set *to_pset(struct fwnode_handle *fwnode) +static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) { - return is_pset(fwnode) ? + return is_pset_node(fwnode) ? container_of(fwnode, struct property_set, fwnode) : NULL; } @@ -63,45 +45,135 @@ static struct property_entry *pset_prop_get(struct property_set *pset, return NULL; } -static int pset_prop_read_array(struct property_set *pset, const char *name, - enum dev_prop_type type, void *val, size_t nval) +static void *pset_prop_find(struct property_set *pset, const char *propname, + size_t length) { struct property_entry *prop; - unsigned int item_size; + void *pointer; - prop = pset_prop_get(pset, name); + prop = pset_prop_get(pset, propname); if (!prop) - return -ENODATA; + return ERR_PTR(-EINVAL); + if (prop->is_array) + pointer = prop->pointer.raw_data; + else + pointer = &prop->value.raw_data; + if (!pointer) + return ERR_PTR(-ENODATA); + if (length > prop->length) + return ERR_PTR(-EOVERFLOW); + return pointer; +} + +static int pset_prop_read_u8_array(struct property_set *pset, + const char *propname, + u8 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_read_u16_array(struct property_set *pset, + const char *propname, + u16 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_read_u32_array(struct property_set *pset, + const char *propname, + u32 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_read_u64_array(struct property_set *pset, + const char *propname, + u64 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_count_elems_of_size(struct property_set *pset, + const char *propname, size_t length) +{ + struct property_entry *prop; + + prop = pset_prop_get(pset, propname); + if (!prop) + return -EINVAL; + + return prop->length / length; +} + +static int pset_prop_read_string_array(struct property_set *pset, + const char *propname, + const char **strings, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*strings); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(strings, pointer, length); + return 0; +} + +static int pset_prop_read_string(struct property_set *pset, + const char *propname, const char **strings) +{ + struct property_entry *prop; + const char 
**pointer; - if (prop->type != type) - return -EPROTO; - - if (!val) - return prop->nval; - - if (prop->nval < nval) - return -EOVERFLOW; - - switch (type) { - case DEV_PROP_U8: - item_size = sizeof(u8); - break; - case DEV_PROP_U16: - item_size = sizeof(u16); - break; - case DEV_PROP_U32: - item_size = sizeof(u32); - break; - case DEV_PROP_U64: - item_size = sizeof(u64); - break; - case DEV_PROP_STRING: - item_size = sizeof(const char *); - break; - default: + prop = pset_prop_get(pset, propname); + if (!prop) return -EINVAL; + if (!prop->is_string) + return -EILSEQ; + if (prop->is_array) { + pointer = prop->pointer.str; + if (!pointer) + return -ENODATA; + } else { + pointer = &prop->value.str; + if (*pointer && strnlen(*pointer, prop->length) >= prop->length) + return -EILSEQ; } - memcpy(val, prop->value.raw_data, nval * item_size); + + *strings = *pointer; return 0; } @@ -124,6 +196,18 @@ bool device_property_present(struct device *dev, const char *propname) } EXPORT_SYMBOL_GPL(device_property_present); +static bool __fwnode_property_present(struct fwnode_handle *fwnode, + const char *propname) +{ + if (is_of_node(fwnode)) + return of_property_read_bool(to_of_node(fwnode), propname); + else if (is_acpi_node(fwnode)) + return !acpi_node_prop_get(fwnode, propname, NULL); + else if (is_pset_node(fwnode)) + return !!pset_prop_get(to_pset_node(fwnode), propname); + return false; +} + /** * fwnode_property_present - check if a property of a firmware node is present * @fwnode: Firmware node whose property to check @@ -131,12 +215,12 @@ EXPORT_SYMBOL_GPL(device_property_present); */ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) { - if (is_of_node(fwnode)) - return of_property_read_bool(to_of_node(fwnode), propname); - else if (is_acpi_node(fwnode)) - return !acpi_node_prop_get(fwnode, propname, NULL); + bool ret; - return !!pset_prop_get(to_pset(fwnode), propname); + ret = __fwnode_property_present(fwnode, propname); + if (ret == false && fwnode && fwnode->secondary) + ret = __fwnode_property_present(fwnode->secondary, propname); + return ret; } EXPORT_SYMBOL_GPL(fwnode_property_present); @@ -309,25 +393,40 @@ int device_property_match_string(struct device *dev, const char *propname, } EXPORT_SYMBOL_GPL(device_property_match_string); -#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \ - (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \ +#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \ + (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \ : of_property_count_elems_of_size((node), (propname), sizeof(type)) -#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ -({ \ - int _ret_; \ - if (is_of_node(_fwnode_)) \ - _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ - _type_, _val_, _nval_); \ - else if (is_acpi_node(_fwnode_)) \ - _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \ - _val_, _nval_); \ - else if (is_pset(_fwnode_)) \ - _ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \ - _proptype_, _val_, _nval_); \ - else \ - _ret_ = -ENXIO; \ - _ret_; \ +#define PSET_PROP_READ_ARRAY(node, propname, type, val, nval) \ + (val) ? 
pset_prop_read_##type##_array((node), (propname), (val), (nval)) \ + : pset_prop_count_elems_of_size((node), (propname), sizeof(type)) + +#define FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ +({ \ + int _ret_; \ + if (is_of_node(_fwnode_)) \ + _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ + _type_, _val_, _nval_); \ + else if (is_acpi_node(_fwnode_)) \ + _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \ + _val_, _nval_); \ + else if (is_pset_node(_fwnode_)) \ + _ret_ = PSET_PROP_READ_ARRAY(to_pset_node(_fwnode_), _propname_, \ + _type_, _val_, _nval_); \ + else \ + _ret_ = -ENXIO; \ + _ret_; \ +}) + +#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ +({ \ + int _ret_; \ + _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ + _val_, _nval_); \ + if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \ + _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ + _proptype_, _val_, _nval_); \ + _ret_; \ }) /** @@ -434,6 +533,41 @@ int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); +static int __fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + if (is_of_node(fwnode)) + return val ? + of_property_read_string_array(to_of_node(fwnode), + propname, val, nval) : + of_property_count_strings(to_of_node(fwnode), propname); + else if (is_acpi_node(fwnode)) + return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, + val, nval); + else if (is_pset_node(fwnode)) + return val ? + pset_prop_read_string_array(to_pset_node(fwnode), + propname, val, nval) : + pset_prop_count_elems_of_size(to_pset_node(fwnode), + propname, + sizeof(const char *)); + return -ENXIO; +} + +static int __fwnode_property_read_string(struct fwnode_handle *fwnode, + const char *propname, const char **val) +{ + if (is_of_node(fwnode)) + return of_property_read_string(to_of_node(fwnode), propname, val); + else if (is_acpi_node(fwnode)) + return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, + val, 1); + else if (is_pset_node(fwnode)) + return pset_prop_read_string(to_pset_node(fwnode), propname, val); + return -ENXIO; +} + /** * fwnode_property_read_string_array - return string array property of a node * @fwnode: Firmware node to get the property of @@ -456,18 +590,13 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { - if (is_of_node(fwnode)) - return val ? 
- of_property_read_string_array(to_of_node(fwnode), - propname, val, nval) : - of_property_count_strings(to_of_node(fwnode), propname); - else if (is_acpi_node(fwnode)) - return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, - val, nval); - else if (is_pset(fwnode)) - return pset_prop_read_array(to_pset(fwnode), propname, - DEV_PROP_STRING, val, nval); - return -ENXIO; + int ret; + + ret = __fwnode_property_read_string_array(fwnode, propname, val, nval); + if (ret == -EINVAL && fwnode && fwnode->secondary) + ret = __fwnode_property_read_string_array(fwnode->secondary, + propname, val, nval); + return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); @@ -489,14 +618,13 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val) { - if (is_of_node(fwnode)) - return of_property_read_string(to_of_node(fwnode), propname, val); - else if (is_acpi_node(fwnode)) - return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, - val, 1); + int ret; - return pset_prop_read_array(to_pset(fwnode), propname, - DEV_PROP_STRING, val, 1); + ret = __fwnode_property_read_string(fwnode, propname, val); + if (ret == -EINVAL && fwnode && fwnode->secondary) + ret = __fwnode_property_read_string(fwnode->secondary, + propname, val); + return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string); @@ -525,6 +653,9 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode, if (nval < 0) return nval; + if (nval == 0) + return -ENODATA; + values = kcalloc(nval, sizeof(*values), GFP_KERNEL); if (!values) return -ENOMEM; @@ -547,6 +678,182 @@ out: EXPORT_SYMBOL_GPL(fwnode_property_match_string); /** + * pset_free_set - releases memory allocated for copied property set + * @pset: Property set to release + * + * Function takes previously copied property set and releases all the + * memory allocated to it. 
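A sketch of how a property set is built so that platform_device_add_properties() can deep-copy it with pset_copy_set() (not part of the diff; the property names and values are hypothetical, and only fields visible in the hunks above are used). Once attached as the secondary firmware node, the entries are found by the fwnode_property_*() fallback added earlier in this file:

static struct property_entry example_props[] = {
        {
                .name = "example,clock-frequency",
                .length = sizeof(u32),
                .value = { .raw_data = 48000000 },
        },
        {
                .name = "label",
                .length = sizeof("demo"),
                .is_string = true,
                .value = { .str = "demo" },
        },
        { }     /* sentinel: pset_copy_set() stops at .name == NULL */
};

static const struct property_set example_pset = {
        .properties = example_props,
};

/* in platform setup code, before platform_device_add(): */
ret = platform_device_add_properties(pdev, &example_pset);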
+ */ +static void pset_free_set(struct property_set *pset) +{ + const struct property_entry *prop; + size_t i, nval; + + if (!pset) + return; + + for (prop = pset->properties; prop->name; prop++) { + if (prop->is_array) { + if (prop->is_string && prop->pointer.str) { + nval = prop->length / sizeof(const char *); + for (i = 0; i < nval; i++) + kfree(prop->pointer.str[i]); + } + kfree(prop->pointer.raw_data); + } else if (prop->is_string) { + kfree(prop->value.str); + } + kfree(prop->name); + } + + kfree(pset->properties); + kfree(pset); +} + +static int pset_copy_entry(struct property_entry *dst, + const struct property_entry *src) +{ + const char **d, **s; + size_t i, nval; + + dst->name = kstrdup(src->name, GFP_KERNEL); + if (!dst->name) + return -ENOMEM; + + if (src->is_array) { + if (!src->length) + return -ENODATA; + + if (src->is_string) { + nval = src->length / sizeof(const char *); + dst->pointer.str = kcalloc(nval, sizeof(const char *), + GFP_KERNEL); + if (!dst->pointer.str) + return -ENOMEM; + + d = dst->pointer.str; + s = src->pointer.str; + for (i = 0; i < nval; i++) { + d[i] = kstrdup(s[i], GFP_KERNEL); + if (!d[i] && s[i]) + return -ENOMEM; + } + } else { + dst->pointer.raw_data = kmemdup(src->pointer.raw_data, + src->length, GFP_KERNEL); + if (!dst->pointer.raw_data) + return -ENOMEM; + } + } else if (src->is_string) { + dst->value.str = kstrdup(src->value.str, GFP_KERNEL); + if (!dst->value.str && src->value.str) + return -ENOMEM; + } else { + dst->value.raw_data = src->value.raw_data; + } + + dst->length = src->length; + dst->is_array = src->is_array; + dst->is_string = src->is_string; + + return 0; +} + +/** + * pset_copy_set - copies property set + * @pset: Property set to copy + * + * This function takes a deep copy of the given property set and returns + * pointer to the copy. Call device_free_property_set() to free resources + * allocated in this function. + * + * Return: Pointer to the new property set or error pointer. + */ +static struct property_set *pset_copy_set(const struct property_set *pset) +{ + const struct property_entry *entry; + struct property_set *p; + size_t i, n = 0; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + while (pset->properties[n].name) + n++; + + p->properties = kcalloc(n + 1, sizeof(*entry), GFP_KERNEL); + if (!p->properties) { + kfree(p); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < n; i++) { + int ret = pset_copy_entry(&p->properties[i], + &pset->properties[i]); + if (ret) { + pset_free_set(p); + return ERR_PTR(ret); + } + } + + return p; +} + +/** + * device_remove_property_set - Remove properties from a device object. + * @dev: Device whose properties to remove. + * + * The function removes properties previously associated to the device + * secondary firmware node with device_add_property_set(). Memory allocated + * to the properties will also be released. + */ +void device_remove_property_set(struct device *dev) +{ + struct fwnode_handle *fwnode; + + fwnode = dev_fwnode(dev); + if (!fwnode) + return; + /* + * Pick either primary or secondary node depending which one holds + * the pset. If there is no real firmware node (ACPI/DT) primary + * will hold the pset. + */ + if (!is_pset_node(fwnode)) + fwnode = fwnode->secondary; + if (!IS_ERR(fwnode) && is_pset_node(fwnode)) + pset_free_set(to_pset_node(fwnode)); + set_secondary_fwnode(dev, NULL); +} +EXPORT_SYMBOL_GPL(device_remove_property_set); + +/** + * device_add_property_set - Add a collection of properties to a device object. 
+ * @dev: Device to add properties to. + * @pset: Collection of properties to add. + * + * Associate a collection of device properties represented by @pset with @dev + * as its secondary firmware node. The function takes a copy of @pset. + */ +int device_add_property_set(struct device *dev, const struct property_set *pset) +{ + struct property_set *p; + + if (!pset) + return -EINVAL; + + p = pset_copy_set(pset); + if (IS_ERR(p)) + return PTR_ERR(p); + + p->fwnode.type = FWNODE_PDATA; + set_secondary_fwnode(dev, &p->fwnode); + return 0; +} +EXPORT_SYMBOL_GPL(device_add_property_set); + +/** * device_get_next_child_node - Return the next child node handle for a device * @dev: Device to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c index 0246f44ded74..686c9e0b930e 100644 --- a/drivers/base/regmap/regcache-flat.c +++ b/drivers/base/regmap/regcache-flat.c @@ -21,7 +21,7 @@ static int regcache_flat_init(struct regmap *map) int i; unsigned int *cache; - map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1), + map->cache = kcalloc(map->max_register + 1, sizeof(unsigned int), GFP_KERNEL); if (!map->cache) return -ENOMEM; diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c index 736e0d378567..6f77d7319fc6 100644 --- a/drivers/base/regmap/regcache-lzo.c +++ b/drivers/base/regmap/regcache-lzo.c @@ -139,7 +139,7 @@ static int regcache_lzo_init(struct regmap *map) ret = 0; blkcount = regcache_lzo_block_count(map); - map->cache = kzalloc(blkcount * sizeof *lzo_blocks, + map->cache = kcalloc(blkcount, sizeof(*lzo_blocks), GFP_KERNEL); if (!map->cache) return -ENOMEM; @@ -152,8 +152,8 @@ static int regcache_lzo_init(struct regmap *map) * that register. 
*/ bmp_size = map->num_reg_defaults_raw; - sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long), - GFP_KERNEL); + sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long), + GFP_KERNEL); if (!sync_bmp) { ret = -ENOMEM; goto err; diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 56486d92c4e7..aa56af87d941 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -361,13 +361,14 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) rbnode->base_reg = reg; } - rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, - GFP_KERNEL); + rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, + GFP_KERNEL); if (!rbnode->block) goto err_free; - rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) * - sizeof(*rbnode->cache_present), GFP_KERNEL); + rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), + sizeof(*rbnode->cache_present), + GFP_KERNEL); if (!rbnode->cache_present) goto err_free_block; @@ -413,8 +414,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, max = reg + max_dist; /* look for an adjacent register to the one we are about to add */ - for (node = rb_first(&rbtree_ctx->root); node; - node = rb_next(node)) { + node = rbtree_ctx->root.rb_node; + while (node) { rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node); @@ -425,6 +426,11 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, new_base_reg = min(reg, base_reg); new_top_reg = max(reg, top_reg); } else { + if (max < base_reg) + node = node->rb_left; + else + node = node->rb_right; + continue; } diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 4c07802986b2..348be3a35410 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -100,15 +100,25 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) int i; void *tmp_buf; - for (i = 0; i < config->num_reg_defaults; i++) - if (config->reg_defaults[i].reg % map->reg_stride) - return -EINVAL; - if (map->cache_type == REGCACHE_NONE) { + if (config->reg_defaults || config->num_reg_defaults_raw) + dev_warn(map->dev, + "No cache used with register defaults set!\n"); + map->cache_bypass = true; return 0; } + if (config->reg_defaults && !config->num_reg_defaults) { + dev_err(map->dev, + "Register defaults are set without the number!\n"); + return -EINVAL; + } + + for (i = 0; i < config->num_reg_defaults; i++) + if (config->reg_defaults[i].reg % map->reg_stride) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(cache_types); i++) if (cache_types[i]->type == map->cache_type) break; @@ -138,8 +148,6 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) * a copy of it. 
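The stricter checks added to regcache_init() above can be illustrated with a sketch (not part of the diff; registers and values are hypothetical): supplying reg_defaults while leaving num_reg_defaults at zero is now rejected with -EINVAL at init time rather than surfacing later:

static const struct reg_default example_defaults[] = {
        { .reg = 0x00, .def = 0x12 },
        { .reg = 0x04, .def = 0x00 },
};

static const struct regmap_config example_cfg = {
        .reg_bits         = 8,
        .val_bits         = 8,
        .max_register     = 0x7f,
        .cache_type       = REGCACHE_RBTREE,
        .reg_defaults     = example_defaults,
        .num_reg_defaults = ARRAY_SIZE(example_defaults), /* omit this and registration now fails */
};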
*/ if (config->reg_defaults) { - if (!map->num_reg_defaults) - return -EINVAL; tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * sizeof(struct reg_default), GFP_KERNEL); if (!tmp_buf) @@ -535,19 +543,30 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, switch (map->cache_word_size) { case 1: { u8 *cache = base; + cache[idx] = val; break; } case 2: { u16 *cache = base; + cache[idx] = val; break; } case 4: { u32 *cache = base; + + cache[idx] = val; + break; + } +#ifdef CONFIG_64BIT + case 8: { + u64 *cache = base; + cache[idx] = val; break; } +#endif default: BUG(); } @@ -568,16 +587,26 @@ unsigned int regcache_get_val(struct regmap *map, const void *base, switch (map->cache_word_size) { case 1: { const u8 *cache = base; + return cache[idx]; } case 2: { const u16 *cache = base; + return cache[idx]; } case 4: { const u32 *cache = base; + + return cache[idx]; + } +#ifdef CONFIG_64BIT + case 8: { + const u64 *cache = base; + return cache[idx]; } +#endif default: BUG(); } diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 3f0a7e262d69..1ee3d40861c7 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -397,72 +397,39 @@ static const struct file_operations regmap_reg_ranges_fops = { .llseek = default_llseek, }; -static ssize_t regmap_access_read_file(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) +static int regmap_access_show(struct seq_file *s, void *ignored) { - int reg_len, tot_len; - size_t buf_pos = 0; - loff_t p = 0; - ssize_t ret; - int i; - struct regmap *map = file->private_data; - char *buf; - - if (*ppos < 0 || !count) - return -EINVAL; + struct regmap *map = s->private; + int i, reg_len; - buf = kmalloc(count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - /* Calculate the length of a fixed format */ reg_len = regmap_calc_reg_len(map->max_register); - tot_len = reg_len + 10; /* ': R W V P\n' */ for (i = 0; i <= map->max_register; i += map->reg_stride) { /* Ignore registers which are neither readable nor writable */ if (!regmap_readable(map, i) && !regmap_writeable(map, i)) continue; - /* If we're in the region the user is trying to read */ - if (p >= *ppos) { - /* ...but not beyond it */ - if (buf_pos + tot_len + 1 >= count) - break; - - /* Format the register */ - snprintf(buf + buf_pos, count - buf_pos, - "%.*x: %c %c %c %c\n", - reg_len, i, - regmap_readable(map, i) ? 'y' : 'n', - regmap_writeable(map, i) ? 'y' : 'n', - regmap_volatile(map, i) ? 'y' : 'n', - regmap_precious(map, i) ? 'y' : 'n'); - - buf_pos += tot_len; - } - p += tot_len; - } - - ret = buf_pos; - - if (copy_to_user(user_buf, buf, buf_pos)) { - ret = -EFAULT; - goto out; + /* Format the register */ + seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i, + regmap_readable(map, i) ? 'y' : 'n', + regmap_writeable(map, i) ? 'y' : 'n', + regmap_volatile(map, i) ? 'y' : 'n', + regmap_precious(map, i) ? 
'y' : 'n'); } - *ppos += buf_pos; + return 0; +} -out: - kfree(buf); - return ret; +static int access_open(struct inode *inode, struct file *file) +{ + return single_open(file, regmap_access_show, inode->i_private); } static const struct file_operations regmap_access_fops = { - .open = simple_open, - .read = regmap_access_read_file, - .llseek = default_llseek, + .open = access_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, }; static ssize_t regmap_cache_only_write_file(struct file *file, diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 8d16db533527..9b0d202414d0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -39,8 +39,11 @@ struct regmap_irq_chip_data { unsigned int *mask_buf; unsigned int *mask_buf_def; unsigned int *wake_buf; + unsigned int *type_buf; + unsigned int *type_buf_def; unsigned int irq_reg_stride; + unsigned int type_reg_stride; }; static inline const @@ -144,6 +147,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data) } } + for (i = 0; i < d->chip->num_type_reg; i++) { + if (!d->type_buf_def[i]) + continue; + reg = d->chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (d->chip->type_invert) + ret = regmap_update_bits(d->map, reg, + d->type_buf_def[i], ~d->type_buf[i]); + else + ret = regmap_update_bits(d->map, reg, + d->type_buf_def[i], d->type_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to sync type in %x\n", + reg); + } + if (d->chip->runtime_pm) pm_runtime_put(map->dev); @@ -178,6 +197,38 @@ static void regmap_irq_disable(struct irq_data *data) d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; } +static int regmap_irq_set_type(struct irq_data *data, unsigned int type) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + int reg = irq_data->type_reg_offset / map->reg_stride; + + if (!(irq_data->type_rising_mask | irq_data->type_falling_mask)) + return 0; + + d->type_buf[reg] &= ~(irq_data->type_falling_mask | + irq_data->type_rising_mask); + switch (type) { + case IRQ_TYPE_EDGE_FALLING: + d->type_buf[reg] |= irq_data->type_falling_mask; + break; + + case IRQ_TYPE_EDGE_RISING: + d->type_buf[reg] |= irq_data->type_rising_mask; + break; + + case IRQ_TYPE_EDGE_BOTH: + d->type_buf[reg] |= (irq_data->type_falling_mask | + irq_data->type_rising_mask); + break; + + default: + return -EINVAL; + } + return 0; +} + static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) { struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); @@ -204,6 +255,7 @@ static const struct irq_chip regmap_irq_chip = { .irq_bus_sync_unlock = regmap_irq_sync_unlock, .irq_disable = regmap_irq_disable, .irq_enable = regmap_irq_enable, + .irq_set_type = regmap_irq_set_type, .irq_set_wake = regmap_irq_set_wake, }; @@ -386,28 +438,40 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; - d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->status_buf) goto err_alloc; - d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->mask_buf) goto err_alloc; - d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned 
int), GFP_KERNEL); if (!d->mask_buf_def) goto err_alloc; if (chip->wake_base) { - d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->wake_buf) goto err_alloc; } + if (chip->num_type_reg) { + d->type_buf_def = kcalloc(chip->num_type_reg, + sizeof(unsigned int), GFP_KERNEL); + if (!d->type_buf_def) + goto err_alloc; + + d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int), + GFP_KERNEL); + if (!d->type_buf) + goto err_alloc; + } + d->irq_chip = regmap_irq_chip; d->irq_chip.name = chip->name; d->irq = irq; @@ -420,10 +484,16 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, else d->irq_reg_stride = 1; + if (chip->type_reg_stride) + d->type_reg_stride = chip->type_reg_stride; + else + d->type_reg_stride = 1; + if (!map->use_single_read && map->reg_stride == 1 && d->irq_reg_stride == 1) { - d->status_reg_buf = kmalloc(map->format.val_bytes * - chip->num_regs, GFP_KERNEL); + d->status_reg_buf = kmalloc_array(chip->num_regs, + map->format.val_bytes, + GFP_KERNEL); if (!d->status_reg_buf) goto err_alloc; } @@ -511,6 +581,33 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, } } + if (chip->num_type_reg) { + for (i = 0; i < chip->num_irqs; i++) { + reg = chip->irqs[i].type_reg_offset / map->reg_stride; + d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask | + chip->irqs[i].type_falling_mask; + } + for (i = 0; i < chip->num_type_reg; ++i) { + if (!d->type_buf_def[i]) + continue; + + reg = chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (chip->type_invert) + ret = regmap_update_bits(map, reg, + d->type_buf_def[i], 0xFF); + else + ret = regmap_update_bits(map, reg, + d->type_buf_def[i], 0x0); + if (ret != 0) { + dev_err(map->dev, + "Failed to set type in 0x%x: %x\n", + reg, ret); + goto err_alloc; + } + } + } + if (irq_base) d->domain = irq_domain_add_legacy(map->dev->of_node, chip->num_irqs, irq_base, 0, @@ -541,6 +638,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, err_domain: /* Should really dispose of the domain but... 
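A sketch of how a chip driver would describe an interrupt whose edge polarity is programmed through the new type registers handled above (not part of the diff; all offsets, masks and names are hypothetical):

static const struct regmap_irq example_irqs[] = {
        {
                .reg_offset        = 0x0,
                .mask              = BIT(0),
                .type_reg_offset   = 0x0,
                .type_rising_mask  = BIT(0),
                .type_falling_mask = BIT(1),
        },
};

static const struct regmap_irq_chip example_chip = {
        .name         = "example",
        .irqs         = example_irqs,
        .num_irqs     = ARRAY_SIZE(example_irqs),
        .num_regs     = 1,
        .status_base  = 0x10,
        .mask_base    = 0x14,
        .type_base    = 0x18,
        .num_type_reg = 1,
};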
*/ err_alloc: + kfree(d->type_buf); + kfree(d->type_buf_def); kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); @@ -564,6 +663,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) free_irq(irq, d); irq_domain_remove(d->domain); + kfree(d->type_buf); + kfree(d->type_buf_def); kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 426a57e41ac7..8812bfb9e3b8 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -61,6 +61,33 @@ static int regmap_mmio_regbits_check(size_t reg_bits) } } +static int regmap_mmio_get_min_stride(size_t val_bits) +{ + int min_stride; + + switch (val_bits) { + case 8: + /* The core treats 0 as 1 */ + min_stride = 0; + return 0; + case 16: + min_stride = 2; + break; + case 32: + min_stride = 4; + break; +#ifdef CONFIG_64BIT + case 64: + min_stride = 8; + break; +#endif + default: + return -EINVAL; + } + + return min_stride; +} + static inline void regmap_mmio_count_check(size_t count, u32 offset) { BUG_ON(count <= offset); @@ -106,17 +133,17 @@ static int regmap_mmio_gather_write(void *context, while (val_size) { switch (ctx->val_bytes) { case 1: - writeb(*(u8 *)val, ctx->regs + offset); + __raw_writeb(*(u8 *)val, ctx->regs + offset); break; case 2: - writew(*(u16 *)val, ctx->regs + offset); + __raw_writew(*(u16 *)val, ctx->regs + offset); break; case 4: - writel(*(u32 *)val, ctx->regs + offset); + __raw_writel(*(u32 *)val, ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - writeq(*(u64 *)val, ctx->regs + offset); + __raw_writeq(*(u64 *)val, ctx->regs + offset); break; #endif default: @@ -166,17 +193,17 @@ static int regmap_mmio_read(void *context, while (val_size) { switch (ctx->val_bytes) { case 1: - *(u8 *)val = readb(ctx->regs + offset); + *(u8 *)val = __raw_readb(ctx->regs + offset); break; case 2: - *(u16 *)val = readw(ctx->regs + offset); + *(u16 *)val = __raw_readw(ctx->regs + offset); break; case 4: - *(u32 *)val = readl(ctx->regs + offset); + *(u32 *)val = __raw_readl(ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - *(u64 *)val = readq(ctx->regs + offset); + *(u64 *)val = __raw_readq(ctx->regs + offset); break; #endif default: @@ -231,26 +258,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, if (config->pad_bits) return ERR_PTR(-EINVAL); - switch (config->val_bits) { - case 8: - /* The core treats 0 as 1 */ - min_stride = 0; - break; - case 16: - min_stride = 2; - break; - case 32: - min_stride = 4; - break; -#ifdef CONFIG_64BIT - case 64: - min_stride = 8; - break; -#endif - break; - default: - return ERR_PTR(-EINVAL); - } + min_stride = regmap_mmio_get_min_stride(config->val_bits); + if (min_stride < 0) + return ERR_PTR(min_stride); if (config->reg_stride < min_stride) return ERR_PTR(-EINVAL); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 4ac63c0e50c7..ee54e841de4a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -245,6 +245,28 @@ static void regmap_format_32_native(void *buf, unsigned int val, *(u32 *)buf = val << shift; } +#ifdef CONFIG_64BIT +static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) +{ + __be64 *b = buf; + + b[0] = cpu_to_be64((u64)val << shift); +} + +static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) +{ + __le64 *b = buf; + + b[0] = cpu_to_le64((u64)val << shift); +} + +static void 
regmap_format_64_native(void *buf, unsigned int val, + unsigned int shift) +{ + *(u64 *)buf = (u64)val << shift; +} +#endif + static void regmap_parse_inplace_noop(void *buf) { } @@ -332,6 +354,41 @@ static unsigned int regmap_parse_32_native(const void *buf) return *(u32 *)buf; } +#ifdef CONFIG_64BIT +static unsigned int regmap_parse_64_be(const void *buf) +{ + const __be64 *b = buf; + + return be64_to_cpu(b[0]); +} + +static unsigned int regmap_parse_64_le(const void *buf) +{ + const __le64 *b = buf; + + return le64_to_cpu(b[0]); +} + +static void regmap_parse_64_be_inplace(void *buf) +{ + __be64 *b = buf; + + b[0] = be64_to_cpu(b[0]); +} + +static void regmap_parse_64_le_inplace(void *buf) +{ + __le64 *b = buf; + + b[0] = le64_to_cpu(b[0]); +} + +static unsigned int regmap_parse_64_native(const void *buf) +{ + return *(u64 *)buf; +} +#endif + static void regmap_lock_mutex(void *__map) { struct regmap *map = __map; @@ -712,6 +769,21 @@ struct regmap *__regmap_init(struct device *dev, } break; +#ifdef CONFIG_64BIT + case 64: + switch (reg_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_reg = regmap_format_64_be; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_reg = regmap_format_64_native; + break; + default: + goto err_map; + } + break; +#endif + default: goto err_map; } @@ -771,6 +843,28 @@ struct regmap *__regmap_init(struct device *dev, goto err_map; } break; +#ifdef CONFIG_64BIT + case 64: + switch (val_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_val = regmap_format_64_be; + map->format.parse_val = regmap_parse_64_be; + map->format.parse_inplace = regmap_parse_64_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_64_le; + map->format.parse_val = regmap_parse_64_le; + map->format.parse_inplace = regmap_parse_64_le_inplace; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_val = regmap_format_64_native; + map->format.parse_val = regmap_parse_64_native; + break; + default: + goto err_map; + } + break; +#endif } if (map->format.format_write) { @@ -1513,7 +1607,7 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) { int ret; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -1540,7 +1634,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) { int ret; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -1714,7 +1808,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, if (map->bus && !map->format.parse_inplace) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; /* @@ -1983,7 +2077,7 @@ static int _regmap_multi_reg_write(struct regmap *map, int reg = regs[i].reg; if (!map->writeable_reg(map->dev, reg)) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; } @@ -2133,7 +2227,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, if (val_len % map->format.val_bytes) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -2260,7 +2354,7 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) { int ret; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -2296,7 +2390,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void 
*val, return -EINVAL; if (val_len % map->format.val_bytes) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; if (val_count == 0) return -EINVAL; @@ -2414,7 +2508,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_bytes = map->format.val_bytes; bool vol = regmap_volatile_range(map, reg, val_count); - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { @@ -2488,11 +2582,19 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, * we assume that the values are native * endian. */ +#ifdef CONFIG_64BIT + u64 *u64 = val; +#endif u32 *u32 = val; u16 *u16 = val; u8 *u8 = val; switch (map->format.val_bytes) { +#ifdef CONFIG_64BIT + case 8: + u64[i] = ival; + break; +#endif case 4: u32[i] = ival; break; diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 59d8d0d14824..c466f752b067 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -668,11 +668,36 @@ static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env) core->id.rev, core->id.class); } -static int __init bcma_modinit(void) +static unsigned int bcma_bus_registered; + +/* + * If built-in, bus has to be registered early, before any driver calls + * bcma_driver_register. + * Otherwise registering driver would trigger BUG in driver_register. + */ +static int __init bcma_init_bus_register(void) { int err; + if (bcma_bus_registered) + return 0; + err = bus_register(&bcma_bus_type); + if (!err) + bcma_bus_registered = 1; + + return err; +} +#ifndef MODULE +fs_initcall(bcma_init_bus_register); +#endif + +/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */ +static int __init bcma_modinit(void) +{ + int err; + + err = bcma_init_bus_register(); if (err) return err; @@ -691,7 +716,7 @@ static int __init bcma_modinit(void) return err; } -fs_initcall(bcma_modinit); +module_init(bcma_modinit); static void __exit bcma_modexit(void) { diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 0422c47261c3..b38bd06d564c 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -514,14 +514,9 @@ cciss_proc_write(struct file *file, const char __user *buf, if (!buf || length > PAGE_SIZE - 1) return -EINVAL; - buffer = (char *)__get_free_page(GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - err = -EFAULT; - if (copy_from_user(buffer, buf, length)) - goto out; - buffer[length] = '\0'; + buffer = memdup_user_nul(buf, length); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); #ifdef CONFIG_CISS_SCSI_TAPE if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { @@ -537,8 +532,7 @@ cciss_proc_write(struct file *file, const char __user *buf, /* might be nice to have "disengage" too, but it's not safely possible. (only 1 module use count, lock issues.) 
*/ -out: - free_page((unsigned long)buffer); + kfree(buffer); return err; } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 3457ac8c03e2..34997d8ecd64 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2029,13 +2029,10 @@ static int exec_drive_taskfile(struct driver_data *dd, } if (taskout) { - outbuf = kzalloc(taskout, GFP_KERNEL); - if (outbuf == NULL) { - err = -ENOMEM; - goto abort; - } - if (copy_from_user(outbuf, buf + outtotal, taskout)) { - err = -EFAULT; + outbuf = memdup_user(buf + outtotal, taskout); + if (IS_ERR(outbuf)) { + err = PTR_ERR(outbuf); + outbuf = NULL; goto abort; } outbuf_dma = pci_map_single(dd->pdev, @@ -2050,14 +2047,10 @@ static int exec_drive_taskfile(struct driver_data *dd, } if (taskin) { - inbuf = kzalloc(taskin, GFP_KERNEL); - if (inbuf == NULL) { - err = -ENOMEM; - goto abort; - } - - if (copy_from_user(inbuf, buf + intotal, taskin)) { - err = -EFAULT; + inbuf = memdup_user(buf + intotal, taskin); + if (IS_ERR(inbuf)) { + err = PTR_ERR(inbuf); + inbuf = NULL; goto abort; } inbuf_dma = pci_map_single(dd->pdev, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 93b3f99b6865..e4c5cc107934 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -827,6 +827,7 @@ static const struct block_device_operations nbd_fops = { .owner = THIS_MODULE, .ioctl = nbd_ioctl, + .compat_ioctl = nbd_ioctl, }; #if IS_ENABLED(CONFIG_DEBUG_FS) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8162475d96b5..09e3c0d87ecc 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd) { struct request_queue *q = NULL; + if (cmd->rq) + q = cmd->rq->q; + switch (queue_mode) { case NULL_Q_MQ: blk_mq_end_request(cmd->rq, 0); @@ -229,23 +232,19 @@ static void end_cmd(struct nullb_cmd *cmd) break; case NULL_Q_BIO: bio_endio(cmd->bio); - goto free_cmd; + break; } - if (cmd->rq) - q = cmd->rq->q; + free_cmd(cmd); /* Restart queue if needed, as we are freeing a tag */ - if (q && !q->mq_ops && blk_queue_stopped(q)) { + if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) { unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - if (blk_queue_stopped(q)) - blk_start_queue(q); + blk_start_queue_async(q); spin_unlock_irqrestore(q->queue_lock, flags); } -free_cmd: - free_cmd(cmd); } static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index d8b2488aaade..34997df132e2 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -203,14 +203,11 @@ static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf, char *buf; ssize_t st; - buf = kzalloc(cnt, GFP_KERNEL); - if (!buf) - return -ENOMEM; + buf = memdup_user(ubuf, cnt); + if (IS_ERR(buf)) + return PTR_ERR(buf); - st = copy_from_user(buf, ubuf, cnt); - if (!st) - st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, - buf, 1); + st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1); kfree(buf); if (st) return st; diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index f9099940c272..41fb1a917b17 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, goto unmap; for (n = 0, i = 0; n < nseg; n++) { + uint8_t first_sect, last_sect; + if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { /* 
Map indirect segments */ if (segments) @@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); } i = n % SEGS_PER_INDIRECT_FRAME; + pending_req->segments[n]->gref = segments[i].gref; - seg[n].nsec = segments[i].last_sect - - segments[i].first_sect + 1; - seg[n].offset = (segments[i].first_sect << 9); - if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || - (segments[i].last_sect < segments[i].first_sect)) { + + first_sect = READ_ONCE(segments[i].first_sect); + last_sect = READ_ONCE(segments[i].last_sect); + if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) { rc = -EINVAL; goto unmap; } + + seg[n].nsec = last_sect - first_sect + 1; + seg[n].offset = first_sect << 9; preq->nr_sects += seg[n].nsec; } diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 68e87a037b99..c929ae22764c 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, struct blkif_x86_32_request *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; - dst->operation = src->operation; - switch (src->operation) { + dst->operation = READ_ONCE(src->operation); + switch (dst->operation) { case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: @@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_x86_64_request *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; - dst->operation = src->operation; - switch (src->operation) { + dst->operation = READ_ONCE(src->operation); + switch (dst->operation) { case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c index 364f82b34d03..5b0ef7bbe8ac 100644 --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c @@ -178,10 +178,8 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id return -ENODEV; data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); - if (!data) { - BT_ERR("Can't allocate memory for data structure"); + if (!data) return -ENOMEM; - } data->udev = udev; data->state = BCM203X_LOAD_MINIDRV; diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index 616ec2ac1b22..3bf4ec60e073 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -324,7 +324,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch return -ENOMEM; } - bt_cb(skb)->pkt_type = pkt_type; + hci_skb_pkt_type(skb) = pkt_type; data->reassembly = skb; } else { @@ -469,9 +469,10 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) unsigned char buf[3]; int sent = 0, size, count; - BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); + BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, + hci_skb_pkt_type(skb), skb->len); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; @@ -484,7 +485,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) } /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); count = skb->len; @@ -635,10 +636,8 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i /* Initialize control structure and load firmware */ data = 
devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL); - if (!data) { - BT_ERR("Can't allocate memory for control structure"); - goto done; - } + if (!data) + return -ENOMEM; data->udev = udev; data->bulk_in_ep = bulk_in_ep->desc.bEndpointAddress; diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index 36fa1c958c74..c0b3b5576992 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -261,7 +261,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info) if (!skb) break; - if (bt_cb(skb)->pkt_type & 0x80) { + if (hci_skb_pkt_type(skb) & 0x80) { /* Disable RTS */ info->ctrl_reg |= REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); @@ -279,13 +279,13 @@ static void bluecard_write_wakeup(struct bluecard_info *info) /* Mark the buffer as dirty */ clear_bit(ready_bit, &(info->tx_state)); - if (bt_cb(skb)->pkt_type & 0x80) { + if (hci_skb_pkt_type(skb) & 0x80) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); DEFINE_WAIT(wait); unsigned char baud_reg; - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case PKT_BAUD_RATE_460800: baud_reg = REG_CONTROL_BAUD_RATE_460800; break; @@ -402,9 +402,9 @@ static void bluecard_receive(struct bluecard_info *info, if (info->rx_state == RECV_WAIT_PACKET_TYPE) { - bt_cb(info->rx_skb)->pkt_type = buf[i]; + hci_skb_pkt_type(info->rx_skb) = buf[i]; - switch (bt_cb(info->rx_skb)->pkt_type) { + switch (hci_skb_pkt_type(info->rx_skb)) { case 0x00: /* init packet */ @@ -436,7 +436,8 @@ static void bluecard_receive(struct bluecard_info *info, default: /* unknown packet */ - BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); info->hdev->stat.err_rx++; kfree_skb(info->rx_skb); @@ -578,21 +579,21 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) switch (baud) { case 460800: cmd[4] = 0x00; - bt_cb(skb)->pkt_type = PKT_BAUD_RATE_460800; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_460800; break; case 230400: cmd[4] = 0x01; - bt_cb(skb)->pkt_type = PKT_BAUD_RATE_230400; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_230400; break; case 115200: cmd[4] = 0x02; - bt_cb(skb)->pkt_type = PKT_BAUD_RATE_115200; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200; break; case 57600: /* Fall through... 
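Across these Bluetooth drivers the raw bt_cb(skb)->pkt_type accesses are being switched to the hci_skb_pkt_type() accessor. A sketch of the common send-path idiom, with my_dev_send_frame() as a made-up driver hook rather than any real callback:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int my_dev_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	}

	/* H:4 framing: the packet type becomes the first byte on the wire */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* ... queue skb to the transport ... */
	return 0;
}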
*/ default: cmd[4] = 0x03; - bt_cb(skb)->pkt_type = PKT_BAUD_RATE_57600; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600; break; } @@ -660,7 +661,7 @@ static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bluecard_info *info = hci_get_drvdata(hdev); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; @@ -673,7 +674,7 @@ static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) } /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&(info->txq), skb); bluecard_write_wakeup(info); diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index 49c397e21b39..fd6b53e9bbf2 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -295,9 +295,9 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return -ENOMEM; /* Prepend skb with frame type */ - *skb_push(skb, 1) = bt_cb(skb)->pkt_type; + *skb_push(skb, 1) = hci_skb_pkt_type(skb); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: dr = kmalloc(sizeof(*dr), GFP_ATOMIC); if (!dr) { diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 5803aaed958f..8165ef2fe877 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -246,10 +246,10 @@ static void bt3c_receive(struct bt3c_info *info) if (info->rx_state == RECV_WAIT_PACKET_TYPE) { - bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L); + hci_skb_pkt_type(info->rx_skb) = inb(iobase + DATA_L); inb(iobase + DATA_H); - switch (bt_cb(info->rx_skb)->pkt_type) { + switch (hci_skb_pkt_type(info->rx_skb)) { case HCI_EVENT_PKT: info->rx_state = RECV_WAIT_EVENT_HEADER; @@ -268,7 +268,8 @@ static void bt3c_receive(struct bt3c_info *info) default: /* Unknown packet */ - BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); info->hdev->stat.err_rx++; kfree_skb(info->rx_skb); @@ -411,7 +412,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) struct bt3c_info *info = hci_get_drvdata(hdev); unsigned long flags; - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; @@ -424,7 +425,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) } /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&(info->txq), skb); spin_lock_irqsave(&(info->lock), flags); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 1f13e617bf56..fce154855718 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -73,6 +73,48 @@ int btintel_check_bdaddr(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(btintel_check_bdaddr); +int btintel_enter_mfg(struct hci_dev *hdev) +{ + const u8 param[] = { 0x01, 0x00 }; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_enter_mfg); + +int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched) +{ + u8 param[] = { 0x00, 0x00 }; + struct sk_buff 
*skb; + + /* The 2nd command parameter specifies the manufacturing exit method: + * 0x00: Just disable the manufacturing mode (0x00). + * 0x01: Disable manufacturing mode and reset with patches deactivated. + * 0x02: Disable manufacturing mode and reset with patches activated. + */ + if (reset) + param[1] |= patched ? 0x02 : 0x01; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_exit_mfg); + int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; @@ -126,37 +168,19 @@ EXPORT_SYMBOL_GPL(btintel_set_diag); int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable) { - struct sk_buff *skb; - u8 param[2]; - int err; - - param[0] = 0x01; - param[1] = 0x00; - - skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - BT_ERR("%s: Entering Intel manufacturer mode failed (%d)", - hdev->name, err); - return PTR_ERR(skb); - } - kfree_skb(skb); + int err, ret; - err = btintel_set_diag(hdev, enable); + err = btintel_enter_mfg(hdev); + if (err) + return err; - param[0] = 0x00; - param[1] = 0x00; + ret = btintel_set_diag(hdev, enable); - skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)", - hdev->name, err); - return PTR_ERR(skb); - } - kfree_skb(skb); + err = btintel_exit_mfg(hdev, false, false); + if (err) + return err; - return err; + return ret; } EXPORT_SYMBOL_GPL(btintel_set_diag_mfg); @@ -309,39 +333,46 @@ EXPORT_SYMBOL_GPL(btintel_set_event_mask); int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) { - struct sk_buff *skb; - u8 param[2]; - int err; + int err, ret; - param[0] = 0x01; - param[1] = 0x00; + err = btintel_enter_mfg(hdev); + if (err) + return err; - skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - BT_ERR("%s: Entering Intel manufacturer mode failed (%d)", - hdev->name, err); - return PTR_ERR(skb); - } - kfree_skb(skb); + ret = btintel_set_event_mask(hdev, debug); - err = btintel_set_event_mask(hdev, debug); + err = btintel_exit_mfg(hdev, false, false); + if (err) + return err; - param[0] = 0x00; - param[1] = 0x00; + return ret; +} +EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg); - skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); +int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { - err = PTR_ERR(skb); - BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)", - hdev->name, err); + bt_dev_err(hdev, "Reading Intel version information failed (%ld)", + PTR_ERR(skb)); return PTR_ERR(skb); } + + if (skb->len != sizeof(*ver)) { + bt_dev_err(hdev, "Intel version event size mismatch"); + kfree_skb(skb); + return -EILSEQ; + } + + memcpy(ver, skb->data, sizeof(*ver)); + kfree_skb(skb); - return err; + return 0; } -EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg); +EXPORT_SYMBOL_GPL(btintel_read_version); /* ------- REGMAP IBT SUPPORT ------- */ diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 07e58e05a7fa..1e8955aaafed 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -72,6 +72,8 @@ struct intel_secure_send_result { #if 
IS_ENABLED(CONFIG_BT_INTEL) int btintel_check_bdaddr(struct hci_dev *hdev); +int btintel_enter_mfg(struct hci_dev *hdev); +int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched); int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); int btintel_set_diag(struct hci_dev *hdev, bool enable); int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable); @@ -83,6 +85,7 @@ int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name); int btintel_set_event_mask(struct hci_dev *hdev, bool debug); int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug); +int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver); struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, u16 opcode_write); @@ -94,6 +97,16 @@ static inline int btintel_check_bdaddr(struct hci_dev *hdev) return -EOPNOTSUPP; } +static inline int btintel_enter_mfg(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched) +{ + return -EOPNOTSUPP; +} + static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { return -EOPNOTSUPP; @@ -140,6 +153,12 @@ static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) return -EOPNOTSUPP; } +static inline int btintel_read_version(struct hci_dev *hdev, + struct intel_version *ver) +{ + return -EOPNOTSUPP; +} + static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, u16 opcode_write) diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index 27a9aac25583..05904732e6f1 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -89,6 +89,7 @@ struct btmrvl_adapter { wait_queue_head_t event_hs_wait_q; u8 cmd_complete; bool is_suspended; + bool is_suspending; }; struct btmrvl_private { diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 6af917331962..f25a825a693f 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -196,7 +196,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode, if (len) memcpy(skb_put(skb, len), param, len); - bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; + hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT; skb_queue_head(&priv->adapter->tx_queue, skb); @@ -387,7 +387,7 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; - skb->data[3] = bt_cb(skb)->pkt_type; + skb->data[3] = hci_skb_pkt_type(skb); if (priv->hw_host_to_card) ret = priv->hw_host_to_card(priv, skb->data, skb->len); @@ -434,9 +434,14 @@ static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct btmrvl_private *priv = hci_get_drvdata(hdev); - BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len); + BT_DBG("type=%d, len=%d", hci_skb_pkt_type(skb), skb->len); - switch (bt_cb(skb)->pkt_type) { + if (priv->adapter->is_suspending || priv->adapter->is_suspended) { + BT_ERR("%s: Device is suspending or suspended", __func__); + return -EBUSY; + } + + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; @@ -452,7 +457,8 @@ static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb) skb_queue_tail(&priv->adapter->tx_queue, skb); - wake_up_interruptible(&priv->main_thread.wait_q); + if 
(!priv->adapter->is_suspended) + wake_up_interruptible(&priv->main_thread.wait_q); return 0; } @@ -543,7 +549,7 @@ static int btmrvl_setup(struct hci_dev *hdev) if (ret) return ret; - priv->btmrvl_dev.gpio_gap = 0xffff; + priv->btmrvl_dev.gpio_gap = 0xfffe; btmrvl_check_device_tree(priv); @@ -643,7 +649,8 @@ static int btmrvl_service_main_thread(void *data) if (adapter->ps_state == PS_SLEEP) continue; - if (!priv->btmrvl_dev.tx_dnld_rdy) + if (!priv->btmrvl_dev.tx_dnld_rdy || + priv->adapter->is_suspended) continue; skb = skb_dequeue(&adapter->tx_queue); diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 71ea2a3af293..6ed8acfcfa9c 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -698,7 +698,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_EVENT_PKT: - bt_cb(skb)->pkt_type = type; + hci_skb_pkt_type(skb) = type; skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN); @@ -713,7 +713,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) break; case MRVL_VENDOR_PKT: - bt_cb(skb)->pkt_type = HCI_VENDOR_PKT; + hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN); @@ -1112,7 +1112,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) */ if (btmrvl_sdio_verify_fw_download(card, pollnum)) { BT_ERR("FW failed to be active in time!"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto done; } sdio_release_host(card->func); @@ -1544,10 +1545,10 @@ static int btmrvl_sdio_suspend(struct device *dev) } priv = card->priv; + priv->adapter->is_suspending = true; hcidev = priv->btmrvl_dev.hcidev; BT_DBG("%s: SDIO suspend", hcidev->name); hci_suspend_dev(hcidev); - skb_queue_purge(&priv->adapter->tx_queue); if (priv->adapter->hs_state != HS_ACTIVATED) { if (btmrvl_enable_hs(priv)) { @@ -1556,6 +1557,7 @@ static int btmrvl_sdio_suspend(struct device *dev) } } + priv->adapter->is_suspending = false; priv->adapter->is_suspended = true; /* We will keep the power when hs enabled successfully */ diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 7b624423a7e8..2b05661e3818 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -86,7 +86,7 @@ static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb) skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; - skb->data[3] = bt_cb(skb)->pkt_type; + skb->data[3] = hci_skb_pkt_type(skb); err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len); if (err < 0) { @@ -158,7 +158,7 @@ static int btsdio_rx_packet(struct btsdio_data *data) data->hdev->stat.byte_rx += len; - bt_cb(skb)->pkt_type = hdr[3]; + hci_skb_pkt_type(skb) = hdr[3]; err = hci_recv_frame(data->hdev, skb); if (err < 0) @@ -252,7 +252,7 @@ static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s", hdev->name); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index bb8e4025fb9e..9624b29f8349 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -200,9 +200,9 @@ static void btuart_receive(struct btuart_info *info) if (info->rx_state == RECV_WAIT_PACKET_TYPE) { - bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX); + hci_skb_pkt_type(info->rx_skb) = inb(iobase + UART_RX); - 
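The btmrvl changes above stop purging the TX queue on suspend and instead gate the send path with a new is_suspending flag. A condensed sketch of that guard, where my_btmrvl_send_frame() stands in for the driver's real hook:

#include <linux/wait.h>
#include <net/bluetooth/hci_core.h>
#include "btmrvl_drv.h"

/* Refuse new frames while a suspend is in flight, and avoid waking the
 * main thread once the adapter is already suspended.
 */
static int my_btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmrvl_private *priv = hci_get_drvdata(hdev);

	if (priv->adapter->is_suspending || priv->adapter->is_suspended)
		return -EBUSY;

	skb_queue_tail(&priv->adapter->tx_queue, skb);

	if (!priv->adapter->is_suspended)
		wake_up_interruptible(&priv->main_thread.wait_q);

	return 0;
}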
switch (bt_cb(info->rx_skb)->pkt_type) { + switch (hci_skb_pkt_type(info->rx_skb)) { case HCI_EVENT_PKT: info->rx_state = RECV_WAIT_EVENT_HEADER; @@ -221,7 +221,8 @@ static void btuart_receive(struct btuart_info *info) default: /* Unknown packet */ - BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); info->hdev->stat.err_rx++; kfree_skb(info->rx_skb); @@ -424,7 +425,7 @@ static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct btuart_info *info = hci_get_drvdata(hdev); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; @@ -437,7 +438,7 @@ static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) } /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&(info->txq), skb); btuart_write_wakeup(info); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 92f0ee388f9e..a191e318fab8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Toshiba Corp - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + /* Intel Bluetooth USB Bootloader (RAM module) */ { USB_DEVICE(0x8087, 0x0a5a), .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, @@ -437,22 +441,22 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) break; } - bt_cb(skb)->pkt_type = HCI_EVENT_PKT; - bt_cb(skb)->expect = HCI_EVENT_HDR_SIZE; + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + hci_skb_expect(skb) = HCI_EVENT_HDR_SIZE; } - len = min_t(uint, bt_cb(skb)->expect, count); + len = min_t(uint, hci_skb_expect(skb), count); memcpy(skb_put(skb, len), buffer, len); count -= len; buffer += len; - bt_cb(skb)->expect -= len; + hci_skb_expect(skb) -= len; if (skb->len == HCI_EVENT_HDR_SIZE) { /* Complete event header */ - bt_cb(skb)->expect = hci_event_hdr(skb)->plen; + hci_skb_expect(skb) = hci_event_hdr(skb)->plen; - if (skb_tailroom(skb) < bt_cb(skb)->expect) { + if (skb_tailroom(skb) < hci_skb_expect(skb)) { kfree_skb(skb); skb = NULL; @@ -461,7 +465,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) } } - if (bt_cb(skb)->expect == 0) { + if (!hci_skb_expect(skb)) { /* Complete frame */ data->recv_event(data->hdev, skb); skb = NULL; @@ -492,24 +496,24 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) break; } - bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; - bt_cb(skb)->expect = HCI_ACL_HDR_SIZE; + hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; + hci_skb_expect(skb) = HCI_ACL_HDR_SIZE; } - len = min_t(uint, bt_cb(skb)->expect, count); + len = min_t(uint, hci_skb_expect(skb), count); memcpy(skb_put(skb, len), buffer, len); count -= len; buffer += len; - bt_cb(skb)->expect -= len; + hci_skb_expect(skb) -= len; if (skb->len == HCI_ACL_HDR_SIZE) { __le16 dlen = hci_acl_hdr(skb)->dlen; /* Complete ACL header */ - bt_cb(skb)->expect = __le16_to_cpu(dlen); + hci_skb_expect(skb) = __le16_to_cpu(dlen); - if (skb_tailroom(skb) < bt_cb(skb)->expect) { + if (skb_tailroom(skb) < hci_skb_expect(skb)) { kfree_skb(skb); skb = NULL; @@ -518,7 +522,7 @@ static int 
btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) } } - if (bt_cb(skb)->expect == 0) { + if (!hci_skb_expect(skb)) { /* Complete frame */ hci_recv_frame(data->hdev, skb); skb = NULL; @@ -549,22 +553,22 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) break; } - bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; - bt_cb(skb)->expect = HCI_SCO_HDR_SIZE; + hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; + hci_skb_expect(skb) = HCI_SCO_HDR_SIZE; } - len = min_t(uint, bt_cb(skb)->expect, count); + len = min_t(uint, hci_skb_expect(skb), count); memcpy(skb_put(skb, len), buffer, len); count -= len; buffer += len; - bt_cb(skb)->expect -= len; + hci_skb_expect(skb) -= len; if (skb->len == HCI_SCO_HDR_SIZE) { /* Complete SCO header */ - bt_cb(skb)->expect = hci_sco_hdr(skb)->dlen; + hci_skb_expect(skb) = hci_sco_hdr(skb)->dlen; - if (skb_tailroom(skb) < bt_cb(skb)->expect) { + if (skb_tailroom(skb) < hci_skb_expect(skb)) { kfree_skb(skb); skb = NULL; @@ -573,7 +577,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) } } - if (bt_cb(skb)->expect == 0) { + if (!hci_skb_expect(skb)) { /* Complete frame */ hci_recv_frame(data->hdev, skb); skb = NULL; @@ -1257,7 +1261,7 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s", hdev->name); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: urb = alloc_ctrl_urb(hdev, skb); if (IS_ERR(urb)) @@ -1642,13 +1646,8 @@ static int btusb_setup_intel(struct hci_dev *hdev) struct sk_buff *skb; const struct firmware *fw; const u8 *fw_ptr; - int disable_patch; - struct intel_version *ver; - - const u8 mfg_enable[] = { 0x01, 0x00 }; - const u8 mfg_disable[] = { 0x00, 0x00 }; - const u8 mfg_reset_deactivate[] = { 0x00, 0x01 }; - const u8 mfg_reset_activate[] = { 0x00, 0x02 }; + int disable_patch, err; + struct intel_version ver; BT_DBG("%s", hdev->name); @@ -1674,35 +1673,22 @@ static int btusb_setup_intel(struct hci_dev *hdev) * The returned information are hardware variant and revision plus * firmware variant, revision and build number. */ - skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s reading Intel fw version command failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - - if (skb->len != sizeof(*ver)) { - BT_ERR("%s Intel version event length mismatch", hdev->name); - kfree_skb(skb); - return -EIO; - } - - ver = (struct intel_version *)skb->data; + err = btintel_read_version(hdev, &ver); + if (err) + return err; BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x", - hdev->name, ver->hw_platform, ver->hw_variant, - ver->hw_revision, ver->fw_variant, ver->fw_revision, - ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy, - ver->fw_patch_num); + hdev->name, ver.hw_platform, ver.hw_variant, ver.hw_revision, + ver.fw_variant, ver.fw_revision, ver.fw_build_num, + ver.fw_build_ww, ver.fw_build_yy, ver.fw_patch_num); /* fw_patch_num indicates the version of patch the device currently * have. If there is no patch data in the device, it is always 0x00. * So, if it is other than 0x00, no need to patch the device again. */ - if (ver->fw_patch_num) { + if (ver.fw_patch_num) { BT_INFO("%s: Intel device is already patched. 
patch num: %02x", - hdev->name, ver->fw_patch_num); - kfree_skb(skb); + hdev->name, ver.fw_patch_num); goto complete; } @@ -1712,31 +1698,21 @@ static int btusb_setup_intel(struct hci_dev *hdev) * If no patch file is found, allow the device to operate without * a patch. */ - fw = btusb_setup_intel_get_fw(hdev, ver); - if (!fw) { - kfree_skb(skb); + fw = btusb_setup_intel_get_fw(hdev, &ver); + if (!fw) goto complete; - } fw_ptr = fw->data; - kfree_skb(skb); - - /* This Intel specific command enables the manufacturer mode of the - * controller. - * + /* Enable the manufacturer mode of the controller. * Only while this mode is enabled, the driver can download the * firmware patch data and configuration parameters. */ - skb = __hci_cmd_sync(hdev, 0xfc11, 2, mfg_enable, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s entering Intel manufacturer mode failed (%ld)", - hdev->name, PTR_ERR(skb)); + err = btintel_enter_mfg(hdev); + if (err) { release_firmware(fw); - return PTR_ERR(skb); + return err; } - kfree_skb(skb); - disable_patch = 1; /* The firmware data file consists of list of Intel specific HCI @@ -1776,14 +1752,9 @@ static int btusb_setup_intel(struct hci_dev *hdev) /* Patching completed successfully and disable the manufacturer mode * with reset and activate the downloaded firmware patches. */ - skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_activate), - mfg_reset_activate, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - kfree_skb(skb); + err = btintel_exit_mfg(hdev, true, true); + if (err) + return err; BT_INFO("%s: Intel Bluetooth firmware patch completed and activated", hdev->name); @@ -1792,14 +1763,9 @@ static int btusb_setup_intel(struct hci_dev *hdev) exit_mfg_disable: /* Disable the manufacturer mode without reset */ - skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_disable), mfg_disable, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - kfree_skb(skb); + err = btintel_exit_mfg(hdev, false, false); + if (err) + return err; BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name); @@ -1811,14 +1777,9 @@ exit_mfg_deactivate: /* Patching failed. Disable the manufacturer mode with reset and * deactivate the downloaded firmware patches. 
*/ - skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_deactivate), - mfg_reset_deactivate, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - kfree_skb(skb); + err = btintel_exit_mfg(hdev, true, false); + if (err) + return err; BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated", hdev->name); @@ -1853,7 +1814,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) *skb_put(skb, 1) = 0x00; - bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; return hci_recv_frame(hdev, skb); } @@ -1945,7 +1906,7 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s", hdev->name); - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: if (test_bit(BTUSB_BOOTLOADER, &data->flags)) { struct hci_command_hdr *cmd = (void *)skb->data; @@ -2005,7 +1966,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) 0x00, 0x08, 0x04, 0x00 }; struct btusb_data *data = hci_get_drvdata(hdev); struct sk_buff *skb; - struct intel_version *ver; + struct intel_version ver; struct intel_boot_params *params; const struct firmware *fw; const u8 *fw_ptr; @@ -2023,28 +1984,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * is in bootloader mode or if it already has operational firmware * loaded. */ - skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: Reading Intel version information failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - - if (skb->len != sizeof(*ver)) { - BT_ERR("%s: Intel version event size mismatch", hdev->name); - kfree_skb(skb); - return -EILSEQ; - } - - ver = (struct intel_version *)skb->data; + err = btintel_read_version(hdev, &ver); + if (err) + return err; /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. */ - if (ver->hw_platform != 0x37) { + if (ver.hw_platform != 0x37) { BT_ERR("%s: Unsupported Intel hardware platform (%u)", - hdev->name, ver->hw_platform); - kfree_skb(skb); + hdev->name, ver.hw_platform); return -EINVAL; } @@ -2053,14 +2002,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * put in place to ensure correct forward compatibility options * when newer hardware variants come along. */ - if (ver->hw_variant != 0x0b) { + if (ver.hw_variant != 0x0b) { BT_ERR("%s: Unsupported Intel hardware variant (%u)", - hdev->name, ver->hw_variant); - kfree_skb(skb); + hdev->name, ver.hw_variant); return -EINVAL; } - btintel_version_info(hdev, ver); + btintel_version_info(hdev, &ver); /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies @@ -2075,8 +2023,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. */ - if (ver->fw_variant == 0x23) { - kfree_skb(skb); + if (ver.fw_variant == 0x23) { clear_bit(BTUSB_BOOTLOADER, &data->flags); btintel_check_bdaddr(hdev); return 0; @@ -2085,15 +2032,12 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* If the device is not in bootloader mode, then the only possible * choice is to return an error and abort the device initialization. 
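btintel_read_version() now copies the version event into a caller-supplied struct intel_version instead of handing back the raw skb. A brief usage sketch mirroring the checks in the hunks above; my_check_fw() is an illustrative name only:

#include "btintel.h"

static int my_check_fw(struct hci_dev *hdev)
{
	struct intel_version ver;
	int err;

	err = btintel_read_version(hdev, &ver);
	if (err)
		return err;

	/* Only the fixed 0x37 hardware platform is accepted */
	if (ver.hw_platform != 0x37)
		return -EINVAL;

	/* A non-zero fw_patch_num means a patch is already loaded */
	if (ver.fw_patch_num)
		return 0;

	/* ... otherwise go look for a firmware patch file ... */
	return 0;
}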
*/ - if (ver->fw_variant != 0x06) { + if (ver.fw_variant != 0x06) { BT_ERR("%s: Unsupported Intel firmware variant (%u)", - hdev->name, ver->fw_variant); - kfree_skb(skb); + hdev->name, ver.fw_variant); return -ENODEV; } - kfree_skb(skb); - /* Read the secure boot parameters to identify the operating * details of the bootloader. */ diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 57eb935aedc7..24a652f9252b 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -249,10 +249,10 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) hst = hci_get_drvdata(hdev); /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); - BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, - skb->len); + BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); /* Insert skb to shared transport layer's transmit queue. * Freeing skb memory is taken care in shared transport layer, @@ -268,7 +268,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ST accepted our skb. So, Go ahead and do rest */ hdev->stat.byte_tx += len; - ti_st_tx_complete(hst, bt_cb(skb)->pkt_type); + ti_st_tx_complete(hst, hci_skb_pkt_type(skb)); return 0; } diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index 5026f66fac88..6317c6f323bf 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -239,7 +239,7 @@ static void dtl1_receive(struct dtl1_info *info) info->rx_count = nsh->len + (nsh->len & 0x0001); break; case RECV_WAIT_DATA: - bt_cb(info->rx_skb)->pkt_type = nsh->type; + hci_skb_pkt_type(info->rx_skb) = nsh->type; /* remove PAD byte if it exists */ if (nsh->len & 0x0001) { @@ -250,7 +250,7 @@ static void dtl1_receive(struct dtl1_info *info) /* remove NSH */ skb_pull(info->rx_skb, NSHL); - switch (bt_cb(info->rx_skb)->pkt_type) { + switch (hci_skb_pkt_type(info->rx_skb)) { case 0x80: /* control data for the Nokia Card */ dtl1_control(info, info->rx_skb); @@ -259,12 +259,13 @@ static void dtl1_receive(struct dtl1_info *info) case 0x83: case 0x84: /* send frame to the HCI layer */ - bt_cb(info->rx_skb)->pkt_type &= 0x0f; + hci_skb_pkt_type(info->rx_skb) &= 0x0f; hci_recv_frame(info->hdev, info->rx_skb); break; default: /* unknown packet */ - BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); kfree_skb(info->rx_skb); break; } @@ -386,7 +387,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) struct sk_buff *s; struct nsh nsh; - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; nsh.type = 0x81; diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index d776dfd51478..0ccf6bf01ed4 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -205,7 +205,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct ath_struct *ath = hu->priv; - if (bt_cb(skb)->pkt_type == HCI_SCODATA_PKT) { + if (hci_skb_pkt_type(skb) == HCI_SCODATA_PKT) { kfree_skb(skb); return 0; } @@ -213,7 +213,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) /* Update power management enable flag with parameters of * HCI sleep enable vendor specific HCI command. 
*/ - if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { + if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) { struct hci_command_hdr *hdr = (void *)skb->data; if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP) @@ -223,7 +223,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) BT_DBG("hu %p skb %p", hu, skb); /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&ath->txq, skb); set_bit(HCI_UART_SENDING, &hu->tx_state); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index cb852cc750b7..5f3de181e744 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -472,7 +472,7 @@ static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb) bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb); /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&bcm->txq, skb); return 0; @@ -814,8 +814,16 @@ static const struct hci_uart_proto bcm_proto = { #ifdef CONFIG_ACPI static const struct acpi_device_id bcm_acpi_match[] = { + { "BCM2E1A", 0 }, { "BCM2E39", 0 }, + { "BCM2E3A", 0 }, + { "BCM2E3D", 0 }, + { "BCM2E3F", 0 }, + { "BCM2E40", 0 }, + { "BCM2E64", 0 }, + { "BCM2E65", 0 }, { "BCM2E67", 0 }, + { "BCM2E7B", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index d0b615a932d1..064f2fefad62 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -155,7 +155,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb) return 0; } - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: skb_queue_tail(&bcsp->rel, skb); @@ -231,7 +231,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, if (!nskb) return NULL; - bt_cb(nskb)->pkt_type = pkt_type; + hci_skb_pkt_type(nskb) = pkt_type; bcsp_slip_msgdelim(nskb); @@ -291,7 +291,10 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) skb = skb_dequeue(&bcsp->unrel); if (skb != NULL) { - struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); + struct sk_buff *nskb; + + nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, + hci_skb_pkt_type(skb)); if (nskb) { kfree_skb(skb); return nskb; @@ -310,8 +313,10 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) if (bcsp->unack.qlen < BCSP_TXWINSIZE) { skb = skb_dequeue(&bcsp->rel); if (skb != NULL) { - struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, - bt_cb(skb)->pkt_type); + struct sk_buff *nskb; + + nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, + hci_skb_pkt_type(skb)); if (nskb) { __skb_queue_tail(&bcsp->unack, skb); mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); @@ -412,7 +417,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu) if (!nskb) return; memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); - bt_cb(nskb)->pkt_type = BCSP_LE_PKT; + hci_skb_pkt_type(nskb) = BCSP_LE_PKT; skb_queue_head(&bcsp->unrel, nskb); hci_uart_tx_wakeup(hu); @@ -494,14 +499,14 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu) bcsp_pkt_cull(bcsp); if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && bcsp->rx_skb->data[0] & 0x80) { - bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT; + hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && bcsp->rx_skb->data[0] & 0x80) { - 
bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; + hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { - bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT; + hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && !(bcsp->rx_skb->data[0] & 0x80)) { @@ -523,7 +528,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu) hdr.evt = 0xff; hdr.plen = bcsp->rx_skb->len; memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); - bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; + hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT; hci_recv_frame(hu->hdev, bcsp->rx_skb); } else { diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index a6fce48da0fb..635597b6e168 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -108,7 +108,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) BT_DBG("hu %p skb %p", hu, skb); /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&h4->txq, skb); return 0; @@ -184,8 +184,8 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, if (!skb) return ERR_PTR(-ENOMEM); - bt_cb(skb)->pkt_type = (&pkts[i])->type; - bt_cb(skb)->expect = (&pkts[i])->hlen; + hci_skb_pkt_type(skb) = (&pkts[i])->type; + hci_skb_expect(skb) = (&pkts[i])->hlen; break; } @@ -197,18 +197,18 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, buffer += 1; } - len = min_t(uint, bt_cb(skb)->expect - skb->len, count); + len = min_t(uint, hci_skb_expect(skb) - skb->len, count); memcpy(skb_put(skb, len), buffer, len); count -= len; buffer += len; /* Check for partial packet */ - if (skb->len < bt_cb(skb)->expect) + if (skb->len < hci_skb_expect(skb)) continue; for (i = 0; i < pkts_count; i++) { - if (bt_cb(skb)->pkt_type == (&pkts[i])->type) + if (hci_skb_pkt_type(skb) == (&pkts[i])->type) break; } @@ -228,7 +228,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, case 1: /* Single octet variable length */ dlen = skb->data[(&pkts[i])->loff]; - bt_cb(skb)->expect += dlen; + hci_skb_expect(skb) += dlen; if (skb_tailroom(skb) < dlen) { kfree_skb(skb); @@ -239,7 +239,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, /* Double octet variable length */ dlen = get_unaligned_le16(skb->data + (&pkts[i])->loff); - bt_cb(skb)->expect += dlen; + hci_skb_expect(skb) += dlen; if (skb_tailroom(skb) < dlen) { kfree_skb(skb); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index abee2216fdeb..0879d64b1caf 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -51,7 +51,7 @@ #define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01) #define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01) #define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f) -#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4)) +#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4)) #define SLIP_DELIMITER 0xc0 #define SLIP_ESC 0xdb @@ -107,7 +107,7 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len) if (!nskb) return; - bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT; + hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT; memcpy(skb_put(nskb, len), data, len); @@ -116,18 +116,14 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len) static u8 h5_cfg_field(struct h5 *h5) { - u8 field = 0; - 
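The H5_HDR_LEN change above tightens the mask so that only the low nibble of header byte 1 feeds the 12-bit payload length. A small decode sketch of that layout; h5_payload_len() exists only for illustration:

#include <linux/types.h>

/* Three-wire (H:5) header: byte 1 holds the packet type in bits 0-3 and
 * length bits 0-3 in bits 4-7; byte 2 holds length bits 4-11.
 */
static unsigned int h5_payload_len(const u8 *hdr)
{
	return ((hdr[1] >> 4) & 0x0f) + (hdr[2] << 4);
}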
/* Sliding window size (first 3 bits) */ - field |= (h5->tx_win & 7); - - return field; + return h5->tx_win & 0x07; } static void h5_timed_event(unsigned long arg) { const unsigned char sync_req[] = { 0x01, 0x7e }; - unsigned char conf_req[] = { 0x03, 0xfc, 0x01 }; + unsigned char conf_req[3] = { 0x03, 0xfc }; struct hci_uart *hu = (struct hci_uart *)arg; struct h5 *h5 = hu->priv; struct sk_buff *skb; @@ -285,7 +281,7 @@ static void h5_handle_internal_rx(struct hci_uart *hu) struct h5 *h5 = hu->priv; const unsigned char sync_req[] = { 0x01, 0x7e }; const unsigned char sync_rsp[] = { 0x02, 0x7d }; - unsigned char conf_req[] = { 0x03, 0xfc, 0x01 }; + unsigned char conf_req[3] = { 0x03, 0xfc }; const unsigned char conf_rsp[] = { 0x04, 0x7b }; const unsigned char wakeup_req[] = { 0x05, 0xfa }; const unsigned char woken_req[] = { 0x06, 0xf9 }; @@ -317,7 +313,7 @@ static void h5_handle_internal_rx(struct hci_uart *hu) h5_link_control(hu, conf_req, 3); } else if (memcmp(data, conf_rsp, 2) == 0) { if (H5_HDR_LEN(hdr) > 2) - h5->tx_win = (data[2] & 7); + h5->tx_win = (data[2] & 0x07); BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win); h5->state = H5_ACTIVE; hci_uart_init_ready(hu); @@ -360,7 +356,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu) case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: - bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr); + hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr); /* Remove Three-wire header */ skb_pull(h5->rx_skb, 4); @@ -562,7 +558,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb) return 0; } - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: skb_queue_tail(&h5->rel, skb); @@ -573,7 +569,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb) break; default: - BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type); + BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb)); kfree_skb(skb); break; } @@ -642,7 +638,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type, if (!nskb) return NULL; - bt_cb(nskb)->pkt_type = pkt_type; + hci_skb_pkt_type(nskb) = pkt_type; h5_slip_delim(nskb); @@ -697,7 +693,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu) skb = skb_dequeue(&h5->unrel); if (skb) { - nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type, + nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb), skb->data, skb->len); if (nskb) { kfree_skb(skb); @@ -715,7 +711,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu) skb = skb_dequeue(&h5->rel); if (skb) { - nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type, + nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb), skb->data, skb->len); if (nskb) { __skb_queue_tail(&h5->unack, skb); diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 4a414a5a3165..3d63ea37bd4c 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -186,7 +186,7 @@ static int intel_lpm_suspend(struct hci_uart *hu) } memcpy(skb_put(skb, sizeof(suspend)), suspend, sizeof(suspend)); - bt_cb(skb)->pkt_type = HCI_LPM_PKT; + hci_skb_pkt_type(skb) = HCI_LPM_PKT; set_bit(STATE_LPM_TRANSACTION, &intel->flags); @@ -230,7 +230,7 @@ static int intel_lpm_resume(struct hci_uart *hu) return -ENOMEM; } - bt_cb(skb)->pkt_type = HCI_LPM_WAKE_PKT; + hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT; set_bit(STATE_LPM_TRANSACTION, &intel->flags); @@ -272,7 +272,7 @@ static int intel_lpm_host_wake(struct hci_uart *hu) memcpy(skb_put(skb, sizeof(lpm_resume_ack)), lpm_resume_ack, 
sizeof(lpm_resume_ack)); - bt_cb(skb)->pkt_type = HCI_LPM_PKT; + hci_skb_pkt_type(skb) = HCI_LPM_PKT; /* LPM flow is a priority, enqueue packet at list head */ skb_queue_head(&intel->txq, skb); @@ -467,7 +467,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) *skb_put(skb, 1) = 0x00; - bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; return hci_recv_frame(hdev, skb); } @@ -502,7 +502,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed) /* Device will not accept speed change if Intel version has not been * previously requested. */ - skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel version information failed (%ld)", PTR_ERR(skb)); @@ -517,7 +517,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed) } memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd)); - bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_uart_set_flow_control(hu, true); @@ -542,7 +542,7 @@ static int intel_setup(struct hci_uart *hu) struct intel_device *idev = NULL; struct hci_dev *hdev = hu->hdev; struct sk_buff *skb; - struct intel_version *ver; + struct intel_version ver; struct intel_boot_params *params; struct list_head *p; const struct firmware *fw; @@ -590,35 +590,16 @@ static int intel_setup(struct hci_uart *hu) * is in bootloader mode or if it already has operational firmware * loaded. */ - skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - bt_dev_err(hdev, "Reading Intel version information failed (%ld)", - PTR_ERR(skb)); - return PTR_ERR(skb); - } - - if (skb->len != sizeof(*ver)) { - bt_dev_err(hdev, "Intel version event size mismatch"); - kfree_skb(skb); - return -EILSEQ; - } - - ver = (struct intel_version *)skb->data; - if (ver->status) { - bt_dev_err(hdev, "Intel version command failure (%02x)", - ver->status); - err = -bt_to_errno(ver->status); - kfree_skb(skb); + err = btintel_read_version(hdev, &ver); + if (err) return err; - } /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. */ - if (ver->hw_platform != 0x37) { + if (ver.hw_platform != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", - ver->hw_platform); - kfree_skb(skb); + ver.hw_platform); return -EINVAL; } @@ -627,14 +608,13 @@ static int intel_setup(struct hci_uart *hu) * put in place to ensure correct forward compatibility options * when newer hardware variants come along. */ - if (ver->hw_variant != 0x0b) { + if (ver.hw_variant != 0x0b) { bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", - ver->hw_variant); - kfree_skb(skb); + ver.hw_variant); return -EINVAL; } - btintel_version_info(hdev, ver); + btintel_version_info(hdev, &ver); /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies @@ -649,8 +629,7 @@ static int intel_setup(struct hci_uart *hu) * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. 
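/*
 * The intel_setup() hunks above replace the open-coded "read Intel
 * version" sequence with a single btintel_read_version() call that fills
 * a caller-provided struct intel_version, so the skb handling and error
 * checking live in one place. A minimal sketch of what such a helper
 * plausibly does, reconstructed from the lines removed here (opcode
 * 0xfc05, length and status checks); the real btintel implementation may
 * differ in detail:
 */
static int example_read_version(struct hci_dev *hdev,
				struct intel_version *ver)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len != sizeof(*ver)) {
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(ver, skb->data, sizeof(*ver));
	kfree_skb(skb);

	if (ver->status)
		return -bt_to_errno(ver->status);

	return 0;
}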
*/ - if (ver->fw_variant == 0x23) { - kfree_skb(skb); + if (ver.fw_variant == 0x23) { clear_bit(STATE_BOOTLOADER, &intel->flags); btintel_check_bdaddr(hdev); return 0; @@ -659,19 +638,16 @@ static int intel_setup(struct hci_uart *hu) /* If the device is not in bootloader mode, then the only possible * choice is to return an error and abort the device initialization. */ - if (ver->fw_variant != 0x06) { + if (ver.fw_variant != 0x06) { bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", - ver->fw_variant); - kfree_skb(skb); + ver.fw_variant); return -ENODEV; } - kfree_skb(skb); - /* Read the secure boot parameters to identify the operating * details of the bootloader. */ - skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT); + skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)", PTR_ERR(skb)); @@ -881,7 +857,7 @@ done: set_bit(STATE_BOOTING, &intel->flags); skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param, - HCI_INIT_TIMEOUT); + HCI_CMD_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1126,7 +1102,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu) return skb; if (test_bit(STATE_BOOTLOADER, &intel->flags) && - (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) { + (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) { struct hci_command_hdr *cmd = (void *)skb->data; __u16 opcode = le16_to_cpu(cmd->opcode); @@ -1140,7 +1116,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu) } /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); return skb; } diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 96bcec5598c2..73202624133b 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -162,7 +162,7 @@ restart: break; } - hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type); + hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); kfree_skb(skb); } @@ -248,7 +248,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); - BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); + BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); hu->proto->enqueue(hu, skb); @@ -461,13 +462,7 @@ static int hci_uart_tty_open(struct tty_struct *tty) INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - /* Flush any pending characters in the driver and line discipline. */ - - /* FIXME: why is this needed. 
Note don't use ldisc_ref here as the - open path is before the ldisc is referencable */ - - if (tty->ldisc->ops->flush_buffer) - tty->ldisc->ops->flush_buffer(tty); + /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); return 0; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 9ee24b075f79..02692fe30279 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -307,7 +307,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) BT_DBG("hu %p skb %p", hu, skb); /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); /* lock hcill state */ spin_lock_irqsave(&ll->hcill_lock, flags); @@ -493,7 +493,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count) return -ENOMEM; } - bt_cb(ll->rx_skb)->pkt_type = type; + hci_skb_pkt_type(ll->rx_skb) = type; } return count; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 71325e443e46..683c2b642057 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -678,7 +678,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) qca->tx_ibs_state); /* Prepend skb with frame type */ - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); /* Don't go to sleep in middle of patch download or * Out-Of-Band(GPIOs control) sleep is selected. @@ -873,7 +873,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) /* Assign commands to change baudrate and packet type. */ memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); - bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; skb_queue_tail(&qca->txq, skb); hci_uart_tx_wakeup(hu); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index ed888e302bc3..80783dcb7f57 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -80,7 +80,7 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct vhci_data *data = hci_get_drvdata(hdev); - memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&data->readq, skb); wake_up_interruptible(&data->read_wait); @@ -140,7 +140,7 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode) return -EBUSY; } - bt_cb(skb)->pkt_type = HCI_VENDOR_PKT; + hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; *skb_put(skb, 1) = 0xff; *skb_put(skb, 1) = opcode; @@ -183,7 +183,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data, return -ENODEV; } - bt_cb(skb)->pkt_type = pkt_type; + hci_skb_pkt_type(skb) = pkt_type; ret = hci_recv_frame(data->hdev, skb); break; @@ -234,7 +234,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data, data->hdev->stat.byte_tx += len; - switch (bt_cb(skb)->pkt_type) { + switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: data->hdev->stat.cmd_tx++; break; diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 846bc29c157d..25996e256110 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, ret = _sunxi_rsb_run_xfer(rsb); if (ret) - goto out; + goto unlock; *buf = readl(rsb->regs + RSB_DATA); +unlock: mutex_unlock(&rsb->lock); -out: return ret; } @@ -527,9 +527,9 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb) */ static const struct 
sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = { - { 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ + { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */ - { 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */ + { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */ }; static u8 sunxi_rsb_get_rtaddr(u16 hwaddr) diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index c206ccda899b..1b257ea9776a 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3186,15 +3186,11 @@ static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_DVD)) return -ENOSYS; - s = kmalloc(size, GFP_KERNEL); - if (!s) - return -ENOMEM; + s = memdup_user(arg, size); + if (IS_ERR(s)) + return PTR_ERR(s); cd_dbg(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); - if (copy_from_user(s, arg, size)) { - kfree(s); - return -EFAULT; - } ret = dvd_read_struct(cdi, s, cgc); if (ret) diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c index 6c4f4b5a9dd3..073db9558379 100644 --- a/drivers/char/generic_nvram.c +++ b/drivers/char/generic_nvram.c @@ -20,6 +20,7 @@ #include <linux/fcntl.h> #include <linux/init.h> #include <linux/mutex.h> +#include <linux/pagemap.h> #include <asm/uaccess.h> #include <asm/nvram.h> #ifdef CONFIG_PPC_PMAC @@ -33,24 +34,8 @@ static ssize_t nvram_len; static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) { - switch (origin) { - case 0: - break; - case 1: - offset += file->f_pos; - break; - case 2: - offset += nvram_len; - break; - default: - offset = -1; - } - if (offset < 0) - return -EINVAL; - - file->f_pos = offset; - - return file->f_pos; + return generic_file_llseek_size(file, offset, origin, + MAX_LFS_FILESIZE, nvram_len); } static ssize_t read_nvram(struct file *file, char __user *buf, diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 6f497aa1b276..9203f2d130c0 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, goto out; } - mutex_lock(&reading_mutex); + if (mutex_lock_interruptible(&reading_mutex)) { + err = -ERESTARTSYS; + goto out_put; + } if (!data_avail) { bytes_read = rng_get_data(rng, rng_buffer, rng_buffer_size(), @@ -288,6 +291,7 @@ out: out_unlock_reading: mutex_unlock(&reading_mutex); +out_put: put_rng(rng); goto out; } diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index a405cdcd8dd2..8da14f1a1f56 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -17,7 +17,7 @@ #include <linux/init.h> #include <linux/random.h> #include <linux/hw_random.h> -#include <linux/timer.h> +#include <linux/workqueue.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> @@ -29,11 +29,11 @@ /* param1: ptr, param2: count, param3: flag */ static u32 (*omap3_rom_rng_call)(u32, u32, u32); -static struct timer_list idle_timer; +static struct delayed_work idle_work; static int rng_idle; static struct clk *rng_clk; -static void omap3_rom_rng_idle(unsigned long data) +static void omap3_rom_rng_idle(struct work_struct *work) { int r; @@ -51,7 +51,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) u32 r; u32 ptr; - del_timer_sync(&idle_timer); + cancel_delayed_work_sync(&idle_work); if (rng_idle) { clk_prepare_enable(rng_clk); r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); @@ -65,7 
+65,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) ptr = virt_to_phys(buf); r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW); - mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500)); + schedule_delayed_work(&idle_work, msecs_to_jiffies(500)); if (r != 0) return -EINVAL; return 0; @@ -102,7 +102,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev) return -EINVAL; } - setup_timer(&idle_timer, omap3_rom_rng_idle, 0); + INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle); rng_clk = devm_clk_get(&pdev->dev, "ick"); if (IS_ERR(rng_clk)) { pr_err("unable to get RNG clock\n"); @@ -118,6 +118,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev) static int omap3_rom_rng_remove(struct platform_device *pdev) { + cancel_delayed_work_sync(&idle_work); hwrng_unregister(&omap3_rom_rng_ops); clk_disable_unprepare(rng_clk); return 0; diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 0c98a9d51a24..44ce80606944 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng) * RNG configuration like it used to be the case in this * register */ if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { - if (!cpu_has_xstore_enabled) { + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { pr_err(PFX "can't enable hardware RNG " "if XSTORE is not enabled\n"); return -ENODEV; @@ -200,8 +200,9 @@ static int __init mod_init(void) { int err; - if (!cpu_has_xstore) + if (!boot_cpu_has(X86_FEATURE_XSTORE)) return -ENODEV; + pr_info("VIA RNG detected\n"); err = hwrng_register(&via_rng); if (err) { diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c index e5d3e3f7a49b..67d426470e53 100644 --- a/drivers/char/mbcs.c +++ b/drivers/char/mbcs.c @@ -26,6 +26,7 @@ #include <linux/uio.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/pagemap.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -451,31 +452,8 @@ mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * o static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence) { - loff_t newpos; - - switch (whence) { - case SEEK_SET: - newpos = off; - break; - - case SEEK_CUR: - newpos = filp->f_pos + off; - break; - - case SEEK_END: - newpos = MBCS_SRAM_SIZE + off; - break; - - default: /* can't happen */ - return -EINVAL; - } - - if (newpos < 0) - return -EINVAL; - - filp->f_pos = newpos; - - return newpos; + return generic_file_llseek_size(filp, off, whence, MAX_LFS_FILESIZE, + MBCS_SRAM_SIZE); } static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset) diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 97c2d8d433d6..01292328a456 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -110,6 +110,7 @@ #include <linux/io.h> #include <linux/uaccess.h> #include <linux/mutex.h> +#include <linux/pagemap.h> static DEFINE_MUTEX(nvram_mutex); @@ -213,21 +214,8 @@ void nvram_set_checksum(void) static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) { - switch (origin) { - case 0: - /* nothing to do */ - break; - case 1: - offset += file->f_pos; - break; - case 2: - offset += NVRAM_BYTES; - break; - default: - return -EINVAL; - } - - return (offset >= 0) ? 
(file->f_pos = offset) : -EINVAL; + return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, + NVRAM_BYTES); } static ssize_t nvram_read(struct file *file, char __user *buf, diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index e371480d3639..dbe598de9b74 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -277,36 +277,7 @@ static loff_t flash_llseek(struct file *file, loff_t offset, int orig) printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n", (unsigned int) offset, orig); - switch (orig) { - case 0: - if (offset < 0) { - ret = -EINVAL; - break; - } - - if ((unsigned int) offset > gbFlashSize) { - ret = -EINVAL; - break; - } - - file->f_pos = (unsigned int) offset; - ret = file->f_pos; - break; - case 1: - if ((file->f_pos + offset) > gbFlashSize) { - ret = -EINVAL; - break; - } - if ((file->f_pos + offset) < 0) { - ret = -EINVAL; - break; - } - file->f_pos += offset; - ret = file->f_pos; - break; - default: - ret = -EINVAL; - } + ret = no_seek_end_llseek_size(file, offset, orig, gbFlashSize); mutex_unlock(&flash_mutex); return ret; } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 2eb5f0efae90..b251013eef0a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -28,10 +28,16 @@ config CLKSRC_MMIO bool config DIGICOLOR_TIMER - bool + bool "Digicolor timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + help + Enables the support for the digicolor timer driver. config DW_APB_TIMER - bool + bool "DW APB timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + help + Enables the support for the dw_apb timer. config DW_APB_TIMER_OF bool @@ -39,47 +45,77 @@ config DW_APB_TIMER_OF select CLKSRC_OF config ROCKCHIP_TIMER - bool + bool "Rockchip timer driver" if COMPILE_TEST + depends on ARM || ARM64 select CLKSRC_OF + help + Enables the support for the rockchip timer driver. config ARMADA_370_XP_TIMER - bool + bool "Armada 370 and XP timer driver" if COMPILE_TEST + depends on ARM select CLKSRC_OF + help + Enables the support for the Armada 370 and XP timer driver. config MESON6_TIMER - bool + bool "Meson6 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO + help + Enables the support for the Meson6 timer driver. config ORION_TIMER + bool "Orion timer driver" if COMPILE_TEST + depends on ARM select CLKSRC_OF select CLKSRC_MMIO - bool + help + Enables the support for the Orion timer driver config SUN4I_TIMER + bool "Sun4i timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO - bool + help + Enables support for the Sun4i timer. config SUN5I_HSTIMER + bool "Sun5i timer driver" if COMPILE_TEST select CLKSRC_MMIO - bool + depends on COMMON_CLK + help + Enables support the Sun5i timer. config TEGRA_TIMER - bool + bool "Tegra timer driver" if COMPILE_TEST + depends on ARM + help + Enables support for the Tegra driver. config VT8500_TIMER - bool + bool "VT8500 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + help + Enables support for the VT8500 driver. config CADENCE_TTC_TIMER - bool + bool "Cadence TTC timer driver" if COMPILE_TEST + depends on COMMON_CLK + help + Enables support for the cadence ttc driver. config ASM9260_TIMER - bool + bool "ASM9260 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO select CLKSRC_OF + help + Enables support for the ASM9260 timer. 
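The character-driver hunks earlier in this section (generic_nvram, mbcs, nvram and nwflash) all drop their hand-rolled llseek switch statements in favour of the VFS helpers generic_file_llseek_size() and, in nwflash, the related no_seek_end_llseek_size(), passing the fixed device size as the end-of-file bound so the SEEK_SET/SEEK_CUR/SEEK_END arithmetic and the negative-offset checks are handled in one shared place. A minimal sketch of the resulting pattern, with EXAMPLE_DEV_SIZE standing in for whatever size constant a given driver exposes:

#define EXAMPLE_DEV_SIZE	8192	/* illustrative; each driver passes its own size */

static loff_t example_llseek(struct file *file, loff_t offset, int origin)
{
	/*
	 * MAX_LFS_FILESIZE bounds how far f_pos may legally be moved;
	 * EXAMPLE_DEV_SIZE is what SEEK_END is taken relative to.
	 */
	return generic_file_llseek_size(file, offset, origin,
					MAX_LFS_FILESIZE, EXAMPLE_DEV_SIZE);
}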
config CLKSRC_NOMADIK_MTU - bool - depends on (ARCH_NOMADIK || ARCH_U8500) + bool "Nomakdik clocksource driver" if COMPILE_TEST + depends on ARM select CLKSRC_MMIO help Support for Multi Timer Unit. MTU provides access @@ -93,9 +129,8 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK Use the Multi Timer Unit as the sched_clock. config CLKSRC_DBX500_PRCMU - bool "Clocksource PRCMU Timer" - depends on UX500_SOC_DB8500 - default y + bool "Clocksource PRCMU Timer" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS help Use the always on PRCMU Timer as clocksource @@ -116,13 +151,18 @@ config CLKSRC_EFM32 event device. config CLKSRC_LPC32XX - bool + bool "Clocksource for LPC32XX" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO select CLKSRC_OF + help + Support for the LPC32XX clocksource. config CLKSRC_PISTACHIO - bool + bool "Clocksource for Pistachio SoC" if COMPILE_TEST select CLKSRC_OF + help + Enables the clocksource for the Pistachio SoC. config CLKSRC_TI_32K bool "Texas Instruments 32.768 Hz Clocksource" if COMPILE_TEST @@ -199,13 +239,14 @@ config CLKSRC_METAG_GENERIC This option enables support for the Meta per-thread timers. config CLKSRC_EXYNOS_MCT - def_bool y if ARCH_EXYNOS - depends on !ARM64 + bool "Exynos multi core timer driver" if COMPILE_TEST + depends on ARM help Support for Multi Core Timer controller on Exynos SoCs. config CLKSRC_SAMSUNG_PWM - bool + bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS help This is a new clocksource driver for the PWM timer found in Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver @@ -213,7 +254,8 @@ config CLKSRC_SAMSUNG_PWM needed only on systems that do not have the Exynos MCT available. config FSL_FTM_TIMER - bool + bool "Freescale FlexTimer Module driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS help Support for Freescale FlexTimer Module (FTM) timer. @@ -226,9 +268,12 @@ config SYS_SUPPORTS_SH_CMT bool config MTK_TIMER + bool "Mediatek timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS select CLKSRC_OF select CLKSRC_MMIO - bool + help + Support for Mediatek timer driver. config SYS_SUPPORTS_SH_MTU2 bool @@ -279,7 +324,12 @@ config EM_TIMER_STI such as EMEV2 from former NEC Electronics. config CLKSRC_QCOM - bool + bool "Qualcomm MSM timer" if COMPILE_TEST + depends on ARM + select CLKSRC_OF + help + This enables the clocksource and the per CPU clockevent driver for the + Qualcomm SoCs. config CLKSRC_VERSATILE bool "ARM Versatile (Express) reference platforms clock source" @@ -298,21 +348,40 @@ config CLKSRC_MIPS_GIC select CLKSRC_OF config CLKSRC_TANGO_XTAL - bool + bool "Clocksource for Tango SoC" if COMPILE_TEST + depends on ARM select CLKSRC_OF + select CLKSRC_MMIO + help + This enables the clocksource for Tango SoC config CLKSRC_PXA - def_bool y if ARCH_PXA || ARCH_SA1100 - select CLKSRC_OF if OF + bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO help This enables OST0 support available on PXA and SA-11x0 platforms. +config H8300_TMR8 + bool "Clockevent timer for the H8300 platform" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + help + This enables the 8 bits timer for the H8300 platform. + config H8300_TMR16 - bool + bool "Clockevent timer for the H83069 platform" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + help + This enables the 16 bits timer for the H8300 platform with the + H83069 cpu. 
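The Bluetooth hci_uart and hci_vhci hunks at the top of this section are a mechanical rename: every direct bt_cb(skb)->pkt_type and bt_cb(skb)->expect access becomes hci_skb_pkt_type(skb) or hci_skb_expect(skb). The new spellings are still assigned to (for example hci_skb_pkt_type(skb) = HCI_EVENT_PKT), so they must expand to lvalues over the same skb control-buffer fields; the accessors themselves are defined in the Bluetooth core headers, not in this diff. A sketch of the assumed shape, using example_ names to keep it clearly illustrative:

/* Assumed accessor shape; the real macros live in the bluetooth headers. */
#define example_skb_pkt_type(skb)	(bt_cb(skb)->pkt_type)
#define example_skb_expect(skb)		(bt_cb(skb)->expect)

static void example_mark_as_event(struct sk_buff *skb)
{
	/* usable on either side of an assignment, exactly like the old field access */
	example_skb_pkt_type(skb) = HCI_EVENT_PKT;
}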
config H8300_TPU - bool + bool "Clocksource for the H8300 platform" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + help + This enables the clocksource for the H8300 platform with the + H8S2678 cpu. config CLKSRC_IMX_GPT bool "Clocksource using i.MX GPT" if COMPILE_TEST @@ -320,8 +389,7 @@ config CLKSRC_IMX_GPT select CLKSRC_MMIO config CLKSRC_ST_LPC - bool - depends on ARCH_STI + bool "Low power clocksource found in the LPC" if COMPILE_TEST select CLKSRC_OF if OF help Enable this option to use the Low Power controller timer diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 56bd16e77ae3..dc2b8997f6e6 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -60,7 +60,7 @@ obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o -obj-$(CONFIG_H8300) += h8300_timer8.o +obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o obj-$(CONFIG_H8300_TPU) += h8300_tpu.o obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 6eab88985670..28037d0b8dcd 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -109,10 +109,8 @@ static void acpi_pm_check_blacklist(struct pci_dev *dev) /* the bug has been fixed in PIIX4M */ if (dev->revision < 3) { - printk(KERN_WARNING "* Found PM-Timer Bug on the chipset." - " Due to workarounds for a bug,\n" - "* this clock source is slow. Consider trying" - " other clock sources\n"); + pr_warn("* Found PM-Timer Bug on the chipset. Due to workarounds for a bug,\n" + "* this clock source is slow. Consider trying other clock sources\n"); acpi_pm_need_workaround(); } @@ -125,12 +123,9 @@ static void acpi_pm_check_graylist(struct pci_dev *dev) if (acpi_pm_good) return; - printk(KERN_WARNING "* The chipset may have PM-Timer Bug. Due to" - " workarounds for a bug,\n" - "* this clock source is slow. If you are sure your timer" - " does not have\n" - "* this bug, please use \"acpi_pm_good\" to disable the" - " workaround\n"); + pr_warn("* The chipset may have PM-Timer Bug. Due to workarounds for a bug,\n" + "* this clock source is slow. 
If you are sure your timer does not have\n" + "* this bug, please use \"acpi_pm_good\" to disable the workaround\n"); acpi_pm_need_workaround(); } @@ -162,8 +157,7 @@ static int verify_pmtmr_rate(void) /* Check that the PMTMR delta is within 5% of what we expect */ if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 || delta > (PMTMR_EXPECTED_RATE * 21) / 20) { - printk(KERN_INFO "PM-Timer running at invalid rate: %lu%% " - "of normal - aborting.\n", + pr_info("PM-Timer running at invalid rate: %lu%% of normal - aborting.\n", 100UL * delta / PMTMR_EXPECTED_RATE); return -1; } @@ -199,15 +193,14 @@ static int __init init_acpi_pm_clocksource(void) break; if ((value2 < value1) && ((value2) < 0xFFF)) break; - printk(KERN_INFO "PM-Timer had inconsistent results:" - " %#llx, %#llx - aborting.\n", - value1, value2); + pr_info("PM-Timer had inconsistent results: %#llx, %#llx - aborting.\n", + value1, value2); pmtmr_ioport = 0; return -EINVAL; } if (i == ACPI_PM_READ_CHECKS) { - printk(KERN_INFO "PM-Timer failed consistency check " - " (%#llx) - aborting.\n", value1); + pr_info("PM-Timer failed consistency check (%#llx) - aborting.\n", + value1); pmtmr_ioport = 0; return -ENODEV; } diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index a2cb6fae9295..d189d8cb69f7 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -99,17 +99,17 @@ static void gt_compare_set(unsigned long delta, int periodic) counter += delta; ctrl = GT_CONTROL_TIMER_ENABLE; - writel(ctrl, gt_base + GT_CONTROL); - writel(lower_32_bits(counter), gt_base + GT_COMP0); - writel(upper_32_bits(counter), gt_base + GT_COMP1); + writel_relaxed(ctrl, gt_base + GT_CONTROL); + writel_relaxed(lower_32_bits(counter), gt_base + GT_COMP0); + writel_relaxed(upper_32_bits(counter), gt_base + GT_COMP1); if (periodic) { - writel(delta, gt_base + GT_AUTO_INC); + writel_relaxed(delta, gt_base + GT_AUTO_INC); ctrl |= GT_CONTROL_AUTO_INC; } ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE; - writel(ctrl, gt_base + GT_CONTROL); + writel_relaxed(ctrl, gt_base + GT_CONTROL); } static int gt_clockevent_shutdown(struct clock_event_device *evt) @@ -195,12 +195,23 @@ static cycle_t gt_clocksource_read(struct clocksource *cs) return gt_counter_read(); } +static void gt_resume(struct clocksource *cs) +{ + unsigned long ctrl; + + ctrl = readl(gt_base + GT_CONTROL); + if (!(ctrl & GT_CONTROL_TIMER_ENABLE)) + /* re-enable timer on resume */ + writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL); +} + static struct clocksource gt_clocksource = { .name = "arm_global_timer", .rating = 300, .read = gt_clocksource_read, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .resume = gt_resume, }; #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index c76c75006ea6..63345260244d 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -49,20 +49,31 @@ clocksource_to_dw_apb_clocksource(struct clocksource *cs) return container_of(cs, struct dw_apb_clocksource, cs); } -static unsigned long apbt_readl(struct dw_apb_timer *timer, unsigned long offs) +static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs) { return readl(timer->base + offs); } -static void apbt_writel(struct dw_apb_timer *timer, unsigned long val, - unsigned long offs) +static inline void apbt_writel(struct dw_apb_timer *timer, u32 val, + unsigned long offs) { writel(val, 
timer->base + offs); } +static inline u32 apbt_readl_relaxed(struct dw_apb_timer *timer, unsigned long offs) +{ + return readl_relaxed(timer->base + offs); +} + +static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val, + unsigned long offs) +{ + writel_relaxed(val, timer->base + offs); +} + static void apbt_disable_int(struct dw_apb_timer *timer) { - unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); + u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL); ctrl |= APBTMR_CONTROL_INT; apbt_writel(timer, ctrl, APBTMR_N_CONTROL); @@ -81,7 +92,7 @@ void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced) static void apbt_eoi(struct dw_apb_timer *timer) { - apbt_readl(timer, APBTMR_N_EOI); + apbt_readl_relaxed(timer, APBTMR_N_EOI); } static irqreturn_t dw_apb_clockevent_irq(int irq, void *data) @@ -103,7 +114,7 @@ static irqreturn_t dw_apb_clockevent_irq(int irq, void *data) static void apbt_enable_int(struct dw_apb_timer *timer) { - unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); + u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL); /* clear pending intr */ apbt_readl(timer, APBTMR_N_EOI); ctrl &= ~APBTMR_CONTROL_INT; @@ -113,7 +124,7 @@ static void apbt_enable_int(struct dw_apb_timer *timer) static int apbt_shutdown(struct clock_event_device *evt) { struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); - unsigned long ctrl; + u32 ctrl; pr_debug("%s CPU %d state=shutdown\n", __func__, cpumask_first(evt->cpumask)); @@ -127,7 +138,7 @@ static int apbt_shutdown(struct clock_event_device *evt) static int apbt_set_oneshot(struct clock_event_device *evt) { struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); - unsigned long ctrl; + u32 ctrl; pr_debug("%s CPU %d state=oneshot\n", __func__, cpumask_first(evt->cpumask)); @@ -160,7 +171,7 @@ static int apbt_set_periodic(struct clock_event_device *evt) { struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); - unsigned long ctrl; + u32 ctrl; pr_debug("%s CPU %d state=periodic\n", __func__, cpumask_first(evt->cpumask)); @@ -196,17 +207,17 @@ static int apbt_resume(struct clock_event_device *evt) static int apbt_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long ctrl; + u32 ctrl; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); /* Disable timer */ - ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); + ctrl = apbt_readl_relaxed(&dw_ced->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* write new count */ - apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT); + apbt_writel_relaxed(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT); ctrl |= APBTMR_CONTROL_ENABLE; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); return 0; } @@ -323,7 +334,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) * start count down from 0xffff_ffff. this is done by toggling the * enable bit then load initial load count to ~0. 
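/*
 * On the readl()/writel() to readl_relaxed()/writel_relaxed() conversions
 * in the arm_global_timer and dw_apb_timer hunks above: on ARM the
 * non-relaxed accessors include memory barriers so that MMIO is ordered
 * against normal memory (for example DMA buffers). Reprogramming a timer
 * only touches that timer's own registers, so the relaxed variants, which
 * keep ordering only with respect to the same device, are sufficient and
 * avoid barrier overhead on every timer interrupt. A hedged sketch of the
 * pattern (register offsets are illustrative, not the APBTMR layout):
 */
static void example_set_next_event(void __iomem *base, u32 delta)
{
	u32 ctrl = readl_relaxed(base + 0x08);

	writel_relaxed(ctrl & ~0x1, base + 0x08);	/* disable while reloading */
	writel_relaxed(delta, base + 0x00);		/* new count */
	writel_relaxed(ctrl | 0x1, base + 0x08);	/* re-enable */
}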
*/ - unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL); + u32 ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); @@ -338,11 +349,12 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) static cycle_t __apbt_read_clocksource(struct clocksource *cs) { - unsigned long current_count; + u32 current_count; struct dw_apb_clocksource *dw_cs = clocksource_to_dw_apb_clocksource(cs); - current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); + current_count = apbt_readl_relaxed(&dw_cs->timer, + APBTMR_N_CURRENT_VALUE); return (cycle_t)~current_count; } diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index a19a3f619cc7..860843cef572 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -16,6 +16,7 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/delay.h> #include <linux/dw_apb_timer.h> #include <linux/of.h> #include <linux/of_address.h> @@ -130,6 +131,17 @@ static void __init init_sched_clock(void) sched_clock_register(read_sched_clock, 32, sched_rate); } +#ifdef CONFIG_ARM +static unsigned long dw_apb_delay_timer_read(void) +{ + return ~readl_relaxed(sched_io_base); +} + +static struct delay_timer dw_apb_delay_timer = { + .read_current_timer = dw_apb_delay_timer_read, +}; +#endif + static int num_called; static void __init dw_apb_timer_init(struct device_node *timer) { @@ -142,6 +154,10 @@ static void __init dw_apb_timer_init(struct device_node *timer) pr_debug("%s: found clocksource timer\n", __func__); add_clocksource(timer); init_sched_clock(); +#ifdef CONFIG_ARM + dw_apb_delay_timer.freq = sched_rate; + register_current_timer_delay(&dw_apb_delay_timer); +#endif break; default: break; diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c index 0e076c6fc006..75c44079b345 100644 --- a/drivers/clocksource/h8300_timer16.c +++ b/drivers/clocksource/h8300_timer16.c @@ -4,85 +4,56 @@ * Copyright 2015 Yoshinori Sato <ysato@users.sourcefoge.jp> */ -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> -#include <linux/platform_device.h> #include <linux/clocksource.h> -#include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> - -#include <asm/segment.h> -#include <asm/irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #define TSTR 0 -#define TSNC 1 -#define TMDR 2 -#define TOLR 3 -#define TISRA 4 -#define TISRB 5 #define TISRC 6 #define TCR 0 -#define TIOR 1 #define TCNT 2 -#define GRA 4 -#define GRB 6 - -#define FLAG_REPROGRAM (1 << 0) -#define FLAG_SKIPEVENT (1 << 1) -#define FLAG_IRQCONTEXT (1 << 2) -#define FLAG_STARTED (1 << 3) -#define ONESHOT 0 -#define PERIODIC 1 - -#define RELATIVE 0 -#define ABSOLUTE 1 +#define bset(b, a) iowrite8(ioread8(a) | (1 << (b)), (a)) +#define bclr(b, a) iowrite8(ioread8(a) & ~(1 << (b)), (a)) struct timer16_priv { - struct platform_device *pdev; struct clocksource cs; - struct irqaction irqaction; unsigned long total_cycles; - unsigned long mapbase; - unsigned long mapcommon; - unsigned long flags; - unsigned short gra; + void __iomem *mapbase; + void __iomem *mapcommon; unsigned short cs_enabled; unsigned char enb; - unsigned char imfa; - 
unsigned char imiea; unsigned char ovf; - raw_spinlock_t lock; - struct clk *clk; + unsigned char ovie; }; static unsigned long timer16_get_counter(struct timer16_priv *p) { - unsigned long v1, v2, v3; - int o1, o2; + unsigned short v1, v2, v3; + unsigned char o1, o2; - o1 = ctrl_inb(p->mapcommon + TISRC) & p->ovf; + o1 = ioread8(p->mapcommon + TISRC) & p->ovf; /* Make sure the timer value is stable. Stolen from acpi_pm.c */ do { o2 = o1; - v1 = ctrl_inw(p->mapbase + TCNT); - v2 = ctrl_inw(p->mapbase + TCNT); - v3 = ctrl_inw(p->mapbase + TCNT); - o1 = ctrl_inb(p->mapcommon + TISRC) & p->ovf; + v1 = ioread16be(p->mapbase + TCNT); + v2 = ioread16be(p->mapbase + TCNT); + v3 = ioread16be(p->mapbase + TCNT); + o1 = ioread8(p->mapcommon + TISRC) & p->ovf; } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); - v2 |= 0x10000; - return v2; + if (likely(!o1)) + return v2; + else + return v2 + 0x10000; } @@ -90,8 +61,7 @@ static irqreturn_t timer16_interrupt(int irq, void *dev_id) { struct timer16_priv *p = (struct timer16_priv *)dev_id; - ctrl_outb(ctrl_inb(p->mapcommon + TISRA) & ~p->imfa, - p->mapcommon + TISRA); + bclr(p->ovf, p->mapcommon + TISRC); p->total_cycles += 0x10000; return IRQ_HANDLED; @@ -105,13 +75,10 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs) static cycle_t timer16_clocksource_read(struct clocksource *cs) { struct timer16_priv *p = cs_to_priv(cs); - unsigned long flags, raw; - unsigned long value; + unsigned long raw, value; - raw_spin_lock_irqsave(&p->lock, flags); value = p->total_cycles; raw = timer16_get_counter(p); - raw_spin_unlock_irqrestore(&p->lock, flags); return value + raw; } @@ -123,10 +90,10 @@ static int timer16_enable(struct clocksource *cs) WARN_ON(p->cs_enabled); p->total_cycles = 0; - ctrl_outw(0x0000, p->mapbase + TCNT); - ctrl_outb(0x83, p->mapbase + TCR); - ctrl_outb(ctrl_inb(p->mapcommon + TSTR) | p->enb, - p->mapcommon + TSTR); + iowrite16be(0x0000, p->mapbase + TCNT); + iowrite8(0x83, p->mapbase + TCR); + bset(p->ovie, p->mapcommon + TISRC); + bset(p->enb, p->mapcommon + TSTR); p->cs_enabled = true; return 0; @@ -138,116 +105,83 @@ static void timer16_disable(struct clocksource *cs) WARN_ON(!p->cs_enabled); - ctrl_outb(ctrl_inb(p->mapcommon + TSTR) & ~p->enb, - p->mapcommon + TSTR); + bclr(p->ovie, p->mapcommon + TISRC); + bclr(p->enb, p->mapcommon + TSTR); p->cs_enabled = false; } +static struct timer16_priv timer16_priv = { + .cs = { + .name = "h8300_16timer", + .rating = 200, + .read = timer16_clocksource_read, + .enable = timer16_enable, + .disable = timer16_disable, + .mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, +}; + #define REG_CH 0 #define REG_COMM 1 -static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev) +static void __init h8300_16timer_init(struct device_node *node) { - struct resource *res[2]; + void __iomem *base[2]; int ret, irq; unsigned int ch; + struct clk *clk; - p->pdev = pdev; - - res[REG_CH] = platform_get_resource(p->pdev, - IORESOURCE_MEM, REG_CH); - res[REG_COMM] = platform_get_resource(p->pdev, - IORESOURCE_MEM, REG_COMM); - if (!res[REG_CH] || !res[REG_COMM]) { - dev_err(&p->pdev->dev, "failed to get I/O memory\n"); - return -ENXIO; - } - irq = platform_get_irq(p->pdev, 0); - if (irq < 0) { - dev_err(&p->pdev->dev, "failed to get irq\n"); - return irq; + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("failed to get clock for clocksource\n"); + return; } - p->clk = 
clk_get(&p->pdev->dev, "fck"); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "can't get clk\n"); - return PTR_ERR(p->clk); + base[REG_CH] = of_iomap(node, 0); + if (!base[REG_CH]) { + pr_err("failed to map registers for clocksource\n"); + goto free_clk; } - of_property_read_u32(p->pdev->dev.of_node, "renesas,channel", &ch); - - p->pdev = pdev; - p->mapbase = res[REG_CH]->start; - p->mapcommon = res[REG_COMM]->start; - p->enb = 1 << ch; - p->imfa = 1 << ch; - p->imiea = 1 << (4 + ch); - p->cs.name = pdev->name; - p->cs.rating = 200; - p->cs.read = timer16_clocksource_read; - p->cs.enable = timer16_enable; - p->cs.disable = timer16_disable; - p->cs.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); - p->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; - ret = request_irq(irq, timer16_interrupt, - IRQF_TIMER, pdev->name, p); - if (ret < 0) { - dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); - return ret; + base[REG_COMM] = of_iomap(node, 1); + if (!base[REG_COMM]) { + pr_err("failed to map registers for clocksource\n"); + goto unmap_ch; } - clocksource_register_hz(&p->cs, clk_get_rate(p->clk) / 8); - - return 0; -} - -static int timer16_probe(struct platform_device *pdev) -{ - struct timer16_priv *p = platform_get_drvdata(pdev); - - if (p) { - dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + pr_err("failed to get irq for clockevent\n"); + goto unmap_comm; } - p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; + of_property_read_u32(node, "renesas,channel", &ch); - return timer16_setup(p, pdev); -} - -static int timer16_remove(struct platform_device *pdev) -{ - return -EBUSY; -} + timer16_priv.mapbase = base[REG_CH]; + timer16_priv.mapcommon = base[REG_COMM]; + timer16_priv.enb = ch; + timer16_priv.ovf = ch; + timer16_priv.ovie = 4 + ch; -static const struct of_device_id timer16_of_table[] = { - { .compatible = "renesas,16bit-timer" }, - { } -}; -static struct platform_driver timer16_driver = { - .probe = timer16_probe, - .remove = timer16_remove, - .driver = { - .name = "h8300h-16timer", - .of_match_table = of_match_ptr(timer16_of_table), + ret = request_irq(irq, timer16_interrupt, + IRQF_TIMER, timer16_priv.cs.name, &timer16_priv); + if (ret < 0) { + pr_err("failed to request irq %d of clocksource\n", irq); + goto unmap_comm; } -}; -static int __init timer16_init(void) -{ - return platform_driver_register(&timer16_driver); -} + clocksource_register_hz(&timer16_priv.cs, + clk_get_rate(clk) / 8); + return; -static void __exit timer16_exit(void) -{ - platform_driver_unregister(&timer16_driver); +unmap_comm: + iounmap(base[REG_COMM]); +unmap_ch: + iounmap(base[REG_CH]); +free_clk: + clk_put(clk); } -subsys_initcall(timer16_init); -module_exit(timer16_exit); -MODULE_AUTHOR("Yoshinori Sato"); -MODULE_DESCRIPTION("H8/300H 16bit Timer Driver"); -MODULE_LICENSE("GPL v2"); +CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init); diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index 44375d8b9bc4..c151941e1956 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -8,19 +8,15 @@ */ #include <linux/errno.h> -#include <linux/sched.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/slab.h> #include <linux/clockchips.h> -#include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> - -#include 
<asm/irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #define _8TCR 0 #define _8TCSR 2 @@ -28,126 +24,74 @@ #define TCORB 6 #define _8TCNT 8 -#define FLAG_REPROGRAM (1 << 0) -#define FLAG_SKIPEVENT (1 << 1) -#define FLAG_IRQCONTEXT (1 << 2) +#define CMIEA 6 +#define CMFA 6 + #define FLAG_STARTED (1 << 3) -#define ONESHOT 0 -#define PERIODIC 1 +#define SCALE 64 -#define RELATIVE 0 -#define ABSOLUTE 1 +#define bset(b, a) iowrite8(ioread8(a) | (1 << (b)), (a)) +#define bclr(b, a) iowrite8(ioread8(a) & ~(1 << (b)), (a)) struct timer8_priv { - struct platform_device *pdev; struct clock_event_device ced; - struct irqaction irqaction; - unsigned long mapbase; - raw_spinlock_t lock; + void __iomem *mapbase; unsigned long flags; unsigned int rate; - unsigned int tcora; - struct clk *pclk; }; -static unsigned long timer8_get_counter(struct timer8_priv *p) -{ - unsigned long v1, v2, v3; - int o1, o2; - - o1 = ctrl_inb(p->mapbase + _8TCSR) & 0x20; - - /* Make sure the timer value is stable. Stolen from acpi_pm.c */ - do { - o2 = o1; - v1 = ctrl_inw(p->mapbase + _8TCNT); - v2 = ctrl_inw(p->mapbase + _8TCNT); - v3 = ctrl_inw(p->mapbase + _8TCNT); - o1 = ctrl_inb(p->mapbase + _8TCSR) & 0x20; - } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3) - || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); - - v2 |= o1 << 10; - return v2; -} - static irqreturn_t timer8_interrupt(int irq, void *dev_id) { struct timer8_priv *p = dev_id; - ctrl_outb(ctrl_inb(p->mapbase + _8TCSR) & ~0x40, - p->mapbase + _8TCSR); - p->flags |= FLAG_IRQCONTEXT; - ctrl_outw(p->tcora, p->mapbase + TCORA); - if (!(p->flags & FLAG_SKIPEVENT)) { - if (clockevent_state_oneshot(&p->ced)) - ctrl_outw(0x0000, p->mapbase + _8TCR); - p->ced.event_handler(&p->ced); - } - p->flags &= ~(FLAG_SKIPEVENT | FLAG_IRQCONTEXT); + if (clockevent_state_oneshot(&p->ced)) + iowrite16be(0x0000, p->mapbase + _8TCR); + + p->ced.event_handler(&p->ced); + + bclr(CMFA, p->mapbase + _8TCSR); return IRQ_HANDLED; } static void timer8_set_next(struct timer8_priv *p, unsigned long delta) { - unsigned long flags; - unsigned long now; - - raw_spin_lock_irqsave(&p->lock, flags); if (delta >= 0x10000) - dev_warn(&p->pdev->dev, "delta out of range\n"); - now = timer8_get_counter(p); - p->tcora = delta; - ctrl_outb(ctrl_inb(p->mapbase + _8TCR) | 0x40, p->mapbase + _8TCR); - if (delta > now) - ctrl_outw(delta, p->mapbase + TCORA); - else - ctrl_outw(now + 1, p->mapbase + TCORA); - - raw_spin_unlock_irqrestore(&p->lock, flags); + pr_warn("delta out of range\n"); + bclr(CMIEA, p->mapbase + _8TCR); + iowrite16be(delta, p->mapbase + TCORA); + iowrite16be(0x0000, p->mapbase + _8TCNT); + bclr(CMFA, p->mapbase + _8TCSR); + bset(CMIEA, p->mapbase + _8TCR); } static int timer8_enable(struct timer8_priv *p) { - p->rate = clk_get_rate(p->pclk) / 64; - ctrl_outw(0xffff, p->mapbase + TCORA); - ctrl_outw(0x0000, p->mapbase + _8TCNT); - ctrl_outw(0x0c02, p->mapbase + _8TCR); + iowrite16be(0xffff, p->mapbase + TCORA); + iowrite16be(0x0000, p->mapbase + _8TCNT); + iowrite16be(0x0c02, p->mapbase + _8TCR); return 0; } static int timer8_start(struct timer8_priv *p) { - int ret = 0; - unsigned long flags; - - raw_spin_lock_irqsave(&p->lock, flags); - - if (!(p->flags & FLAG_STARTED)) - ret = timer8_enable(p); + int ret; - if (ret) - goto out; - p->flags |= FLAG_STARTED; + if ((p->flags & FLAG_STARTED)) + return 0; - out: - raw_spin_unlock_irqrestore(&p->lock, flags); + ret = timer8_enable(p); + if (!ret) + p->flags |= FLAG_STARTED; return ret; } static void timer8_stop(struct 
timer8_priv *p) { - unsigned long flags; - - raw_spin_lock_irqsave(&p->lock, flags); - - ctrl_outw(0x0000, p->mapbase + _8TCR); - - raw_spin_unlock_irqrestore(&p->lock, flags); + iowrite16be(0x0000, p->mapbase + _8TCR); } static inline struct timer8_priv *ced_to_priv(struct clock_event_device *ced) @@ -155,7 +99,7 @@ static inline struct timer8_priv *ced_to_priv(struct clock_event_device *ced) return container_of(ced, struct timer8_priv, ced); } -static void timer8_clock_event_start(struct timer8_priv *p, int periodic) +static void timer8_clock_event_start(struct timer8_priv *p, unsigned long delta) { struct clock_event_device *ced = &p->ced; @@ -166,7 +110,7 @@ static void timer8_clock_event_start(struct timer8_priv *p, int periodic) ced->max_delta_ns = clockevent_delta2ns(0xffff, ced); ced->min_delta_ns = clockevent_delta2ns(0x0001, ced); - timer8_set_next(p, periodic?(p->rate + HZ/2) / HZ:0x10000); + timer8_set_next(p, delta); } static int timer8_clock_event_shutdown(struct clock_event_device *ced) @@ -179,9 +123,9 @@ static int timer8_clock_event_periodic(struct clock_event_device *ced) { struct timer8_priv *p = ced_to_priv(ced); - dev_info(&p->pdev->dev, "used for periodic clock events\n"); + pr_info("%s: used for periodic clock events\n", ced->name); timer8_stop(p); - timer8_clock_event_start(p, PERIODIC); + timer8_clock_event_start(p, (p->rate + HZ/2) / HZ); return 0; } @@ -190,9 +134,9 @@ static int timer8_clock_event_oneshot(struct clock_event_device *ced) { struct timer8_priv *p = ced_to_priv(ced); - dev_info(&p->pdev->dev, "used for oneshot clock events\n"); + pr_info("%s: used for oneshot clock events\n", ced->name); timer8_stop(p); - timer8_clock_event_start(p, ONESHOT); + timer8_clock_event_start(p, 0x10000); return 0; } @@ -208,110 +152,64 @@ static int timer8_clock_event_next(unsigned long delta, return 0; } -static int timer8_setup(struct timer8_priv *p, - struct platform_device *pdev) +static struct timer8_priv timer8_priv = { + .ced = { + .name = "h8300_8timer", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_next_event = timer8_clock_event_next, + .set_state_shutdown = timer8_clock_event_shutdown, + .set_state_periodic = timer8_clock_event_periodic, + .set_state_oneshot = timer8_clock_event_oneshot, + }, +}; + +static void __init h8300_8timer_init(struct device_node *node) { - struct resource *res; + void __iomem *base; int irq; - int ret; + struct clk *clk; - p->pdev = pdev; - - res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&p->pdev->dev, "failed to get I/O memory\n"); - return -ENXIO; + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("failed to get clock for clockevent\n"); + return; } - irq = platform_get_irq(p->pdev, 0); - if (irq < 0) { - dev_err(&p->pdev->dev, "failed to get irq\n"); - return -ENXIO; + base = of_iomap(node, 0); + if (!base) { + pr_err("failed to map registers for clockevent\n"); + goto free_clk; } - p->mapbase = res->start; - - p->irqaction.name = dev_name(&p->pdev->dev); - p->irqaction.handler = timer8_interrupt; - p->irqaction.dev_id = p; - p->irqaction.flags = IRQF_TIMER; - - p->pclk = clk_get(&p->pdev->dev, "fck"); - if (IS_ERR(p->pclk)) { - dev_err(&p->pdev->dev, "can't get clk\n"); - return PTR_ERR(p->pclk); + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + pr_err("failed to get irq for clockevent\n"); + goto unmap_reg; } - p->ced.name = pdev->name; - p->ced.features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT; - p->ced.rating = 200; - 
p->ced.cpumask = cpumask_of(0); - p->ced.set_next_event = timer8_clock_event_next; - p->ced.set_state_shutdown = timer8_clock_event_shutdown; - p->ced.set_state_periodic = timer8_clock_event_periodic; - p->ced.set_state_oneshot = timer8_clock_event_oneshot; - - ret = setup_irq(irq, &p->irqaction); - if (ret < 0) { - dev_err(&p->pdev->dev, - "failed to request irq %d\n", irq); - return ret; - } - clockevents_register_device(&p->ced); - platform_set_drvdata(pdev, p); + timer8_priv.mapbase = base; - return 0; -} - -static int timer8_probe(struct platform_device *pdev) -{ - struct timer8_priv *p = platform_get_drvdata(pdev); - - if (p) { - dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + timer8_priv.rate = clk_get_rate(clk) / SCALE; + if (!timer8_priv.rate) { + pr_err("Failed to get rate for the clocksource\n"); + goto unmap_reg; } - p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; - - return timer8_setup(p, pdev); -} - -static int timer8_remove(struct platform_device *pdev) -{ - return -EBUSY; -} - -static const struct of_device_id timer8_of_table[] __maybe_unused = { - { .compatible = "renesas,8bit-timer" }, - { } -}; - -MODULE_DEVICE_TABLE(of, timer8_of_table); -static struct platform_driver timer8_driver = { - .probe = timer8_probe, - .remove = timer8_remove, - .driver = { - .name = "h8300-8timer", - .of_match_table = of_match_ptr(timer8_of_table), + if (request_irq(irq, timer8_interrupt, IRQF_TIMER, + timer8_priv.ced.name, &timer8_priv) < 0) { + pr_err("failed to request irq %d for clockevent\n", irq); + goto unmap_reg; } -}; -static int __init timer8_init(void) -{ - return platform_driver_register(&timer8_driver); -} + clockevents_config_and_register(&timer8_priv.ced, + timer8_priv.rate, 1, 0x0000ffff); -static void __exit timer8_exit(void) -{ - platform_driver_unregister(&timer8_driver); + return; +unmap_reg: + iounmap(base); +free_clk: + clk_put(clk); } -subsys_initcall(timer8_init); -module_exit(timer8_exit); -MODULE_AUTHOR("Yoshinori Sato"); -MODULE_DESCRIPTION("H8/300 8bit Timer Driver"); -MODULE_LICENSE("GPL v2"); +CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init); diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c index 5487410bfabb..d4c1a287c262 100644 --- a/drivers/clocksource/h8300_tpu.c +++ b/drivers/clocksource/h8300_tpu.c @@ -1,42 +1,30 @@ /* - * H8/300 TPU Driver + * H8S TPU Driver * * Copyright 2015 Yoshinori Sato <ysato@users.sourcefoge.jp> * */ #include <linux/errno.h> -#include <linux/sched.h> #include <linux/kernel.h> -#include <linux/interrupt.h> #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/slab.h> #include <linux/clocksource.h> -#include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> -#include <asm/irq.h> +#define TCR 0x0 +#define TSR 0x5 +#define TCNT 0x6 -#define TCR 0 -#define TMDR 1 -#define TIOR 2 -#define TER 4 -#define TSR 5 -#define TCNT 6 -#define TGRA 8 -#define TGRB 10 -#define TGRC 12 -#define TGRD 14 +#define TCFV 0x10 struct tpu_priv { - struct platform_device *pdev; struct clocksource cs; - struct clk *clk; - unsigned long mapbase1; - unsigned long mapbase2; + void __iomem *mapbase1; + void __iomem *mapbase2; raw_spinlock_t lock; unsigned int cs_enabled; }; @@ -45,8 +33,8 @@ static inline unsigned long read_tcnt32(struct tpu_priv *p) { unsigned long tcnt; - tcnt = ctrl_inw(p->mapbase1 + TCNT) << 16; - tcnt |= ctrl_inw(p->mapbase2 + 
TCNT); + tcnt = ioread16be(p->mapbase1 + TCNT) << 16; + tcnt |= ioread16be(p->mapbase2 + TCNT); return tcnt; } @@ -55,7 +43,7 @@ static int tpu_get_counter(struct tpu_priv *p, unsigned long long *val) unsigned long v1, v2, v3; int o1, o2; - o1 = ctrl_inb(p->mapbase1 + TSR) & 0x10; + o1 = ioread8(p->mapbase1 + TSR) & TCFV; /* Make sure the timer value is stable. Stolen from acpi_pm.c */ do { @@ -63,7 +51,7 @@ static int tpu_get_counter(struct tpu_priv *p, unsigned long long *val) v1 = read_tcnt32(p); v2 = read_tcnt32(p); v3 = read_tcnt32(p); - o1 = ctrl_inb(p->mapbase1 + TSR) & 0x10; + o1 = ioread8(p->mapbase1 + TSR) & TCFV; } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); @@ -96,10 +84,10 @@ static int tpu_clocksource_enable(struct clocksource *cs) WARN_ON(p->cs_enabled); - ctrl_outw(0, p->mapbase1 + TCNT); - ctrl_outw(0, p->mapbase2 + TCNT); - ctrl_outb(0x0f, p->mapbase1 + TCR); - ctrl_outb(0x03, p->mapbase2 + TCR); + iowrite16be(0, p->mapbase1 + TCNT); + iowrite16be(0, p->mapbase2 + TCNT); + iowrite8(0x0f, p->mapbase1 + TCR); + iowrite8(0x03, p->mapbase2 + TCR); p->cs_enabled = true; return 0; @@ -111,96 +99,59 @@ static void tpu_clocksource_disable(struct clocksource *cs) WARN_ON(!p->cs_enabled); - ctrl_outb(0, p->mapbase1 + TCR); - ctrl_outb(0, p->mapbase2 + TCR); + iowrite8(0, p->mapbase1 + TCR); + iowrite8(0, p->mapbase2 + TCR); p->cs_enabled = false; } +static struct tpu_priv tpu_priv = { + .cs = { + .name = "H8S_TPU", + .rating = 200, + .read = tpu_clocksource_read, + .enable = tpu_clocksource_enable, + .disable = tpu_clocksource_disable, + .mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, +}; + #define CH_L 0 #define CH_H 1 -static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev) +static void __init h8300_tpu_init(struct device_node *node) { - struct resource *res[2]; - - p->pdev = pdev; + void __iomem *base[2]; + struct clk *clk; - res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L); - res[CH_H] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_H); - if (!res[CH_L] || !res[CH_H]) { - dev_err(&p->pdev->dev, "failed to get I/O memory\n"); - return -ENXIO; + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("failed to get clock for clocksource\n"); + return; } - p->clk = clk_get(&p->pdev->dev, "fck"); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "can't get clk\n"); - return PTR_ERR(p->clk); + base[CH_L] = of_iomap(node, CH_L); + if (!base[CH_L]) { + pr_err("failed to map registers for clocksource\n"); + goto free_clk; } - - p->mapbase1 = res[CH_L]->start; - p->mapbase2 = res[CH_H]->start; - - p->cs.name = pdev->name; - p->cs.rating = 200; - p->cs.read = tpu_clocksource_read; - p->cs.enable = tpu_clocksource_enable; - p->cs.disable = tpu_clocksource_disable; - p->cs.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); - p->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; - clocksource_register_hz(&p->cs, clk_get_rate(p->clk) / 64); - platform_set_drvdata(pdev, p); - - return 0; -} - -static int tpu_probe(struct platform_device *pdev) -{ - struct tpu_priv *p = platform_get_drvdata(pdev); - - if (p) { - dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + base[CH_H] = of_iomap(node, CH_H); + if (!base[CH_H]) { + pr_err("failed to map registers for clocksource\n"); + goto unmap_L; } - p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; + tpu_priv.mapbase1 = base[CH_L]; + tpu_priv.mapbase2 = base[CH_H]; - return 
tpu_setup(p, pdev); -} - -static int tpu_remove(struct platform_device *pdev) -{ - return -EBUSY; -} - -static const struct of_device_id tpu_of_table[] = { - { .compatible = "renesas,tpu" }, - { } -}; + clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); -static struct platform_driver tpu_driver = { - .probe = tpu_probe, - .remove = tpu_remove, - .driver = { - .name = "h8s-tpu", - .of_match_table = of_match_ptr(tpu_of_table), - } -}; - -static int __init tpu_init(void) -{ - return platform_driver_register(&tpu_driver); -} + return; -static void __exit tpu_exit(void) -{ - platform_driver_unregister(&tpu_driver); +unmap_L: + iounmap(base[CH_H]); +free_clk: + clk_put(clk); } -subsys_initcall(tpu_init); -module_exit(tpu_exit); -MODULE_AUTHOR("Yoshinori Sato"); -MODULE_DESCRIPTION("H8S Timer Pulse Unit Driver"); -MODULE_LICENSE("GPL v2"); +CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init); diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c index fbfc74685e6a..d67bc356488f 100644 --- a/drivers/clocksource/mtk_timer.c +++ b/drivers/clocksource/mtk_timer.c @@ -16,6 +16,8 @@ * GNU General Public License for more details. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/interrupt.h> @@ -187,10 +189,8 @@ static void __init mtk_timer_init(struct device_node *node) struct clk *clk; evt = kzalloc(sizeof(*evt), GFP_KERNEL); - if (!evt) { - pr_warn("Can't allocate mtk clock event driver struct"); + if (!evt) return; - } evt->dev.name = "mtk_tick"; evt->dev.rating = 300; @@ -204,31 +204,31 @@ static void __init mtk_timer_init(struct device_node *node) evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer"); if (IS_ERR(evt->gpt_base)) { - pr_warn("Can't get resource\n"); - return; + pr_err("Can't get resource\n"); + goto err_kzalloc; } evt->dev.irq = irq_of_parse_and_map(node, 0); if (evt->dev.irq <= 0) { - pr_warn("Can't parse IRQ"); + pr_err("Can't parse IRQ\n"); goto err_mem; } clk = of_clk_get(node, 0); if (IS_ERR(clk)) { - pr_warn("Can't get timer clock"); + pr_err("Can't get timer clock\n"); goto err_irq; } if (clk_prepare_enable(clk)) { - pr_warn("Can't prepare clock"); + pr_err("Can't prepare clock\n"); goto err_clk_put; } rate = clk_get_rate(clk); if (request_irq(evt->dev.irq, mtk_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { - pr_warn("failed to setup irq %d\n", evt->dev.irq); + pr_err("failed to setup irq %d\n", evt->dev.irq); goto err_clk_disable; } @@ -260,5 +260,7 @@ err_mem: iounmap(evt->gpt_base); of_address_to_resource(node, 0, &res); release_mem_region(res.start, resource_size(&res)); +err_kzalloc: + kfree(evt); } CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init); diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index d3c1742ded1a..8c77a529d0d4 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -17,16 +17,16 @@ #define TIMER_NAME "rk_timer" -#define TIMER_LOAD_COUNT0 0x00 -#define TIMER_LOAD_COUNT1 0x04 -#define TIMER_CONTROL_REG 0x10 -#define TIMER_INT_STATUS 0x18 +#define TIMER_LOAD_COUNT0 0x00 +#define TIMER_LOAD_COUNT1 0x04 +#define TIMER_CONTROL_REG 0x10 +#define TIMER_INT_STATUS 0x18 -#define TIMER_DISABLE 0x0 -#define TIMER_ENABLE 0x1 -#define TIMER_MODE_FREE_RUNNING (0 << 1) -#define TIMER_MODE_USER_DEFINED_COUNT (1 << 1) -#define TIMER_INT_UNMASK (1 << 2) +#define TIMER_DISABLE 0x0 +#define TIMER_ENABLE 0x1 +#define 
TIMER_MODE_FREE_RUNNING (0 << 1) +#define TIMER_MODE_USER_DEFINED_COUNT (1 << 1) +#define TIMER_INT_UNMASK (1 << 2) struct bc_timer { struct clock_event_device ce; @@ -49,14 +49,12 @@ static inline void __iomem *rk_base(struct clock_event_device *ce) static inline void rk_timer_disable(struct clock_event_device *ce) { writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG); - dsb(); } static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags) { writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags, rk_base(ce) + TIMER_CONTROL_REG); - dsb(); } static void rk_timer_update_counter(unsigned long cycles, @@ -64,13 +62,11 @@ static void rk_timer_update_counter(unsigned long cycles, { writel_relaxed(cycles, rk_base(ce) + TIMER_LOAD_COUNT0); writel_relaxed(0, rk_base(ce) + TIMER_LOAD_COUNT1); - dsb(); } static void rk_timer_interrupt_clear(struct clock_event_device *ce) { writel_relaxed(1, rk_base(ce) + TIMER_INT_STATUS); - dsb(); } static inline int rk_timer_set_next_event(unsigned long cycles, @@ -173,4 +169,5 @@ static void __init rk_timer_init(struct device_node *np) clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); } + CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init); diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c index d297b30d2bc0..2bcecafdeaea 100644 --- a/drivers/clocksource/tango_xtal.c +++ b/drivers/clocksource/tango_xtal.c @@ -19,19 +19,6 @@ static u64 notrace read_sched_clock(void) return read_xtal_counter(); } -static cycle_t read_clocksource(struct clocksource *cs) -{ - return read_xtal_counter(); -} - -static struct clocksource tango_xtal = { - .name = "tango-xtal", - .rating = 350, - .read = read_clocksource, - .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - static void __init tango_clocksource_init(struct device_node *np) { struct clk *clk; @@ -53,8 +40,9 @@ static void __init tango_clocksource_init(struct device_node *np) delay_timer.freq = xtal_freq; delay_timer.read_current_timer = read_xtal_counter; - ret = clocksource_register_hz(&tango_xtal, xtal_freq); - if (ret != 0) { + ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350, + 32, clocksource_mmio_readl_up); + if (!ret) { pr_err("%s: registration failed\n", np->full_name); return; } diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index 6ebda1177e79..38333aba3055 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -96,7 +96,8 @@ static struct clock_event_device tegra_clockevent = { .name = "timer0", .rating = 300, .features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_PERIODIC, + CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_DYNIRQ, .set_next_event = tegra_timer_set_next_event, .set_state_shutdown = tegra_timer_shutdown, .set_state_periodic = tegra_timer_set_periodic, diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c index a1c06a2bc77c..1316876b487a 100644 --- a/drivers/clocksource/time-lpc32xx.c +++ b/drivers/clocksource/time-lpc32xx.c @@ -125,7 +125,7 @@ static int __init lpc32xx_clocksource_init(struct device_node *np) clk = of_clk_get_by_name(np, "timerclk"); if (IS_ERR(clk)) { - pr_err("clock get failed (%lu)\n", PTR_ERR(clk)); + pr_err("clock get failed (%ld)\n", PTR_ERR(clk)); return PTR_ERR(clk); } @@ -184,7 +184,7 @@ static int __init lpc32xx_clockevent_init(struct device_node *np) clk = of_clk_get_by_name(np, "timerclk"); if (IS_ERR(clk)) { - pr_err("clock get 
failed (%lu)\n", PTR_ERR(clk)); + pr_err("clock get failed (%ld)\n", PTR_ERR(clk)); return PTR_ERR(clk); } diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c index bba679900054..3269d9ef7a18 100644 --- a/drivers/clocksource/time-pistachio.c +++ b/drivers/clocksource/time-pistachio.c @@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs) counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); raw_spin_unlock_irqrestore(&pcs->lock, flags); - return ~(cycle_t)counter; + return (cycle_t)~counter; } static u64 notrace pistachio_read_sched_clock(void) diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index bca9573e036a..24c83f9efd87 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -152,13 +152,6 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) -{ - struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); - - return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1)); -} - static int sun5i_rate_cb_clksrc(struct notifier_block *nb, unsigned long event, void *data) { @@ -217,13 +210,8 @@ static int __init sun5i_setup_clocksource(struct device_node *node, writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, base + TIMER_CTL_REG(1)); - cs->clksrc.name = node->name; - cs->clksrc.rating = 340; - cs->clksrc.read = sun5i_clksrc_read; - cs->clksrc.mask = CLOCKSOURCE_MASK(32); - cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; - - ret = clocksource_register_hz(&cs->clksrc, rate); + ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name, + rate, 340, 32, clocksource_mmio_readl_down); if (ret) { pr_err("Couldn't register clock source.\n"); goto err_remove_notifier; diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c index a92e94b40b5b..de49805fbb09 100644 --- a/drivers/clocksource/vt8500_timer.c +++ b/drivers/clocksource/vt8500_timer.c @@ -30,7 +30,6 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/delay.h> -#include <asm/mach/time.h> #include <linux/of.h> #include <linux/of_address.h> diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index d7373ca69c99..25693b045371 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -179,26 +179,21 @@ static int cn_call_callback(struct sk_buff *skb) * * It checks skb, netlink header and msg sizes, and calls callback helper. 
*/ -static void cn_rx_skb(struct sk_buff *__skb) +static void cn_rx_skb(struct sk_buff *skb) { struct nlmsghdr *nlh; - struct sk_buff *skb; int len, err; - skb = skb_get(__skb); - if (skb->len >= NLMSG_HDRLEN) { nlh = nlmsg_hdr(skb); len = nlmsg_len(nlh); if (len < (int)sizeof(struct cn_msg) || skb->len < nlh->nlmsg_len || - len > CONNECTOR_MAX_MSG_SIZE) { - kfree_skb(skb); + len > CONNECTOR_MAX_MSG_SIZE) return; - } - err = cn_call_callback(skb); + err = cn_call_callback(skb_get(skb)); if (err < 0) kfree_skb(skb); } diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 235a1ba73d92..0031069b64c9 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -6,6 +6,8 @@ config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK + # if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y + depends on !CPU_THERMAL || THERMAL select PM_OPP help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. @@ -217,6 +219,16 @@ config ARM_SPEAR_CPUFREQ help This adds the CPUFreq driver support for SPEAr SOCs. +config ARM_STI_CPUFREQ + tristate "STi CPUFreq support" + depends on SOC_STIH407 + help + This driver uses the generic OPP framework to match the running + platform with a predefined set of suitable values. If not provided + we will fall-back so safe-values contained in Device Tree. Enable + this config option if you wish to add CPUFreq support for STi based + SoCs. + config ARM_TEGRA20_CPUFREQ bool "Tegra20 CPUFreq support" depends on ARCH_TEGRA @@ -226,7 +238,7 @@ config ARM_TEGRA20_CPUFREQ config ARM_TEGRA124_CPUFREQ tristate "Tegra124 CPUFreq support" - depends on ARCH_TEGRA && CPUFREQ_DT + depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR default y help This adds the CPUFreq driver support for Tegra124 SOCs. 
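[Editor's note] The drivers/connector change above drops the unconditional skb_get()/kfree_skb() pair in cn_rx_skb() and instead takes the extra reference only for the call that may keep the skb, releasing it again if that call fails. Below is a minimal sketch of the same reference-counting pattern; consume_msg() is a hypothetical consumer standing in for cn_call_callback(), not the in-tree code.

#include <linux/netlink.h>
#include <linux/skbuff.h>

/* Hypothetical consumer: on success it takes ownership of the reference. */
int consume_msg(struct sk_buff *skb);

static void example_rx_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;

	if (skb->len < NLMSG_HDRLEN)
		return;			/* too short for a netlink header */

	nlh = nlmsg_hdr(skb);
	if (skb->len < nlh->nlmsg_len)
		return;			/* truncated: no reference taken, nothing to free */

	/* skb_get() bumps the refcount; on success the callee owns that reference */
	if (consume_msg(skb_get(skb)) < 0)
		kfree_skb(skb);		/* callee rejected it: drop the reference we took */
}

Compared with grabbing a reference up front, this keeps the error paths trivial: nothing has to be freed unless the extra reference was actually handed out.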
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c0af1a1281c8..9e63fb1b09f8 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o +obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index cec1ee2d2f74..51eef87bbc37 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -135,7 +135,7 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask) wrmsr_on_cpus(cpumask, msr_addr, msrs); } -static int _store_boost(int val) +static int set_boost(int val) { get_online_cpus(); boost_set_msrs(val, cpu_online_mask); @@ -158,29 +158,24 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) cpufreq_freq_attr_ro(freqdomain_cpus); #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB -static ssize_t store_boost(const char *buf, size_t count) +static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, + size_t count) { int ret; - unsigned long val = 0; + unsigned int val = 0; - if (!acpi_cpufreq_driver.boost_supported) + if (!acpi_cpufreq_driver.set_boost) return -EINVAL; - ret = kstrtoul(buf, 10, &val); - if (ret || (val > 1)) + ret = kstrtouint(buf, 10, &val); + if (ret || val > 1) return -EINVAL; - _store_boost((int) val); + set_boost(val); return count; } -static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, - size_t count) -{ - return store_boost(buf, count); -} - static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) { return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled); @@ -905,7 +900,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = { .resume = acpi_cpufreq_resume, .name = "acpi-cpufreq", .attr = acpi_cpufreq_attr, - .set_boost = _store_boost, }; static void __init acpi_cpufreq_boost_init(void) @@ -916,7 +910,7 @@ static void __init acpi_cpufreq_boost_init(void) if (!msrs) return; - acpi_cpufreq_driver.boost_supported = true; + acpi_cpufreq_driver.set_boost = set_boost; acpi_cpufreq_driver.boost_enabled = boost_state(0); cpu_notifier_register_begin(); diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index c5d256caa664..c251247ae661 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -23,6 +23,7 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpumask.h> +#include <linux/cpu_cooling.h> #include <linux/export.h> #include <linux/module.h> #include <linux/mutex.h> @@ -55,6 +56,7 @@ static bool bL_switching_enabled; #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? 
freq >> 1 : freq) +static struct thermal_cooling_device *cdev[MAX_CLUSTERS]; static struct cpufreq_arm_bL_ops *arm_bL_ops; static struct clk *clk[MAX_CLUSTERS]; static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; @@ -493,6 +495,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) static int bL_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; + int cur_cluster = cpu_to_cluster(policy->cpu); + + if (cur_cluster < MAX_CLUSTERS) { + cpufreq_cooling_unregister(cdev[cur_cluster]); + cdev[cur_cluster] = NULL; + } cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -507,6 +515,38 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy) return 0; } +static void bL_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct device *cpu_dev = get_cpu_device(policy->cpu); + int cur_cluster = cpu_to_cluster(policy->cpu); + struct device_node *np; + + /* Do not register a cpu_cooling device if we are in IKS mode */ + if (cur_cluster >= MAX_CLUSTERS) + return; + + np = of_node_get(cpu_dev->of_node); + if (WARN_ON(!np)) + return; + + if (of_find_property(np, "#cooling-cells", NULL)) { + u32 power_coefficient = 0; + + of_property_read_u32(np, "dynamic-power-coefficient", + &power_coefficient); + + cdev[cur_cluster] = of_cpufreq_power_cooling_register(np, + policy->related_cpus, power_coefficient, NULL); + if (IS_ERR(cdev[cur_cluster])) { + dev_err(cpu_dev, + "running cpufreq without cooling device: %ld\n", + PTR_ERR(cdev[cur_cluster])); + cdev[cur_cluster] = NULL; + } + } + of_node_put(np); +} + static struct cpufreq_driver bL_cpufreq_driver = { .name = "arm-big-little", .flags = CPUFREQ_STICKY | @@ -517,6 +557,7 @@ static struct cpufreq_driver bL_cpufreq_driver = { .get = bL_cpufreq_get_rate, .init = bL_cpufreq_init, .exit = bL_cpufreq_exit, + .ready = bL_cpufreq_ready, .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c index a9f8e5bd0716..12e97d8a9db0 100644 --- a/drivers/cpufreq/blackfin-cpufreq.c +++ b/drivers/cpufreq/blackfin-cpufreq.c @@ -112,7 +112,7 @@ static unsigned int bfin_getfreq_khz(unsigned int cpu) } #ifdef CONFIG_BF60x -unsigned long cpu_set_cclk(int cpu, unsigned long new) +static int cpu_set_cclk(int cpu, unsigned long new) { struct clk *clk; int ret; diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 90d64081ddb3..9bc37c437874 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -50,7 +50,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) struct private_data *priv = policy->driver_data; struct device *cpu_dev = priv->cpu_dev; struct regulator *cpu_reg = priv->cpu_reg; - unsigned long volt = 0, volt_old = 0, tol = 0; + unsigned long volt = 0, tol = 0; + int volt_old = 0; unsigned int old_freq, new_freq; long freq_Hz, freq_exact; int ret; @@ -83,7 +84,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) opp_freq / 1000, volt); } - dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", + dev_dbg(cpu_dev, "%u MHz, %d mV --> %u MHz, %ld mV\n", old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1, new_freq / 1000, volt ? volt / 1000 : -1); @@ -407,8 +408,13 @@ static void cpufreq_ready(struct cpufreq_policy *policy) * thermal DT code takes care of matching them. 
*/ if (of_find_property(np, "#cooling-cells", NULL)) { - priv->cdev = of_cpufreq_cooling_register(np, - policy->related_cpus); + u32 power_coefficient = 0; + + of_property_read_u32(np, "dynamic-power-coefficient", + &power_coefficient); + + priv->cdev = of_cpufreq_power_cooling_register(np, + policy->related_cpus, power_coefficient, NULL); if (IS_ERR(priv->cdev)) { dev_err(priv->cpu_dev, "running cpufreq without cooling device: %ld\n", diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8412ce5f93a7..c35e7da1ed7a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2330,29 +2330,15 @@ int cpufreq_boost_trigger_state(int state) return ret; } -int cpufreq_boost_supported(void) +static bool cpufreq_boost_supported(void) { - if (likely(cpufreq_driver)) - return cpufreq_driver->boost_supported; - - return 0; + return likely(cpufreq_driver) && cpufreq_driver->set_boost; } -EXPORT_SYMBOL_GPL(cpufreq_boost_supported); static int create_boost_sysfs_file(void) { int ret; - if (!cpufreq_boost_supported()) - return 0; - - /* - * Check if driver provides function to enable boost - - * if not, use cpufreq_boost_set_sw as default - */ - if (!cpufreq_driver->set_boost) - cpufreq_driver->set_boost = cpufreq_boost_set_sw; - ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr); if (ret) pr_err("%s: cannot register global BOOST sysfs file\n", @@ -2375,7 +2361,7 @@ int cpufreq_enable_boost_support(void) if (cpufreq_boost_supported()) return 0; - cpufreq_driver->boost_supported = true; + cpufreq_driver->set_boost = cpufreq_boost_set_sw; /* This will get removed on driver unregister */ return create_boost_sysfs_file(); @@ -2435,9 +2421,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; - ret = create_boost_sysfs_file(); - if (ret) - goto err_null_driver; + if (cpufreq_boost_supported()) { + ret = create_boost_sysfs_file(); + if (ret) + goto err_null_driver; + } ret = subsys_interface_register(&cpufreq_interface); if (ret) diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 1fa1deb6e91f..606ad74abe6e 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -115,13 +115,13 @@ static void cs_check_cpu(int cpu, unsigned int load) } } -static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs, - struct dbs_data *dbs_data, bool modify_all) +static unsigned int cs_dbs_timer(struct cpufreq_policy *policy, bool modify_all) { + struct dbs_data *dbs_data = policy->governor_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; if (modify_all) - dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu); + dbs_check_cpu(dbs_data, policy->cpu); return delay_for_sampling_rate(cs_tuners->sampling_rate); } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index b260576ddb12..bab3a514ec12 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -84,6 +84,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) (cur_wall_time - j_cdbs->prev_cpu_wall); j_cdbs->prev_cpu_wall = cur_wall_time; + if (cur_idle_time < j_cdbs->prev_cpu_idle) + cur_idle_time = j_cdbs->prev_cpu_idle; + idle_time = (unsigned int) (cur_idle_time - j_cdbs->prev_cpu_idle); j_cdbs->prev_cpu_idle = cur_idle_time; @@ -158,47 +161,55 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) } EXPORT_SYMBOL_GPL(dbs_check_cpu); -static inline void __gov_queue_work(int 
cpu, struct dbs_data *dbs_data, - unsigned int delay) +void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay) { - struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); - - mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay); -} - -void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, - unsigned int delay, bool all_cpus) -{ - int i; + struct dbs_data *dbs_data = policy->governor_data; + struct cpu_dbs_info *cdbs; + int cpu; - if (!all_cpus) { - /* - * Use raw_smp_processor_id() to avoid preemptible warnings. - * We know that this is only called with all_cpus == false from - * works that have been queued with *_work_on() functions and - * those works are canceled during CPU_DOWN_PREPARE so they - * can't possibly run on any other CPU. - */ - __gov_queue_work(raw_smp_processor_id(), dbs_data, delay); - } else { - for_each_cpu(i, policy->cpus) - __gov_queue_work(i, dbs_data, delay); + for_each_cpu(cpu, policy->cpus) { + cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); + cdbs->timer.expires = jiffies + delay; + add_timer_on(&cdbs->timer, cpu); } } -EXPORT_SYMBOL_GPL(gov_queue_work); +EXPORT_SYMBOL_GPL(gov_add_timers); -static inline void gov_cancel_work(struct dbs_data *dbs_data, - struct cpufreq_policy *policy) +static inline void gov_cancel_timers(struct cpufreq_policy *policy) { + struct dbs_data *dbs_data = policy->governor_data; struct cpu_dbs_info *cdbs; int i; for_each_cpu(i, policy->cpus) { cdbs = dbs_data->cdata->get_cpu_cdbs(i); - cancel_delayed_work_sync(&cdbs->dwork); + del_timer_sync(&cdbs->timer); } } +void gov_cancel_work(struct cpu_common_dbs_info *shared) +{ + /* Tell dbs_timer_handler() to skip queuing up work items. */ + atomic_inc(&shared->skip_work); + /* + * If dbs_timer_handler() is already running, it may not notice the + * incremented skip_work, so wait for it to complete to prevent its work + * item from being queued up after the cancel_work_sync() below. + */ + gov_cancel_timers(shared->policy); + /* + * In case dbs_timer_handler() managed to run and spawn a work item + * before the timers have been canceled, wait for that work item to + * complete and then cancel all of the timers set up by it. If + * dbs_timer_handler() runs again at that point, it will see the + * positive value of skip_work and won't spawn any more work items. + */ + cancel_work_sync(&shared->work); + gov_cancel_timers(shared->policy); + atomic_set(&shared->skip_work, 0); +} +EXPORT_SYMBOL_GPL(gov_cancel_work); + /* Will return if we need to evaluate cpu load again or not */ static bool need_load_eval(struct cpu_common_dbs_info *shared, unsigned int sampling_rate) @@ -217,29 +228,21 @@ static bool need_load_eval(struct cpu_common_dbs_info *shared, return true; } -static void dbs_timer(struct work_struct *work) +static void dbs_work_handler(struct work_struct *work) { - struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info, - dwork.work); - struct cpu_common_dbs_info *shared = cdbs->shared; + struct cpu_common_dbs_info *shared = container_of(work, struct + cpu_common_dbs_info, work); struct cpufreq_policy *policy; struct dbs_data *dbs_data; unsigned int sampling_rate, delay; - bool modify_all = true; - - mutex_lock(&shared->timer_mutex); + bool eval_load; policy = shared->policy; - - /* - * Governor might already be disabled and there is no point continuing - * with the work-handler. 
- */ - if (!policy) - goto unlock; - dbs_data = policy->governor_data; + /* Kill all timers */ + gov_cancel_timers(policy); + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; @@ -250,14 +253,37 @@ static void dbs_timer(struct work_struct *work) sampling_rate = od_tuners->sampling_rate; } - if (!need_load_eval(cdbs->shared, sampling_rate)) - modify_all = false; - - delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all); - gov_queue_work(dbs_data, policy, delay, modify_all); + eval_load = need_load_eval(shared, sampling_rate); -unlock: + /* + * Make sure cpufreq_governor_limits() isn't evaluating load in + * parallel. + */ + mutex_lock(&shared->timer_mutex); + delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load); mutex_unlock(&shared->timer_mutex); + + atomic_dec(&shared->skip_work); + + gov_add_timers(policy, delay); +} + +static void dbs_timer_handler(unsigned long data) +{ + struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data; + struct cpu_common_dbs_info *shared = cdbs->shared; + + /* + * Timer handler may not be allowed to queue the work at the moment, + * because: + * - Another timer handler has done that + * - We are stopping the governor + * - Or we are updating the sampling rate of the ondemand governor + */ + if (atomic_inc_return(&shared->skip_work) > 1) + atomic_dec(&shared->skip_work); + else + queue_work(system_wq, &shared->work); } static void set_sampling_rate(struct dbs_data *dbs_data, @@ -287,6 +313,9 @@ static int alloc_common_dbs_info(struct cpufreq_policy *policy, for_each_cpu(j, policy->related_cpus) cdata->get_cpu_cdbs(j)->shared = shared; + mutex_init(&shared->timer_mutex); + atomic_set(&shared->skip_work, 0); + INIT_WORK(&shared->work, dbs_work_handler); return 0; } @@ -297,6 +326,8 @@ static void free_common_dbs_info(struct cpufreq_policy *policy, struct cpu_common_dbs_info *shared = cdbs->shared; int j; + mutex_destroy(&shared->timer_mutex); + for_each_cpu(j, policy->cpus) cdata->get_cpu_cdbs(j)->shared = NULL; @@ -433,7 +464,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, shared->policy = policy; shared->time_stamp = ktime_get(); - mutex_init(&shared->timer_mutex); for_each_cpu(j, policy->cpus) { struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j); @@ -450,7 +480,9 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, if (ignore_nice) j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer); + __setup_timer(&j_cdbs->timer, dbs_timer_handler, + (unsigned long)j_cdbs, + TIMER_DEFERRABLE | TIMER_IRQSAFE); } if (cdata->governor == GOV_CONSERVATIVE) { @@ -468,8 +500,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, od_ops->powersave_bias_init_cpu(cpu); } - gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate), - true); + gov_add_timers(policy, delay_for_sampling_rate(sampling_rate)); return 0; } @@ -483,18 +514,9 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy, if (!shared || !shared->policy) return -EBUSY; - /* - * Work-handler must see this updated, as it should not proceed any - * further after governor is disabled. And so timer_mutex is taken while - * updating this value. 
- */ - mutex_lock(&shared->timer_mutex); + gov_cancel_work(shared); shared->policy = NULL; - mutex_unlock(&shared->timer_mutex); - - gov_cancel_work(dbs_data, policy); - mutex_destroy(&shared->timer_mutex); return 0; } diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 5621bb03e874..91e767a058a7 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -17,6 +17,7 @@ #ifndef _CPUFREQ_GOVERNOR_H #define _CPUFREQ_GOVERNOR_H +#include <linux/atomic.h> #include <linux/cpufreq.h> #include <linux/kernel_stat.h> #include <linux/module.h> @@ -132,12 +133,14 @@ static void *get_cpu_dbs_info_s(int cpu) \ struct cpu_common_dbs_info { struct cpufreq_policy *policy; /* - * percpu mutex that serializes governor limit change with dbs_timer - * invocation. We do not want dbs_timer to run when user is changing - * the governor or limits. + * Per policy mutex that serializes load evaluation from limit-change + * and work-handler. */ struct mutex timer_mutex; + ktime_t time_stamp; + atomic_t skip_work; + struct work_struct work; }; /* Per cpu structures */ @@ -152,7 +155,7 @@ struct cpu_dbs_info { * wake-up from idle. */ unsigned int prev_load; - struct delayed_work dwork; + struct timer_list timer; struct cpu_common_dbs_info *shared; }; @@ -209,8 +212,7 @@ struct common_dbs_data { struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu); void *(*get_cpu_dbs_info_s)(int cpu); - unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs, - struct dbs_data *dbs_data, + unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy, bool modify_all); void (*gov_check_cpu)(int cpu, unsigned int load); int (*init)(struct dbs_data *dbs_data, bool notify); @@ -269,11 +271,11 @@ static ssize_t show_sampling_rate_min_gov_pol \ extern struct mutex cpufreq_governor_lock; +void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay); +void gov_cancel_work(struct cpu_common_dbs_info *shared); void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct common_dbs_data *cdata, unsigned int event); -void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, - unsigned int delay, bool all_cpus); void od_register_powersave_bias_handler(unsigned int (*f) (struct cpufreq_policy *, unsigned int, unsigned int), unsigned int powersave_bias); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 03ac6ce54042..eae51070c034 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -191,10 +191,9 @@ static void od_check_cpu(int cpu, unsigned int load) } } -static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs, - struct dbs_data *dbs_data, bool modify_all) +static unsigned int od_dbs_timer(struct cpufreq_policy *policy, bool modify_all) { - struct cpufreq_policy *policy = cdbs->shared->policy; + struct dbs_data *dbs_data = policy->governor_data; unsigned int cpu = policy->cpu; struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); @@ -247,40 +246,66 @@ static void update_sampling_rate(struct dbs_data *dbs_data, unsigned int new_rate) { struct od_dbs_tuners *od_tuners = dbs_data->tuners; + struct cpumask cpumask; int cpu; od_tuners->sampling_rate = new_rate = max(new_rate, dbs_data->min_sampling_rate); - for_each_online_cpu(cpu) { + /* + * Lock governor so that governor start/stop can't execute in parallel. 
+ */ + mutex_lock(&od_dbs_cdata.mutex); + + cpumask_copy(&cpumask, cpu_online_mask); + + for_each_cpu(cpu, &cpumask) { struct cpufreq_policy *policy; struct od_cpu_dbs_info_s *dbs_info; + struct cpu_dbs_info *cdbs; + struct cpu_common_dbs_info *shared; unsigned long next_sampling, appointed_at; - policy = cpufreq_cpu_get(cpu); - if (!policy) - continue; - if (policy->governor != &cpufreq_gov_ondemand) { - cpufreq_cpu_put(policy); - continue; - } dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - cpufreq_cpu_put(policy); + cdbs = &dbs_info->cdbs; + shared = cdbs->shared; - if (!delayed_work_pending(&dbs_info->cdbs.dwork)) + /* + * A valid shared and shared->policy means governor hasn't + * stopped or exited yet. + */ + if (!shared || !shared->policy) + continue; + + policy = shared->policy; + + /* clear all CPUs of this policy */ + cpumask_andnot(&cpumask, &cpumask, policy->cpus); + + /* + * Update sampling rate for CPUs whose policy is governed by + * dbs_data. In case of governor_per_policy, only a single + * policy will be governed by dbs_data, otherwise there can be + * multiple policies that are governed by the same dbs_data. + */ + if (dbs_data != policy->governor_data) continue; + /* + * Checking this for any CPU should be fine, timers for all of + * them are scheduled together. + */ next_sampling = jiffies + usecs_to_jiffies(new_rate); - appointed_at = dbs_info->cdbs.dwork.timer.expires; + appointed_at = dbs_info->cdbs.timer.expires; if (time_before(next_sampling, appointed_at)) { - cancel_delayed_work_sync(&dbs_info->cdbs.dwork); - - gov_queue_work(dbs_data, policy, - usecs_to_jiffies(new_rate), true); + gov_cancel_work(shared); + gov_add_timers(policy, usecs_to_jiffies(new_rate)); } } + + mutex_unlock(&od_dbs_cdata.mutex); } static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4d07cbd2b23c..cd83d477e32d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -66,6 +66,7 @@ static inline int ceiling_fp(int32_t x) struct sample { int32_t core_pct_busy; + int32_t busy_scaled; u64 aperf; u64 mperf; u64 tsc; @@ -112,6 +113,7 @@ struct cpudata { u64 prev_aperf; u64 prev_mperf; u64 prev_tsc; + u64 prev_cummulative_iowait; struct sample sample; }; @@ -133,6 +135,7 @@ struct pstate_funcs { int (*get_scaling)(void); void (*set)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); + int32_t (*get_target_pstate)(struct cpudata *); }; struct cpu_defaults { @@ -140,6 +143,9 @@ struct cpu_defaults { struct pstate_funcs funcs; }; +static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu); +static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu); + static struct pstate_adjust_policy pid_params; static struct pstate_funcs pstate_funcs; static int hwp_active; @@ -738,6 +744,7 @@ static struct cpu_defaults core_params = { .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, .set = core_set_pstate, + .get_target_pstate = get_target_pstate_use_performance, }, }; @@ -758,6 +765,7 @@ static struct cpu_defaults silvermont_params = { .set = atom_set_pstate, .get_scaling = silvermont_get_scaling, .get_vid = atom_get_vid, + .get_target_pstate = get_target_pstate_use_cpu_load, }, }; @@ -778,6 +786,7 @@ static struct cpu_defaults airmont_params = { .set = atom_set_pstate, .get_scaling = airmont_get_scaling, .get_vid = atom_get_vid, + .get_target_pstate = get_target_pstate_use_cpu_load, }, }; @@ -797,6 +806,7 @@ static 
struct cpu_defaults knl_params = { .get_turbo = knl_get_turbo_pstate, .get_scaling = core_get_scaling, .set = core_set_pstate, + .get_target_pstate = get_target_pstate_use_performance, }, }; @@ -882,12 +892,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu) local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - if (cpu->prev_mperf == mperf) { + tsc = rdtsc(); + if ((cpu->prev_mperf == mperf) || (cpu->prev_tsc == tsc)) { local_irq_restore(flags); return; } - - tsc = rdtsc(); local_irq_restore(flags); cpu->last_sample_time = cpu->sample.time; @@ -922,7 +931,43 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) mod_timer_pinned(&cpu->timer, jiffies + delay); } -static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) +static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) +{ + struct sample *sample = &cpu->sample; + u64 cummulative_iowait, delta_iowait_us; + u64 delta_iowait_mperf; + u64 mperf, now; + int32_t cpu_load; + + cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now); + + /* + * Convert iowait time into number of IO cycles spent at max_freq. + * IO is considered as busy only for the cpu_load algorithm. For + * performance this is not needed since we always try to reach the + * maximum P-State, so we are already boosting the IOs. + */ + delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait; + delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling * + cpu->pstate.max_pstate, MSEC_PER_SEC); + + mperf = cpu->sample.mperf + delta_iowait_mperf; + cpu->prev_cummulative_iowait = cummulative_iowait; + + + /* + * The load can be estimated as the ratio of the mperf counter + * running at a constant frequency during active periods + * (C0) and the time stamp counter running at the same frequency + * also during C-states. 
+ */ + cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc); + cpu->sample.busy_scaled = cpu_load; + + return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load); +} + +static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) { int32_t core_busy, max_pstate, current_pstate, sample_ratio; s64 duration_us; @@ -960,30 +1005,24 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) core_busy = mul_fp(core_busy, sample_ratio); } - return core_busy; + cpu->sample.busy_scaled = core_busy; + return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy); } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) { - int32_t busy_scaled; - struct _pid *pid; - signed int ctl; - int from; + int from, target_pstate; struct sample *sample; from = cpu->pstate.current_pstate; - pid = &cpu->pid; - busy_scaled = intel_pstate_get_scaled_busy(cpu); + target_pstate = pstate_funcs.get_target_pstate(cpu); - ctl = pid_calc(pid, busy_scaled); - - /* Negative values of ctl increase the pstate and vice versa */ - intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true); + intel_pstate_set_pstate(cpu, target_pstate, true); sample = &cpu->sample; trace_pstate_sample(fp_toint(sample->core_pct_busy), - fp_toint(busy_scaled), + fp_toint(sample->busy_scaled), from, cpu->pstate.current_pstate, sample->mperf, @@ -1123,7 +1162,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_sysfs_pct); limits->max_perf_pct = max(limits->min_policy_pct, limits->max_perf_pct); - limits->max_perf = round_up(limits->max_perf, 8); + limits->max_perf = round_up(limits->max_perf, FRAC_BITS); /* Make sure min_perf_pct <= max_perf_pct */ limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); @@ -1237,6 +1276,8 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_scaling = funcs->get_scaling; pstate_funcs.set = funcs->set; pstate_funcs.get_vid = funcs->get_vid; + pstate_funcs.get_target_pstate = funcs->get_target_pstate; + } #if IS_ENABLED(CONFIG_ACPI) diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c index 83001dc5b646..1efba340456d 100644 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ b/drivers/cpufreq/mt8173-cpufreq.c @@ -41,16 +41,35 @@ * the original PLL becomes stable at target frequency. */ struct mtk_cpu_dvfs_info { + struct cpumask cpus; struct device *cpu_dev; struct regulator *proc_reg; struct regulator *sram_reg; struct clk *cpu_clk; struct clk *inter_clk; struct thermal_cooling_device *cdev; + struct list_head list_head; int intermediate_voltage; bool need_voltage_tracking; }; +static LIST_HEAD(dvfs_info_list); + +static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) +{ + struct mtk_cpu_dvfs_info *info; + struct list_head *list; + + list_for_each(list, &dvfs_info_list) { + info = list_entry(list, struct mtk_cpu_dvfs_info, list_head); + + if (cpumask_test_cpu(cpu, &info->cpus)) + return info; + } + + return NULL; +} + static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, int new_vproc) { @@ -59,7 +78,10 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, int old_vproc, old_vsram, new_vsram, vsram, vproc, ret; old_vproc = regulator_get_voltage(proc_reg); - old_vsram = regulator_get_voltage(sram_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); + return old_vproc; + } /* Vsram should not exceed the maximum allowed voltage of SoC. 
*/ new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT); @@ -72,7 +94,17 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, */ do { old_vsram = regulator_get_voltage(sram_reg); + if (old_vsram < 0) { + pr_err("%s: invalid Vsram value: %d\n", + __func__, old_vsram); + return old_vsram; + } old_vproc = regulator_get_voltage(proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", + __func__, old_vproc); + return old_vproc; + } vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT); @@ -117,7 +149,17 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, */ do { old_vproc = regulator_get_voltage(proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", + __func__, old_vproc); + return old_vproc; + } old_vsram = regulator_get_voltage(sram_reg); + if (old_vsram < 0) { + pr_err("%s: invalid Vsram value: %d\n", + __func__, old_vsram); + return old_vsram; + } vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT); ret = regulator_set_voltage(proc_reg, vproc, @@ -185,6 +227,10 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, old_freq_hz = clk_get_rate(cpu_clk); old_vproc = regulator_get_voltage(info->proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); + return old_vproc; + } freq_hz = freq_table[index].frequency * 1000; @@ -344,7 +390,15 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) /* Both presence and absence of sram regulator are valid cases. */ sram_reg = regulator_get_exclusive(cpu_dev, "sram"); - ret = dev_pm_opp_of_add_table(cpu_dev); + /* Get OPP-sharing information from "operating-points-v2" bindings */ + ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus); + if (ret) { + pr_err("failed to get OPP-sharing information for cpu%d\n", + cpu); + goto out_free_resources; + } + + ret = dev_pm_opp_of_cpumask_add_table(&info->cpus); if (ret) { pr_warn("no OPP table for cpu%d\n", cpu); goto out_free_resources; @@ -378,7 +432,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) return 0; out_free_opp_table: - dev_pm_opp_of_remove_table(cpu_dev); + dev_pm_opp_of_cpumask_remove_table(&info->cpus); out_free_resources: if (!IS_ERR(proc_reg)) @@ -404,7 +458,7 @@ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info) if (!IS_ERR(info->inter_clk)) clk_put(info->inter_clk); - dev_pm_opp_of_remove_table(info->cpu_dev); + dev_pm_opp_of_cpumask_remove_table(&info->cpus); } static int mtk_cpufreq_init(struct cpufreq_policy *policy) @@ -413,22 +467,18 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) struct cpufreq_frequency_table *freq_table; int ret; - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - - ret = mtk_cpu_dvfs_info_init(info, policy->cpu); - if (ret) { - pr_err("%s failed to initialize dvfs info for cpu%d\n", - __func__, policy->cpu); - goto out_free_dvfs_info; + info = mtk_cpu_dvfs_info_lookup(policy->cpu); + if (!info) { + pr_err("dvfs info for cpu%d is not initialized.\n", + policy->cpu); + return -EINVAL; } ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table for cpu%d: %d\n", policy->cpu, ret); - goto out_release_dvfs_info; + return ret; } ret = cpufreq_table_validate_and_show(policy, freq_table); @@ -437,8 +487,7 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) goto out_free_cpufreq_table; } - /* CPUs in the same cluster share a clock and power domain. 
*/ - cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling); + cpumask_copy(policy->cpus, &info->cpus); policy->driver_data = info; policy->clk = info->cpu_clk; @@ -446,13 +495,6 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) out_free_cpufreq_table: dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table); - -out_release_dvfs_info: - mtk_cpu_dvfs_info_release(info); - -out_free_dvfs_info: - kfree(info); - return ret; } @@ -462,14 +504,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(info->cdev); dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); - mtk_cpu_dvfs_info_release(info); - kfree(info); return 0; } static struct cpufreq_driver mt8173_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_HAVE_GOVERNOR_PER_POLICY, .verify = cpufreq_generic_frequency_table_verify, .target_index = mtk_cpufreq_set_target, .get = cpufreq_generic_get, @@ -482,11 +523,47 @@ static struct cpufreq_driver mt8173_cpufreq_driver = { static int mt8173_cpufreq_probe(struct platform_device *pdev) { - int ret; + struct mtk_cpu_dvfs_info *info; + struct list_head *list, *tmp; + int cpu, ret; + + for_each_possible_cpu(cpu) { + info = mtk_cpu_dvfs_info_lookup(cpu); + if (info) + continue; + + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto release_dvfs_info_list; + } + + ret = mtk_cpu_dvfs_info_init(info, cpu); + if (ret) { + dev_err(&pdev->dev, + "failed to initialize dvfs info for cpu%d\n", + cpu); + goto release_dvfs_info_list; + } + + list_add(&info->list_head, &dvfs_info_list); + } ret = cpufreq_register_driver(&mt8173_cpufreq_driver); - if (ret) - pr_err("failed to register mtk cpufreq driver\n"); + if (ret) { + dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); + goto release_dvfs_info_list; + } + + return 0; + +release_dvfs_info_list: + list_for_each_safe(list, tmp, &dvfs_info_list) { + info = list_entry(list, struct mtk_cpu_dvfs_info, list_head); + + mtk_cpu_dvfs_info_release(info); + list_del(list); + } return ret; } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 2a0d58959acf..808a320e9d5d 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -555,6 +555,8 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq = ioread32(&pcch_hdr->minimum_frequency) * 1000; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + pr_debug("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); out: diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 358f0752c31e..b23e525a7af3 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -12,6 +12,7 @@ #include <linux/clk.h> #include <linux/cpufreq.h> +#include <linux/cpu_cooling.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> @@ -33,6 +34,7 @@ struct cpu_data { struct clk **pclk; struct cpufreq_frequency_table *table; + struct thermal_cooling_device *cdev; }; /** @@ -321,6 +323,27 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy, return clk_set_parent(policy->clk, parent); } + +static void qoriq_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct cpu_data *cpud = policy->driver_data; + struct device_node *np = of_get_cpu_node(policy->cpu, NULL); + + if (of_find_property(np, "#cooling-cells", 
NULL)) { + cpud->cdev = of_cpufreq_cooling_register(np, + policy->related_cpus); + + if (IS_ERR(cpud->cdev)) { + pr_err("Failed to register cooling device cpu%d: %ld\n", + policy->cpu, PTR_ERR(cpud->cdev)); + + cpud->cdev = NULL; + } + } + + of_node_put(np); +} + static struct cpufreq_driver qoriq_cpufreq_driver = { .name = "qoriq_cpufreq", .flags = CPUFREQ_CONST_LOOPS, @@ -329,6 +352,7 @@ static struct cpufreq_driver qoriq_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = qoriq_cpufreq_target, .get = cpufreq_generic_get, + .ready = qoriq_cpufreq_ready, .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 2c3b16fd3a01..de5e89b2eaaa 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops; static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev) { - u8 domain = topology_physical_package_id(cpu_dev->id); + int domain = topology_physical_package_id(cpu_dev->id); if (domain < 0) return ERR_PTR(-EINVAL); diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c new file mode 100644 index 000000000000..a9c659f58974 --- /dev/null +++ b/drivers/cpufreq/sti-cpufreq.c @@ -0,0 +1,294 @@ +/* + * Match running platform with pre-defined OPP values for CPUFreq + * + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * Lee Jones <lee.jones@linaro.org> + * + * Copyright (C) 2015 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License as + * published by the Free Software Foundation + */ + +#include <linux/cpu.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pm_opp.h> +#include <linux/regmap.h> + +#define VERSION_ELEMENTS 3 +#define MAX_PCODE_NAME_LEN 7 + +#define VERSION_SHIFT 28 +#define HW_INFO_INDEX 1 +#define MAJOR_ID_INDEX 1 +#define MINOR_ID_INDEX 2 + +/* + * Only match on "suitable for ALL versions" entries + * + * This will be used with the BIT() macro. It sets the + * top bit of a 32bit value and is equal to 0x80000000. 
+ */ +#define DEFAULT_VERSION 31 + +enum { + PCODE = 0, + SUBSTRATE, + DVFS_MAX_REGFIELDS, +}; + +/** + * ST CPUFreq Driver Data + * + * @cpu_node CPU's OF node + * @syscfg_eng Engineering Syscon register map + * @regmap Syscon register map + */ +static struct sti_cpufreq_ddata { + struct device *cpu; + struct regmap *syscfg_eng; + struct regmap *syscfg; +} ddata; + +static int sti_cpufreq_fetch_major(void) { + struct device_node *np = ddata.cpu->of_node; + struct device *dev = ddata.cpu; + unsigned int major_offset; + unsigned int socid; + int ret; + + ret = of_property_read_u32_index(np, "st,syscfg", + MAJOR_ID_INDEX, &major_offset); + if (ret) { + dev_err(dev, "No major number offset provided in %s [%d]\n", + np->full_name, ret); + return ret; + } + + ret = regmap_read(ddata.syscfg, major_offset, &socid); + if (ret) { + dev_err(dev, "Failed to read major number from syscon [%d]\n", + ret); + return ret; + } + + return ((socid >> VERSION_SHIFT) & 0xf) + 1; +} + +static int sti_cpufreq_fetch_minor(void) +{ + struct device *dev = ddata.cpu; + struct device_node *np = dev->of_node; + unsigned int minor_offset; + unsigned int minid; + int ret; + + ret = of_property_read_u32_index(np, "st,syscfg-eng", + MINOR_ID_INDEX, &minor_offset); + if (ret) { + dev_err(dev, + "No minor number offset provided %s [%d]\n", + np->full_name, ret); + return ret; + } + + ret = regmap_read(ddata.syscfg_eng, minor_offset, &minid); + if (ret) { + dev_err(dev, + "Failed to read the minor number from syscon [%d]\n", + ret); + return ret; + } + + return minid & 0xf; +} + +static int sti_cpufreq_fetch_regmap_field(const struct reg_field *reg_fields, + int hw_info_offset, int field) +{ + struct regmap_field *regmap_field; + struct reg_field reg_field = reg_fields[field]; + struct device *dev = ddata.cpu; + unsigned int value; + int ret; + + reg_field.reg = hw_info_offset; + regmap_field = devm_regmap_field_alloc(dev, + ddata.syscfg_eng, + reg_field); + if (IS_ERR(regmap_field)) { + dev_err(dev, "Failed to allocate reg field\n"); + return PTR_ERR(regmap_field); + } + + ret = regmap_field_read(regmap_field, &value); + if (ret) { + dev_err(dev, "Failed to read %s code\n", + field ? 
"SUBSTRATE" : "PCODE"); + return ret; + } + + return value; +} + +static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = { + [PCODE] = REG_FIELD(0, 16, 19), + [SUBSTRATE] = REG_FIELD(0, 0, 2), +}; + +static const struct reg_field *sti_cpufreq_match(void) +{ + if (of_machine_is_compatible("st,stih407") || + of_machine_is_compatible("st,stih410")) + return sti_stih407_dvfs_regfields; + + return NULL; +} + +static int sti_cpufreq_set_opp_info(void) +{ + struct device *dev = ddata.cpu; + struct device_node *np = dev->of_node; + const struct reg_field *reg_fields; + unsigned int hw_info_offset; + unsigned int version[VERSION_ELEMENTS]; + int pcode, substrate, major, minor; + int ret; + char name[MAX_PCODE_NAME_LEN]; + + reg_fields = sti_cpufreq_match(); + if (!reg_fields) { + dev_err(dev, "This SoC doesn't support voltage scaling"); + return -ENODEV; + } + + ret = of_property_read_u32_index(np, "st,syscfg-eng", + HW_INFO_INDEX, &hw_info_offset); + if (ret) { + dev_warn(dev, "Failed to read HW info offset from DT\n"); + substrate = DEFAULT_VERSION; + pcode = 0; + goto use_defaults; + } + + pcode = sti_cpufreq_fetch_regmap_field(reg_fields, + hw_info_offset, + PCODE); + if (pcode < 0) { + dev_warn(dev, "Failed to obtain process code\n"); + /* Use default pcode */ + pcode = 0; + } + + substrate = sti_cpufreq_fetch_regmap_field(reg_fields, + hw_info_offset, + SUBSTRATE); + if (substrate) { + dev_warn(dev, "Failed to obtain substrate code\n"); + /* Use default substrate */ + substrate = DEFAULT_VERSION; + } + +use_defaults: + major = sti_cpufreq_fetch_major(); + if (major < 0) { + dev_err(dev, "Failed to obtain major version\n"); + /* Use default major number */ + major = DEFAULT_VERSION; + } + + minor = sti_cpufreq_fetch_minor(); + if (minor < 0) { + dev_err(dev, "Failed to obtain minor version\n"); + /* Use default minor number */ + minor = DEFAULT_VERSION; + } + + snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode); + + ret = dev_pm_opp_set_prop_name(dev, name); + if (ret) { + dev_err(dev, "Failed to set prop name\n"); + return ret; + } + + version[0] = BIT(major); + version[1] = BIT(minor); + version[2] = BIT(substrate); + + ret = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS); + if (ret) { + dev_err(dev, "Failed to set supported hardware\n"); + return ret; + } + + dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n", + pcode, major, minor, substrate); + dev_dbg(dev, "version[0]: %x version[1]: %x version[2]: %x\n", + version[0], version[1], version[2]); + + return 0; +} + +static int sti_cpufreq_fetch_syscon_regsiters(void) +{ + struct device *dev = ddata.cpu; + struct device_node *np = dev->of_node; + + ddata.syscfg = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); + if (IS_ERR(ddata.syscfg)) { + dev_err(dev, "\"st,syscfg\" not supplied\n"); + return PTR_ERR(ddata.syscfg); + } + + ddata.syscfg_eng = syscon_regmap_lookup_by_phandle(np, "st,syscfg-eng"); + if (IS_ERR(ddata.syscfg_eng)) { + dev_err(dev, "\"st,syscfg-eng\" not supplied\n"); + return PTR_ERR(ddata.syscfg_eng); + } + + return 0; +} + +static int sti_cpufreq_init(void) +{ + int ret; + + ddata.cpu = get_cpu_device(0); + if (!ddata.cpu) { + dev_err(ddata.cpu, "Failed to get device for CPU0\n"); + goto skip_voltage_scaling; + } + + if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) { + dev_err(ddata.cpu, "OPP-v2 not supported\n"); + goto skip_voltage_scaling; + } + + ret = sti_cpufreq_fetch_syscon_regsiters(); + if (ret) + goto skip_voltage_scaling; + + ret = 
sti_cpufreq_set_opp_info(); + if (!ret) + goto register_cpufreq_dt; + +skip_voltage_scaling: + dev_err(ddata.cpu, "Not doing voltage scaling\n"); + +register_cpufreq_dt: + platform_device_register_simple("cpufreq-dt", -1, NULL, 0); + + return 0; +} +module_init(sti_cpufreq_init); + +MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver"); +MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>"); +MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c index 18a7f7380508..66a9f231ec41 100644 --- a/drivers/cpuidle/cpuidle-clps711x.c +++ b/drivers/cpuidle/cpuidle-clps711x.c @@ -12,7 +12,7 @@ #include <linux/cpuidle.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/platform_device.h> #define CLPS711X_CPUIDLE_NAME "clps711x-cpuidle" @@ -56,8 +56,4 @@ static struct platform_driver clps711x_cpuidle_driver = { .name = CLPS711X_CPUIDLE_NAME, }, }; -module_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe); - -MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); -MODULE_DESCRIPTION("CLPS711X CPU idle driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe); diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c index b5f0a9cc8185..00cd129b10a4 100644 --- a/drivers/cpuidle/cpuidle-exynos.c +++ b/drivers/cpuidle/cpuidle-exynos.c @@ -14,7 +14,7 @@ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/export.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/platform_data/cpuidle-exynos.h> @@ -142,5 +142,4 @@ static struct platform_driver exynos_cpuidle_driver = { .name = "exynos_cpuidle", }, }; - -module_platform_driver(exynos_cpuidle_driver); +builtin_platform_driver(exynos_cpuidle_driver); diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c index 8bf895c0017d..7941a090bea6 100644 --- a/drivers/cpuidle/cpuidle-ux500.c +++ b/drivers/cpuidle/cpuidle-ux500.c @@ -9,7 +9,7 @@ * published by the Free Software Foundation. */ -#include <linux/module.h> +#include <linux/init.h> #include <linux/cpuidle.h> #include <linux/spinlock.h> #include <linux/atomic.h> @@ -124,5 +124,4 @@ static struct platform_driver dbx500_cpuidle_plat_driver = { }, .probe = dbx500_cpuidle_probe, }; - -module_platform_driver(dbx500_cpuidle_plat_driver); +builtin_platform_driver(dbx500_cpuidle_plat_driver); diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 22e4463d1787..7b0971d97cc3 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -330,7 +330,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) * We want to default to C1 (hlt), not to busy polling * unless the timer is happening really really soon. 
*/ - if (data->next_timer_us > 5 && + if (interactivity_req > 20 && !drv->states[CPUIDLE_DRIVER_STATE_START].disabled && dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0) data->last_state_idx = CPUIDLE_DRIVER_STATE_START; @@ -404,8 +404,10 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) measured_us = cpuidle_get_last_residency(dev); /* Deduct exit latency */ - if (measured_us > target->exit_latency) + if (measured_us > 2 * target->exit_latency) measured_us -= target->exit_latency; + else + measured_us /= 2; /* Make sure our coefficients do not exceed unity */ if (measured_us > data->next_timer_us) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 2569e043317e..3dd69df9c970 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -194,6 +194,9 @@ config CRYPTO_DEV_NIAGARA2 select CRYPTO_DES select CRYPTO_BLKCIPHER select CRYPTO_HASH + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 depends on SPARC64 help Each core of a Niagara2 processor contains a Stream @@ -378,10 +381,10 @@ config CRYPTO_DEV_BFIN_CRC config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" - depends on ARCH_AT91 + depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST select CRYPTO_AES + select CRYPTO_AEAD select CRYPTO_BLKCIPHER - select AT_HDMAC help Some Atmel processors have AES hw accelerator. Select this if you want to use the Atmel module for @@ -498,4 +501,15 @@ config CRYPTO_DEV_SUN4I_SS To compile this driver as a module, choose M here: the module will be called sun4i-ss. +config CRYPTO_DEV_ROCKCHIP + tristate "Rockchip's Cryptographic Engine driver" + depends on OF && ARCH_ROCKCHIP + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_BLKCIPHER + + help + This driver interfaces with the hardware crypto accelerator. + Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. 
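The Kconfig entries above only make these engines available to the kernel's crypto core; callers still go through the generic API, and the core transparently picks the highest-priority registered implementation of the requested algorithm. A minimal consumer sketch, assuming the 4.4-era ablkcipher interface used elsewhere in this series (illustrative only, not part of the patch; the helper name and the synchronous error handling are hypothetical):

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>
	#include <crypto/aes.h>

	/* Hypothetical helper: CBC-AES encrypt one block held in a DMA-capable
	 * (kmalloc'd) buffer. A real caller must also handle -EINPROGRESS /
	 * -EBUSY from asynchronous hardware drivers via a completion callback. */
	static int demo_cbc_aes_encrypt(u8 *buf, const u8 *key, u8 *iv)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (ret)
			goto out_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		sg_init_one(&sg, buf, AES_BLOCK_SIZE);
		ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
		ret = crypto_ablkcipher_encrypt(req);

		ablkcipher_request_free(req);
	out_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}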
+ endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c3ced6fbd1b8..713de9d11148 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ +obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 58a630e55d5d..62134c8a2260 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -781,6 +781,10 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, /* figure how many gd is needed */ num_gd = sg_nents_for_len(src, datalen); + if ((int)num_gd < 0) { + dev_err(dev->core_dev->device, "Invalid number of src SG.\n"); + return -EINVAL; + } if (num_gd == 1) num_gd = 0; diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h index 2786bb1a5aa0..6c2951bb70b1 100644 --- a/drivers/crypto/atmel-aes-regs.h +++ b/drivers/crypto/atmel-aes-regs.h @@ -9,6 +9,7 @@ #define AES_MR 0x04 #define AES_MR_CYPHER_DEC (0 << 0) #define AES_MR_CYPHER_ENC (1 << 0) +#define AES_MR_GTAGEN (1 << 1) #define AES_MR_DUALBUFF (1 << 3) #define AES_MR_PROCDLY_MASK (0xF << 4) #define AES_MR_PROCDLY_OFFSET 4 @@ -26,6 +27,7 @@ #define AES_MR_OPMOD_OFB (0x2 << 12) #define AES_MR_OPMOD_CFB (0x3 << 12) #define AES_MR_OPMOD_CTR (0x4 << 12) +#define AES_MR_OPMOD_GCM (0x5 << 12) #define AES_MR_LOD (0x1 << 15) #define AES_MR_CFBS_MASK (0x7 << 16) #define AES_MR_CFBS_128b (0x0 << 16) @@ -44,6 +46,7 @@ #define AES_ISR 0x1C #define AES_INT_DATARDY (1 << 0) #define AES_INT_URAD (1 << 8) +#define AES_INT_TAGRDY (1 << 16) #define AES_ISR_URAT_MASK (0xF << 12) #define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) #define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) @@ -57,6 +60,13 @@ #define AES_ODATAR(x) (0x50 + ((x) * 0x04)) #define AES_IVR(x) (0x60 + ((x) * 0x04)) +#define AES_AADLENR 0x70 +#define AES_CLENR 0x74 +#define AES_GHASHR(x) (0x78 + ((x) * 0x04)) +#define AES_TAGR(x) (0x88 + ((x) * 0x04)) +#define AES_CTRR 0x98 +#define AES_GCMHR(x) (0x9c + ((x) * 0x04)) + #define AES_HW_VERSION 0xFC #endif /* __ATMEL_AES_REGS_H__ */ diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index fb16d812c8f5..5621612ee921 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -33,68 +33,118 @@ #include <linux/of_device.h> #include <linux/delay.h> #include <linux/crypto.h> -#include <linux/cryptohash.h> #include <crypto/scatterwalk.h> #include <crypto/algapi.h> #include <crypto/aes.h> -#include <crypto/hash.h> -#include <crypto/internal/hash.h> +#include <crypto/internal/aead.h> #include <linux/platform_data/crypto-atmel.h> #include <dt-bindings/dma/at91.h> #include "atmel-aes-regs.h" +#define ATMEL_AES_PRIORITY 300 + +#define ATMEL_AES_BUFFER_ORDER 2 +#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER) + #define CFB8_BLOCK_SIZE 1 #define CFB16_BLOCK_SIZE 2 #define CFB32_BLOCK_SIZE 4 #define CFB64_BLOCK_SIZE 8 +#define SIZE_IN_WORDS(x) ((x) >> 2) + /* AES flags */ -#define AES_FLAGS_MODE_MASK 0x03ff -#define AES_FLAGS_ENCRYPT BIT(0) -#define AES_FLAGS_CBC BIT(1) -#define AES_FLAGS_CFB BIT(2) -#define AES_FLAGS_CFB8 BIT(3) -#define AES_FLAGS_CFB16 BIT(4) -#define AES_FLAGS_CFB32 BIT(5) -#define AES_FLAGS_CFB64 BIT(6) -#define AES_FLAGS_CFB128 BIT(7) -#define AES_FLAGS_OFB BIT(8) -#define AES_FLAGS_CTR BIT(9) - -#define AES_FLAGS_INIT BIT(16) -#define 
AES_FLAGS_DMA BIT(17) -#define AES_FLAGS_BUSY BIT(18) -#define AES_FLAGS_FAST BIT(19) +/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */ +#define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC +#define AES_FLAGS_GTAGEN AES_MR_GTAGEN +#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK) +#define AES_FLAGS_ECB AES_MR_OPMOD_ECB +#define AES_FLAGS_CBC AES_MR_OPMOD_CBC +#define AES_FLAGS_OFB AES_MR_OPMOD_OFB +#define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b) +#define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b) +#define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b) +#define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b) +#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) +#define AES_FLAGS_CTR AES_MR_OPMOD_CTR +#define AES_FLAGS_GCM AES_MR_OPMOD_GCM + +#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \ + AES_FLAGS_ENCRYPT | \ + AES_FLAGS_GTAGEN) + +#define AES_FLAGS_INIT BIT(2) +#define AES_FLAGS_BUSY BIT(3) +#define AES_FLAGS_DUMP_REG BIT(4) + +#define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) #define ATMEL_AES_QUEUE_LENGTH 50 -#define ATMEL_AES_DMA_THRESHOLD 16 +#define ATMEL_AES_DMA_THRESHOLD 256 struct atmel_aes_caps { - bool has_dualbuff; - bool has_cfb64; - u32 max_burst_size; + bool has_dualbuff; + bool has_cfb64; + bool has_ctr32; + bool has_gcm; + u32 max_burst_size; }; struct atmel_aes_dev; + +typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *); + + +struct atmel_aes_base_ctx { + struct atmel_aes_dev *dd; + atmel_aes_fn_t start; + int keylen; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + u16 block_size; +}; + struct atmel_aes_ctx { - struct atmel_aes_dev *dd; + struct atmel_aes_base_ctx base; +}; + +struct atmel_aes_ctr_ctx { + struct atmel_aes_base_ctx base; + + u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; + size_t offset; + struct scatterlist src[2]; + struct scatterlist dst[2]; +}; - int keylen; - u32 key[AES_KEYSIZE_256 / sizeof(u32)]; +struct atmel_aes_gcm_ctx { + struct atmel_aes_base_ctx base; - u16 block_size; + struct scatterlist src[2]; + struct scatterlist dst[2]; + + u32 j0[AES_BLOCK_SIZE / sizeof(u32)]; + u32 tag[AES_BLOCK_SIZE / sizeof(u32)]; + u32 ghash[AES_BLOCK_SIZE / sizeof(u32)]; + size_t textlen; + + const u32 *ghash_in; + u32 *ghash_out; + atmel_aes_fn_t ghash_resume; }; struct atmel_aes_reqctx { - unsigned long mode; + unsigned long mode; }; struct atmel_aes_dma { - struct dma_chan *chan; - struct dma_slave_config dma_conf; + struct dma_chan *chan; + struct scatterlist *sg; + int nents; + unsigned int remainder; + unsigned int sg_len; }; struct atmel_aes_dev { @@ -102,13 +152,18 @@ struct atmel_aes_dev { unsigned long phys_base; void __iomem *io_base; - struct atmel_aes_ctx *ctx; + struct crypto_async_request *areq; + struct atmel_aes_base_ctx *ctx; + + bool is_async; + atmel_aes_fn_t resume; + atmel_aes_fn_t cpu_transfer_complete; + struct device *dev; struct clk *iclk; - int irq; + int irq; unsigned long flags; - int err; spinlock_t lock; struct crypto_queue queue; @@ -116,33 +171,21 @@ struct atmel_aes_dev { struct tasklet_struct done_task; struct tasklet_struct queue_task; - struct ablkcipher_request *req; - size_t total; + size_t total; + size_t datalen; + u32 *data; - struct scatterlist *in_sg; - unsigned int nb_in_sg; - size_t in_offset; - struct scatterlist *out_sg; - unsigned int nb_out_sg; - size_t out_offset; + struct atmel_aes_dma src; + struct atmel_aes_dma dst; - size_t bufcnt; - size_t buflen; - size_t dma_size; - - void *buf_in; - int dma_in; - dma_addr_t dma_addr_in; - 
struct atmel_aes_dma dma_lch_in; - - void *buf_out; - int dma_out; - dma_addr_t dma_addr_out; - struct atmel_aes_dma dma_lch_out; + size_t buflen; + void *buf; + struct scatterlist aligned_sg; + struct scatterlist *real_dst; struct atmel_aes_caps caps; - u32 hw_version; + u32 hw_version; }; struct atmel_aes_drv { @@ -155,71 +198,128 @@ static struct atmel_aes_drv atmel_aes = { .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), }; -static int atmel_aes_sg_length(struct ablkcipher_request *req, - struct scatterlist *sg) +#ifdef VERBOSE_DEBUG +static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz) { - unsigned int total = req->nbytes; - int sg_nb; - unsigned int len; - struct scatterlist *sg_list; - - sg_nb = 0; - sg_list = sg; - total = req->nbytes; + switch (offset) { + case AES_CR: + return "CR"; + + case AES_MR: + return "MR"; + + case AES_ISR: + return "ISR"; + + case AES_IMR: + return "IMR"; + + case AES_IER: + return "IER"; + + case AES_IDR: + return "IDR"; + + case AES_KEYWR(0): + case AES_KEYWR(1): + case AES_KEYWR(2): + case AES_KEYWR(3): + case AES_KEYWR(4): + case AES_KEYWR(5): + case AES_KEYWR(6): + case AES_KEYWR(7): + snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2); + break; - while (total) { - len = min(sg_list->length, total); + case AES_IDATAR(0): + case AES_IDATAR(1): + case AES_IDATAR(2): + case AES_IDATAR(3): + snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2); + break; - sg_nb++; - total -= len; + case AES_ODATAR(0): + case AES_ODATAR(1): + case AES_ODATAR(2): + case AES_ODATAR(3): + snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2); + break; - sg_list = sg_next(sg_list); - if (!sg_list) - total = 0; - } + case AES_IVR(0): + case AES_IVR(1): + case AES_IVR(2): + case AES_IVR(3): + snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2); + break; - return sg_nb; -} + case AES_AADLENR: + return "AADLENR"; -static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset, - void *buf, size_t buflen, size_t total, int out) -{ - unsigned int count, off = 0; + case AES_CLENR: + return "CLENR"; - while (buflen && total) { - count = min((*sg)->length - *offset, total); - count = min(count, buflen); + case AES_GHASHR(0): + case AES_GHASHR(1): + case AES_GHASHR(2): + case AES_GHASHR(3): + snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2); + break; - if (!count) - return off; + case AES_TAGR(0): + case AES_TAGR(1): + case AES_TAGR(2): + case AES_TAGR(3): + snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2); + break; - scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); + case AES_CTRR: + return "CTRR"; - off += count; - buflen -= count; - *offset += count; - total -= count; + case AES_GCMHR(0): + case AES_GCMHR(1): + case AES_GCMHR(2): + case AES_GCMHR(3): + snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); - if (*offset == (*sg)->length) { - *sg = sg_next(*sg); - if (*sg) - *offset = 0; - else - total = 0; - } + default: + snprintf(tmp, sz, "0x%02x", offset); + break; } - return off; + return tmp; } +#endif /* VERBOSE_DEBUG */ + +/* Shared functions */ static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) { - return readl_relaxed(dd->io_base + offset); + u32 value = readl_relaxed(dd->io_base + offset); + +#ifdef VERBOSE_DEBUG + if (dd->flags & AES_FLAGS_DUMP_REG) { + char tmp[16]; + + dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, + atmel_aes_reg_name(offset, tmp, sizeof(tmp))); + } +#endif /* VERBOSE_DEBUG */ + + return value; } static inline void 
atmel_aes_write(struct atmel_aes_dev *dd, u32 offset, u32 value) { +#ifdef VERBOSE_DEBUG + if (dd->flags & AES_FLAGS_DUMP_REG) { + char tmp[16]; + + dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, + atmel_aes_reg_name(offset, tmp)); + } +#endif /* VERBOSE_DEBUG */ + writel_relaxed(value, dd->io_base + offset); } @@ -231,13 +331,50 @@ static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, } static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, - u32 *value, int count) + const u32 *value, int count) { for (; count--; value++, offset += 4) atmel_aes_write(dd, offset, *value); } -static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx) +static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset, + u32 *value) +{ + atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); +} + +static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset, + const u32 *value) +{ + atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); +} + +static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd, + atmel_aes_fn_t resume) +{ + u32 isr = atmel_aes_read(dd, AES_ISR); + + if (unlikely(isr & AES_INT_DATARDY)) + return resume(dd); + + dd->resume = resume; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; +} + +static inline size_t atmel_aes_padlen(size_t len, size_t block_size) +{ + len &= block_size - 1; + return len ? block_size - len : 0; +} + +static inline struct aead_request * +aead_request_cast(struct crypto_async_request *req) +{ + return container_of(req, struct aead_request, base); +} + +static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) { struct atmel_aes_dev *aes_dd = NULL; struct atmel_aes_dev *tmp; @@ -270,7 +407,6 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd) atmel_aes_write(dd, AES_CR, AES_CR_SWRST); atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); dd->flags |= AES_FLAGS_INIT; - dd->err = 0; } return 0; @@ -281,552 +417,643 @@ static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd) return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff; } -static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd) +static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) { - atmel_aes_hw_init(dd); + int err; + + err = atmel_aes_hw_init(dd); + if (err) + return err; dd->hw_version = atmel_aes_get_version(dd); - dev_info(dd->dev, - "version: 0x%x\n", dd->hw_version); + dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); clk_disable_unprepare(dd->iclk); + return 0; } -static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) +static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd, + const struct atmel_aes_reqctx *rctx) { - struct ablkcipher_request *req = dd->req; + /* Clear all but persistent flags and set request flags. 
*/ + dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode; +} +static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) +{ + return (dd->flags & AES_FLAGS_ENCRYPT); +} + +static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) +{ clk_disable_unprepare(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; - req->base.complete(&req->base, err); -} + if (dd->is_async) + dd->areq->complete(dd->areq, err); -static void atmel_aes_dma_callback(void *data) -{ - struct atmel_aes_dev *dd = data; + tasklet_schedule(&dd->queue_task); - /* dma_lch_out - completed */ - tasklet_schedule(&dd->done_task); + return err; } -static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, - dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length) +static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, + const u32 *iv) { - struct scatterlist sg[2]; - struct dma_async_tx_descriptor *in_desc, *out_desc; + u32 valmr = 0; - dd->dma_size = length; + /* MR register must be set before IV registers */ + if (dd->ctx->keylen == AES_KEYSIZE_128) + valmr |= AES_MR_KEYSIZE_128; + else if (dd->ctx->keylen == AES_KEYSIZE_192) + valmr |= AES_MR_KEYSIZE_192; + else + valmr |= AES_MR_KEYSIZE_256; - dma_sync_single_for_device(dd->dev, dma_addr_in, length, - DMA_TO_DEVICE); - dma_sync_single_for_device(dd->dev, dma_addr_out, length, - DMA_FROM_DEVICE); + valmr |= dd->flags & AES_FLAGS_MODE_MASK; - if (dd->flags & AES_FLAGS_CFB8) { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_1_BYTE; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_1_BYTE; - } else if (dd->flags & AES_FLAGS_CFB16) { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_2_BYTES; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_2_BYTES; + if (use_dma) { + valmr |= AES_MR_SMOD_IDATAR0; + if (dd->caps.has_dualbuff) + valmr |= AES_MR_DUALBUFF; } else { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; + valmr |= AES_MR_SMOD_AUTO; } - if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 | - AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) { - dd->dma_lch_in.dma_conf.src_maxburst = 1; - dd->dma_lch_in.dma_conf.dst_maxburst = 1; - dd->dma_lch_out.dma_conf.src_maxburst = 1; - dd->dma_lch_out.dma_conf.dst_maxburst = 1; - } else { - dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; - } + atmel_aes_write(dd, AES_MR, valmr); - dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); - dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); + atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, + SIZE_IN_WORDS(dd->ctx->keylen)); - dd->flags |= AES_FLAGS_DMA; + if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) + atmel_aes_write_block(dd, AES_IVR(0), iv); +} - sg_init_table(&sg[0], 1); - sg_dma_address(&sg[0]) = dma_addr_in; - sg_dma_len(&sg[0]) = length; - sg_init_table(&sg[1], 1); - sg_dma_address(&sg[1]) = dma_addr_out; - sg_dma_len(&sg[1]) = length; +/* CPU transfer */ - in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], - 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!in_desc) - return -EINVAL; +static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd) +{ + int err = 0; + u32 isr; - out_desc = 
dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], - 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!out_desc) - return -EINVAL; + for (;;) { + atmel_aes_read_block(dd, AES_ODATAR(0), dd->data); + dd->data += 4; + dd->datalen -= AES_BLOCK_SIZE; - out_desc->callback = atmel_aes_dma_callback; - out_desc->callback_param = dd; + if (dd->datalen < AES_BLOCK_SIZE) + break; - dmaengine_submit(out_desc); - dma_async_issue_pending(dd->dma_lch_out.chan); + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); - dmaengine_submit(in_desc); - dma_async_issue_pending(dd->dma_lch_in.chan); + isr = atmel_aes_read(dd, AES_ISR); + if (!(isr & AES_INT_DATARDY)) { + dd->resume = atmel_aes_cpu_transfer; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; + } + } - return 0; + if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), + dd->buf, dd->total)) + err = -EINVAL; + + if (err) + return atmel_aes_complete(dd, err); + + return dd->cpu_transfer_complete(dd); } -static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) +static int atmel_aes_cpu_start(struct atmel_aes_dev *dd, + struct scatterlist *src, + struct scatterlist *dst, + size_t len, + atmel_aes_fn_t resume) { - dd->flags &= ~AES_FLAGS_DMA; + size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE); - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, - dd->dma_size, DMA_TO_DEVICE); - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); - - /* use cache buffers */ - dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); - if (!dd->nb_in_sg) + if (unlikely(len == 0)) return -EINVAL; - dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); - if (!dd->nb_out_sg) - return -EINVAL; + sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); - dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, - dd->buf_in, dd->total); + dd->total = len; + dd->real_dst = dst; + dd->cpu_transfer_complete = resume; + dd->datalen = len + padlen; + dd->data = (u32 *)dd->buf; + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer); +} - if (!dd->bufcnt) - return -EINVAL; - dd->total -= dd->bufcnt; +/* DMA transfer */ - atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); - atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, - dd->bufcnt >> 2); +static void atmel_aes_dma_callback(void *data); - return 0; +static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd, + struct scatterlist *sg, + size_t len, + struct atmel_aes_dma *dma) +{ + int nents; + + if (!IS_ALIGNED(len, dd->ctx->block_size)) + return false; + + for (nents = 0; sg; sg = sg_next(sg), ++nents) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) + return false; + + if (len <= sg->length) { + if (!IS_ALIGNED(len, dd->ctx->block_size)) + return false; + + dma->nents = nents+1; + dma->remainder = sg->length - len; + sg->length = len; + return true; + } + + if (!IS_ALIGNED(sg->length, dd->ctx->block_size)) + return false; + + len -= sg->length; + } + + return false; } -static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) +static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma) { - int err, fast = 0, in, out; - size_t count; - dma_addr_t addr_in, addr_out; + struct scatterlist *sg = dma->sg; + int nents = dma->nents; - if ((!dd->in_offset) && (!dd->out_offset)) { - /* check for alignment */ - in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && - IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); - out = IS_ALIGNED((u32)dd->out_sg->offset, 
sizeof(u32)) && - IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); - fast = in && out; + if (!dma->remainder) + return; - if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) - fast = 0; - } + while (--nents > 0 && sg) + sg = sg_next(sg); + if (!sg) + return; - if (fast) { - count = min(dd->total, sg_dma_len(dd->in_sg)); - count = min(count, sg_dma_len(dd->out_sg)); + sg->length += dma->remainder; +} - err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - return -EINVAL; - } +static int atmel_aes_map(struct atmel_aes_dev *dd, + struct scatterlist *src, + struct scatterlist *dst, + size_t len) +{ + bool src_aligned, dst_aligned; + size_t padlen; + + dd->total = len; + dd->src.sg = src; + dd->dst.sg = dst; + dd->real_dst = dst; - err = dma_map_sg(dd->dev, dd->out_sg, 1, - DMA_FROM_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - dma_unmap_sg(dd->dev, dd->in_sg, 1, - DMA_TO_DEVICE); - return -EINVAL; + src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src); + if (src == dst) + dst_aligned = src_aligned; + else + dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst); + if (!src_aligned || !dst_aligned) { + padlen = atmel_aes_padlen(len, dd->ctx->block_size); + + if (dd->buflen < len + padlen) + return -ENOMEM; + + if (!src_aligned) { + sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); + dd->src.sg = &dd->aligned_sg; + dd->src.nents = 1; + dd->src.remainder = 0; } - addr_in = sg_dma_address(dd->in_sg); - addr_out = sg_dma_address(dd->out_sg); + if (!dst_aligned) { + dd->dst.sg = &dd->aligned_sg; + dd->dst.nents = 1; + dd->dst.remainder = 0; + } - dd->flags |= AES_FLAGS_FAST; + sg_init_table(&dd->aligned_sg, 1); + sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen); + } + if (dd->src.sg == dd->dst.sg) { + dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_BIDIRECTIONAL); + dd->dst.sg_len = dd->src.sg_len; + if (!dd->src.sg_len) + return -EFAULT; } else { - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, - dd->dma_size, DMA_TO_DEVICE); + dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_TO_DEVICE); + if (!dd->src.sg_len) + return -EFAULT; + + dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents, + DMA_FROM_DEVICE); + if (!dd->dst.sg_len) { + dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_TO_DEVICE); + return -EFAULT; + } + } - /* use cache buffers */ - count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset, - dd->buf_in, dd->buflen, dd->total, 0); + return 0; +} - addr_in = dd->dma_addr_in; - addr_out = dd->dma_addr_out; +static void atmel_aes_unmap(struct atmel_aes_dev *dd) +{ + if (dd->src.sg == dd->dst.sg) { + dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_BIDIRECTIONAL); - dd->flags &= ~AES_FLAGS_FAST; - } + if (dd->src.sg != &dd->aligned_sg) + atmel_aes_restore_sg(&dd->src); + } else { + dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents, + DMA_FROM_DEVICE); - dd->total -= count; + if (dd->dst.sg != &dd->aligned_sg) + atmel_aes_restore_sg(&dd->dst); - err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count); + dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_TO_DEVICE); - if (err && (dd->flags & AES_FLAGS_FAST)) { - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + if (dd->src.sg != &dd->aligned_sg) + atmel_aes_restore_sg(&dd->src); } - return err; + if (dd->dst.sg == &dd->aligned_sg) + sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), + dd->buf, dd->total); 
} -static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) +static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd, + enum dma_slave_buswidth addr_width, + enum dma_transfer_direction dir, + u32 maxburst) { + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config; + dma_async_tx_callback callback; + struct atmel_aes_dma *dma; int err; - u32 valcr = 0, valmr = 0; - err = atmel_aes_hw_init(dd); + memset(&config, 0, sizeof(config)); + config.direction = dir; + config.src_addr_width = addr_width; + config.dst_addr_width = addr_width; + config.src_maxburst = maxburst; + config.dst_maxburst = maxburst; + + switch (dir) { + case DMA_MEM_TO_DEV: + dma = &dd->src; + callback = NULL; + config.dst_addr = dd->phys_base + AES_IDATAR(0); + break; + + case DMA_DEV_TO_MEM: + dma = &dd->dst; + callback = atmel_aes_dma_callback; + config.src_addr = dd->phys_base + AES_ODATAR(0); + break; + default: + return -EINVAL; + } + + err = dmaengine_slave_config(dma->chan, &config); if (err) return err; - /* MR register must be set before IV registers */ - if (dd->ctx->keylen == AES_KEYSIZE_128) - valmr |= AES_MR_KEYSIZE_128; - else if (dd->ctx->keylen == AES_KEYSIZE_192) - valmr |= AES_MR_KEYSIZE_192; - else - valmr |= AES_MR_KEYSIZE_256; + desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -ENOMEM; - if (dd->flags & AES_FLAGS_CBC) { - valmr |= AES_MR_OPMOD_CBC; - } else if (dd->flags & AES_FLAGS_CFB) { - valmr |= AES_MR_OPMOD_CFB; - if (dd->flags & AES_FLAGS_CFB8) - valmr |= AES_MR_CFBS_8b; - else if (dd->flags & AES_FLAGS_CFB16) - valmr |= AES_MR_CFBS_16b; - else if (dd->flags & AES_FLAGS_CFB32) - valmr |= AES_MR_CFBS_32b; - else if (dd->flags & AES_FLAGS_CFB64) - valmr |= AES_MR_CFBS_64b; - else if (dd->flags & AES_FLAGS_CFB128) - valmr |= AES_MR_CFBS_128b; - } else if (dd->flags & AES_FLAGS_OFB) { - valmr |= AES_MR_OPMOD_OFB; - } else if (dd->flags & AES_FLAGS_CTR) { - valmr |= AES_MR_OPMOD_CTR; - } else { - valmr |= AES_MR_OPMOD_ECB; - } + desc->callback = callback; + desc->callback_param = dd; + dmaengine_submit(desc); + dma_async_issue_pending(dma->chan); - if (dd->flags & AES_FLAGS_ENCRYPT) - valmr |= AES_MR_CYPHER_ENC; + return 0; +} - if (dd->total > ATMEL_AES_DMA_THRESHOLD) { - valmr |= AES_MR_SMOD_IDATAR0; - if (dd->caps.has_dualbuff) - valmr |= AES_MR_DUALBUFF; - } else { - valmr |= AES_MR_SMOD_AUTO; +static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd, + enum dma_transfer_direction dir) +{ + struct atmel_aes_dma *dma; + + switch (dir) { + case DMA_MEM_TO_DEV: + dma = &dd->src; + break; + + case DMA_DEV_TO_MEM: + dma = &dd->dst; + break; + + default: + return; } - atmel_aes_write(dd, AES_CR, valcr); - atmel_aes_write(dd, AES_MR, valmr); + dmaengine_terminate_all(dma->chan); +} - atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, - dd->ctx->keylen >> 2); +static int atmel_aes_dma_start(struct atmel_aes_dev *dd, + struct scatterlist *src, + struct scatterlist *dst, + size_t len, + atmel_aes_fn_t resume) +{ + enum dma_slave_buswidth addr_width; + u32 maxburst; + int err; + + switch (dd->ctx->block_size) { + case CFB8_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + maxburst = 1; + break; + + case CFB16_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + maxburst = 1; + break; + + case CFB32_BLOCK_SIZE: + case CFB64_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + maxburst = 1; + break; + + case AES_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + maxburst = 
dd->caps.max_burst_size; + break; - if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || - (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && - dd->req->info) { - atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4); + default: + err = -EINVAL; + goto exit; } - return 0; + err = atmel_aes_map(dd, src, dst, len); + if (err) + goto exit; + + dd->resume = resume; + + /* Set output DMA transfer first */ + err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM, + maxburst); + if (err) + goto unmap; + + /* Then set input DMA transfer */ + err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV, + maxburst); + if (err) + goto output_transfer_stop; + + return -EINPROGRESS; + +output_transfer_stop: + atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); +unmap: + atmel_aes_unmap(dd); +exit: + return atmel_aes_complete(dd, err); +} + +static void atmel_aes_dma_stop(struct atmel_aes_dev *dd) +{ + atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV); + atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); + atmel_aes_unmap(dd); +} + +static void atmel_aes_dma_callback(void *data) +{ + struct atmel_aes_dev *dd = data; + + atmel_aes_dma_stop(dd); + dd->is_async = true; + (void)dd->resume(dd); } static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, - struct ablkcipher_request *req) + struct crypto_async_request *new_areq) { - struct crypto_async_request *async_req, *backlog; - struct atmel_aes_ctx *ctx; - struct atmel_aes_reqctx *rctx; + struct crypto_async_request *areq, *backlog; + struct atmel_aes_base_ctx *ctx; unsigned long flags; int err, ret = 0; spin_lock_irqsave(&dd->lock, flags); - if (req) - ret = ablkcipher_enqueue_request(&dd->queue, req); + if (new_areq) + ret = crypto_enqueue_request(&dd->queue, new_areq); if (dd->flags & AES_FLAGS_BUSY) { spin_unlock_irqrestore(&dd->lock, flags); return ret; } backlog = crypto_get_backlog(&dd->queue); - async_req = crypto_dequeue_request(&dd->queue); - if (async_req) + areq = crypto_dequeue_request(&dd->queue); + if (areq) dd->flags |= AES_FLAGS_BUSY; spin_unlock_irqrestore(&dd->lock, flags); - if (!async_req) + if (!areq) return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); - req = ablkcipher_request_cast(async_req); + ctx = crypto_tfm_ctx(areq->tfm); - /* assign new request to device */ - dd->req = req; - dd->total = req->nbytes; - dd->in_offset = 0; - dd->in_sg = req->src; - dd->out_offset = 0; - dd->out_sg = req->dst; - - rctx = ablkcipher_request_ctx(req); - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); - rctx->mode &= AES_FLAGS_MODE_MASK; - dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; + dd->areq = areq; dd->ctx = ctx; - ctx->dd = dd; + dd->is_async = (areq != new_areq); - err = atmel_aes_write_ctrl(dd); - if (!err) { - if (dd->total > ATMEL_AES_DMA_THRESHOLD) - err = atmel_aes_crypt_dma_start(dd); - else - err = atmel_aes_crypt_cpu_start(dd); - } - if (err) { - /* aes_task will not finish it, so do it here */ - atmel_aes_finish_req(dd, err); - tasklet_schedule(&dd->queue_task); - } - - return ret; + err = ctx->start(dd); + return (dd->is_async) ? 
ret : err; } -static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) -{ - int err = -EINVAL; - size_t count; - if (dd->flags & AES_FLAGS_DMA) { - err = 0; - if (dd->flags & AES_FLAGS_FAST) { - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - } else { - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); - - /* copy data */ - count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset, - dd->buf_out, dd->buflen, dd->dma_size, 1); - if (count != dd->dma_size) { - err = -EINVAL; - pr_err("not all data converted: %u\n", count); - } - } - } +/* AES async block ciphers */ - return err; +static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd) +{ + return atmel_aes_complete(dd, 0); } - -static int atmel_aes_buff_init(struct atmel_aes_dev *dd) +static int atmel_aes_start(struct atmel_aes_dev *dd) { - int err = -ENOMEM; - - dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); - dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); - dd->buflen = PAGE_SIZE; - dd->buflen &= ~(AES_BLOCK_SIZE - 1); - - if (!dd->buf_in || !dd->buf_out) { - dev_err(dd->dev, "unable to alloc pages.\n"); - goto err_alloc; - } + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD || + dd->ctx->block_size != AES_BLOCK_SIZE); + int err; - /* MAP here */ - dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, - dd->buflen, DMA_TO_DEVICE); - if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { - dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); - err = -EINVAL; - goto err_map_in; - } + atmel_aes_set_mode(dd, rctx); - dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, - dd->buflen, DMA_FROM_DEVICE); - if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { - dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); - err = -EINVAL; - goto err_map_out; - } + err = atmel_aes_hw_init(dd); + if (err) + return atmel_aes_complete(dd, err); - return 0; + atmel_aes_write_ctrl(dd, use_dma, req->info); + if (use_dma) + return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes, + atmel_aes_transfer_complete); -err_map_out: - dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, - DMA_TO_DEVICE); -err_map_in: -err_alloc: - free_page((unsigned long)dd->buf_out); - free_page((unsigned long)dd->buf_in); - if (err) - pr_err("error: %d\n", err); - return err; + return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes, + atmel_aes_transfer_complete); } -static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) +static inline struct atmel_aes_ctr_ctx * +atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx) { - dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, - DMA_FROM_DEVICE); - dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, - DMA_TO_DEVICE); - free_page((unsigned long)dd->buf_out); - free_page((unsigned long)dd->buf_in); + return container_of(ctx, struct atmel_aes_ctr_ctx, base); } -static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) +static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) { - struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx( - crypto_ablkcipher_reqtfm(req)); - struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); - struct atmel_aes_dev *dd; - - if (mode & AES_FLAGS_CFB8) { - if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB8 blocks\n"); - return -EINVAL; - } - 
ctx->block_size = CFB8_BLOCK_SIZE; - } else if (mode & AES_FLAGS_CFB16) { - if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB16 blocks\n"); - return -EINVAL; - } - ctx->block_size = CFB16_BLOCK_SIZE; - } else if (mode & AES_FLAGS_CFB32) { - if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB32 blocks\n"); - return -EINVAL; - } - ctx->block_size = CFB32_BLOCK_SIZE; - } else if (mode & AES_FLAGS_CFB64) { - if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB64 blocks\n"); - return -EINVAL; + struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct scatterlist *src, *dst; + u32 ctr, blocks; + size_t datalen; + bool use_dma, fragmented = false; + + /* Check for transfer completion. */ + ctx->offset += dd->total; + if (ctx->offset >= req->nbytes) + return atmel_aes_transfer_complete(dd); + + /* Compute data length. */ + datalen = req->nbytes - ctx->offset; + blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); + ctr = be32_to_cpu(ctx->iv[3]); + if (dd->caps.has_ctr32) { + /* Check 32bit counter overflow. */ + u32 start = ctr; + u32 end = start + blocks - 1; + + if (end < start) { + ctr |= 0xffffffff; + datalen = AES_BLOCK_SIZE * -start; + fragmented = true; } - ctx->block_size = CFB64_BLOCK_SIZE; } else { - if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { - pr_err("request size is not exact amount of AES blocks\n"); - return -EINVAL; + /* Check 16bit counter overflow. */ + u16 start = ctr & 0xffff; + u16 end = start + (u16)blocks - 1; + + if (blocks >> 16 || end < start) { + ctr |= 0xffff; + datalen = AES_BLOCK_SIZE * (0x10000-start); + fragmented = true; } - ctx->block_size = AES_BLOCK_SIZE; + } + use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD); + + /* Jump to offset. */ + src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset); + dst = ((req->src == req->dst) ? src : + scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset)); + + /* Configure hardware. */ + atmel_aes_write_ctrl(dd, use_dma, ctx->iv); + if (unlikely(fragmented)) { + /* + * Increment the counter manually to cope with the hardware + * counter overflow. 
+ */ + ctx->iv[3] = cpu_to_be32(ctr); + crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE); } - dd = atmel_aes_find_dev(ctx); - if (!dd) - return -ENODEV; - - rctx->mode = mode; + if (use_dma) + return atmel_aes_dma_start(dd, src, dst, datalen, + atmel_aes_ctr_transfer); - return atmel_aes_handle_queue(dd, req); + return atmel_aes_cpu_start(dd, src, dst, datalen, + atmel_aes_ctr_transfer); } -static bool atmel_aes_filter(struct dma_chan *chan, void *slave) +static int atmel_aes_ctr_start(struct atmel_aes_dev *dd) { - struct at_dma_slave *sl = slave; + struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + int err; - if (sl && sl->dma_dev == chan->device->dev) { - chan->private = sl; - return true; - } else { - return false; - } + atmel_aes_set_mode(dd, rctx); + + err = atmel_aes_hw_init(dd); + if (err) + return atmel_aes_complete(dd, err); + + memcpy(ctx->iv, req->info, AES_BLOCK_SIZE); + ctx->offset = 0; + dd->total = 0; + return atmel_aes_ctr_transfer(dd); } -static int atmel_aes_dma_init(struct atmel_aes_dev *dd, - struct crypto_platform_data *pdata) +static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { - int err = -ENOMEM; - dma_cap_mask_t mask; + struct atmel_aes_base_ctx *ctx; + struct atmel_aes_reqctx *rctx; + struct atmel_aes_dev *dd; - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + switch (mode & AES_FLAGS_OPMODE_MASK) { + case AES_FLAGS_CFB8: + ctx->block_size = CFB8_BLOCK_SIZE; + break; - /* Try to grab 2 DMA channels */ - dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, - atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); - if (!dd->dma_lch_in.chan) - goto err_dma_in; + case AES_FLAGS_CFB16: + ctx->block_size = CFB16_BLOCK_SIZE; + break; - dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; - dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + - AES_IDATAR(0); - dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_in.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_in.dma_conf.device_fc = false; - - dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, - atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); - if (!dd->dma_lch_out.chan) - goto err_dma_out; + case AES_FLAGS_CFB32: + ctx->block_size = CFB32_BLOCK_SIZE; + break; - dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; - dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + - AES_ODATAR(0); - dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.device_fc = false; + case AES_FLAGS_CFB64: + ctx->block_size = CFB64_BLOCK_SIZE; + break; - return 0; + default: + ctx->block_size = AES_BLOCK_SIZE; + break; + } -err_dma_out: - dma_release_channel(dd->dma_lch_in.chan); -err_dma_in: - dev_warn(dd->dev, "no DMA channel available\n"); - return err; -} + dd = atmel_aes_find_dev(ctx); + if (!dd) + return -ENODEV; -static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) -{ - dma_release_channel(dd->dma_lch_in.chan); - 
dma_release_channel(dd->dma_lch_out.chan); + rctx = ablkcipher_request_ctx(req); + rctx->mode = mode; + + return atmel_aes_handle_queue(dd, &req->base); } static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm); - if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) { + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -839,115 +1066,110 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT); + return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); } static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - 0); + return atmel_aes_crypt(req, AES_FLAGS_ECB); } static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); + return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); } static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CBC); + return atmel_aes_crypt(req, AES_FLAGS_CBC); } static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); + return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT); } static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_OFB); + return atmel_aes_crypt(req, AES_FLAGS_OFB); } static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128); + return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB128); + return atmel_aes_crypt(req, AES_FLAGS_CFB128); } static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64); + return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB64); + return atmel_aes_crypt(req, AES_FLAGS_CFB64); } static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32); + return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB32); + return atmel_aes_crypt(req, AES_FLAGS_CFB32); } static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16); + return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB16); + return atmel_aes_crypt(req, AES_FLAGS_CFB16); } static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - 
AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8); + return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB8); + return atmel_aes_crypt(req, AES_FLAGS_CFB8); } static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); + return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT); } static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CTR); + return atmel_aes_crypt(req, AES_FLAGS_CTR); } static int atmel_aes_cra_init(struct crypto_tfm *tfm) { + struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); + ctx->base.start = atmel_aes_start; + + return 0; +} + +static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm); + tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); + ctx->base.start = atmel_aes_ctr_start; return 0; } @@ -960,7 +1182,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "atmel-ecb-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -980,7 +1202,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "atmel-cbc-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1001,7 +1223,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "ofb(aes)", .cra_driver_name = "atmel-ofb-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1022,7 +1244,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb(aes)", .cra_driver_name = "atmel-cfb-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1043,7 +1265,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb32(aes)", .cra_driver_name = "atmel-cfb32-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1064,7 +1286,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb16(aes)", .cra_driver_name = "atmel-cfb16-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB16_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1085,7 +1307,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb8(aes)", .cra_driver_name = "atmel-cfb8-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB8_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1106,14 +1328,14 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "ctr(aes)", .cra_driver_name = "atmel-ctr-aes", - .cra_priority = 100, 
+ .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx), .cra_alignmask = 0xf, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, - .cra_init = atmel_aes_cra_init, + .cra_init = atmel_aes_ctr_cra_init, .cra_exit = atmel_aes_cra_exit, .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, @@ -1129,7 +1351,7 @@ static struct crypto_alg aes_algs[] = { static struct crypto_alg aes_cfb64_alg = { .cra_name = "cfb64(aes)", .cra_driver_name = "atmel-cfb64-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB64_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1148,53 +1370,496 @@ static struct crypto_alg aes_cfb64_alg = { } }; -static void atmel_aes_queue_task(unsigned long data) + +/* gcm aead functions */ + +static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, + const u32 *data, size_t datalen, + const u32 *ghash_in, u32 *ghash_out, + atmel_aes_fn_t resume); +static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd); + +static int atmel_aes_gcm_start(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_process(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_length(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_data(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd); + +static inline struct atmel_aes_gcm_ctx * +atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx) { - struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; + return container_of(ctx, struct atmel_aes_gcm_ctx, base); +} - atmel_aes_handle_queue(dd, NULL); +static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, + const u32 *data, size_t datalen, + const u32 *ghash_in, u32 *ghash_out, + atmel_aes_fn_t resume) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + + dd->data = (u32 *)data; + dd->datalen = datalen; + ctx->ghash_in = ghash_in; + ctx->ghash_out = ghash_out; + ctx->ghash_resume = resume; + + atmel_aes_write_ctrl(dd, false, NULL); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init); } -static void atmel_aes_done_task(unsigned long data) +static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd) { - struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + + /* Set the data length. */ + atmel_aes_write(dd, AES_AADLENR, dd->total); + atmel_aes_write(dd, AES_CLENR, 0); + + /* If needed, overwrite the GCM Intermediate Hash Word Registers */ + if (ctx->ghash_in) + atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in); + + return atmel_aes_gcm_ghash_finalize(dd); +} + +static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + u32 isr; + + /* Write data into the Input Data Registers. 
*/ + while (dd->datalen > 0) { + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + dd->data += 4; + dd->datalen -= AES_BLOCK_SIZE; + + isr = atmel_aes_read(dd, AES_ISR); + if (!(isr & AES_INT_DATARDY)) { + dd->resume = atmel_aes_gcm_ghash_finalize; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; + } + } + + /* Read the computed hash from GHASHRx. */ + atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out); + + return ctx->ghash_resume(dd); +} + + +static int atmel_aes_gcm_start(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct atmel_aes_reqctx *rctx = aead_request_ctx(req); + size_t ivsize = crypto_aead_ivsize(tfm); + size_t datalen, padlen; + const void *iv = req->iv; + u8 *data = dd->buf; int err; - if (!(dd->flags & AES_FLAGS_DMA)) { - atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, - dd->bufcnt >> 2); + atmel_aes_set_mode(dd, rctx); - if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, - dd->buf_out, dd->bufcnt)) - err = 0; - else - err = -EINVAL; + err = atmel_aes_hw_init(dd); + if (err) + return atmel_aes_complete(dd, err); + + if (likely(ivsize == 12)) { + memcpy(ctx->j0, iv, ivsize); + ctx->j0[3] = cpu_to_be32(1); + return atmel_aes_gcm_process(dd); + } + + padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE); + datalen = ivsize + padlen + AES_BLOCK_SIZE; + if (datalen > dd->buflen) + return atmel_aes_complete(dd, -EINVAL); + + memcpy(data, iv, ivsize); + memset(data + ivsize, 0, padlen + sizeof(u64)); + ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8); + + return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen, + NULL, ctx->j0, atmel_aes_gcm_process); +} - goto cpu_end; +static int atmel_aes_gcm_process(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + bool enc = atmel_aes_is_encrypt(dd); + u32 authsize; + + /* Compute text length. */ + authsize = crypto_aead_authsize(tfm); + ctx->textlen = req->cryptlen - (enc ? 0 : authsize); + + /* + * According to tcrypt test suite, the GCM Automatic Tag Generation + * fails when both the message and its associated data are empty. + */ + if (likely(req->assoclen != 0 || ctx->textlen != 0)) + dd->flags |= AES_FLAGS_GTAGEN; + + atmel_aes_write_ctrl(dd, false, NULL); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length); +} + +static int atmel_aes_gcm_length(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + u32 j0_lsw, *j0 = ctx->j0; + size_t padlen; + + /* Write incr32(J0) into IV. */ + j0_lsw = j0[3]; + j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1); + atmel_aes_write_block(dd, AES_IVR(0), j0); + j0[3] = j0_lsw; + + /* Set aad and text lengths. */ + atmel_aes_write(dd, AES_AADLENR, req->assoclen); + atmel_aes_write(dd, AES_CLENR, ctx->textlen); + + /* Check whether AAD are present. */ + if (unlikely(req->assoclen == 0)) { + dd->datalen = 0; + return atmel_aes_gcm_data(dd); } - err = atmel_aes_crypt_dma_stop(dd); + /* Copy assoc data and add padding. 
*/ + padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE); + if (unlikely(req->assoclen + padlen > dd->buflen)) + return atmel_aes_complete(dd, -EINVAL); + sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen); - err = dd->err ? : err; + /* Write assoc data into the Input Data register. */ + dd->data = (u32 *)dd->buf; + dd->datalen = req->assoclen + padlen; + return atmel_aes_gcm_data(dd); +} - if (dd->total && !err) { - if (dd->flags & AES_FLAGS_FAST) { - dd->in_sg = sg_next(dd->in_sg); - dd->out_sg = sg_next(dd->out_sg); - if (!dd->in_sg || !dd->out_sg) - err = -EINVAL; +static int atmel_aes_gcm_data(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD); + struct scatterlist *src, *dst; + u32 isr, mr; + + /* Write AAD first. */ + while (dd->datalen > 0) { + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + dd->data += 4; + dd->datalen -= AES_BLOCK_SIZE; + + isr = atmel_aes_read(dd, AES_ISR); + if (!(isr & AES_INT_DATARDY)) { + dd->resume = atmel_aes_gcm_data; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; } - if (!err) - err = atmel_aes_crypt_dma_start(dd); - if (!err) - return; /* DMA started. Not fininishing. */ } -cpu_end: - atmel_aes_finish_req(dd, err); + /* GMAC only. */ + if (unlikely(ctx->textlen == 0)) + return atmel_aes_gcm_tag_init(dd); + + /* Prepare src and dst scatter lists to transfer cipher/plain texts */ + src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen); + dst = ((req->src == req->dst) ? src : + scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen)); + + if (use_dma) { + /* Update the Mode Register for DMA transfers. */ + mr = atmel_aes_read(dd, AES_MR); + mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF); + mr |= AES_MR_SMOD_IDATAR0; + if (dd->caps.has_dualbuff) + mr |= AES_MR_DUALBUFF; + atmel_aes_write(dd, AES_MR, mr); + + return atmel_aes_dma_start(dd, src, dst, ctx->textlen, + atmel_aes_gcm_tag_init); + } + + return atmel_aes_cpu_start(dd, src, dst, ctx->textlen, + atmel_aes_gcm_tag_init); +} + +static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + u64 *data = dd->buf; + + if (likely(dd->flags & AES_FLAGS_GTAGEN)) { + if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) { + dd->resume = atmel_aes_gcm_tag_init; + atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY); + return -EINPROGRESS; + } + + return atmel_aes_gcm_finalize(dd); + } + + /* Read the GCM Intermediate Hash Word Registers. */ + atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash); + + data[0] = cpu_to_be64(req->assoclen * 8); + data[1] = cpu_to_be64(ctx->textlen * 8); + + return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE, + ctx->ghash, ctx->ghash, atmel_aes_gcm_tag); +} + +static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + unsigned long flags; + + /* + * Change mode to CTR to complete the tag generation. + * Use J0 as Initialization Vector. 
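
atmel_aes_gcm_length() above writes incr32(J0) into the IV registers (only the last 32 bits of the counter block are incremented, big-endian, which is why j0[3] is saved and restored), and atmel_aes_gcm_tag_init() later feeds GHASH a final block holding the bit lengths of the AAD and the ciphertext. A small byte-level sketch of both operations, for illustration:

/* Sketch: GCM's incr32() -- only the last 32 bits of the counter block
 * are incremented (big-endian); the upper 96 bits stay fixed. */
#include <stdint.h>

static void gcm_incr32(uint8_t block[16])
{
        uint32_t ctr = ((uint32_t)block[12] << 24) | ((uint32_t)block[13] << 16) |
                       ((uint32_t)block[14] << 8)  |  (uint32_t)block[15];

        ctr++;                          /* wraps modulo 2^32 by definition */

        block[12] = (uint8_t)(ctr >> 24);
        block[13] = (uint8_t)(ctr >> 16);
        block[14] = (uint8_t)(ctr >> 8);
        block[15] = (uint8_t)ctr;
}

/* Sketch: the final GHASH block, 64-bit big-endian bit lengths of the AAD
 * and of the ciphertext, mirroring atmel_aes_gcm_tag_init() above. */
static void gcm_len_block(uint64_t aadlen, uint64_t textlen, uint8_t block[16])
{
        for (int i = 0; i < 8; i++) {
                block[7 - i]  = (uint8_t)((aadlen * 8) >> (8 * i));
                block[15 - i] = (uint8_t)((textlen * 8) >> (8 * i));
        }
}
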
+ */ + flags = dd->flags; + dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN); + dd->flags |= AES_FLAGS_CTR; + atmel_aes_write_ctrl(dd, false, ctx->j0); + dd->flags = flags; + + atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize); +} + +static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + bool enc = atmel_aes_is_encrypt(dd); + u32 offset, authsize, itag[4], *otag = ctx->tag; + int err; + + /* Read the computed tag. */ + if (likely(dd->flags & AES_FLAGS_GTAGEN)) + atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag); + else + atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag); + + offset = req->assoclen + ctx->textlen; + authsize = crypto_aead_authsize(tfm); + if (enc) { + scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1); + err = 0; + } else { + scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0); + err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0; + } + + return atmel_aes_complete(dd, err); +} + +static int atmel_aes_gcm_crypt(struct aead_request *req, + unsigned long mode) +{ + struct atmel_aes_base_ctx *ctx; + struct atmel_aes_reqctx *rctx; + struct atmel_aes_dev *dd; + + ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + ctx->block_size = AES_BLOCK_SIZE; + + dd = atmel_aes_find_dev(ctx); + if (!dd) + return -ENODEV; + + rctx = aead_request_ctx(req); + rctx->mode = AES_FLAGS_GCM | mode; + + return atmel_aes_handle_queue(dd, &req->base); +} + +static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm); + + if (keylen != AES_KEYSIZE_256 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_128) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + /* Same as crypto_gcm_authsize() from crypto/gcm.c */ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int atmel_aes_gcm_encrypt(struct aead_request *req) +{ + return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT); +} + +static int atmel_aes_gcm_decrypt(struct aead_request *req) +{ + return atmel_aes_gcm_crypt(req, 0); +} + +static int atmel_aes_gcm_init(struct crypto_aead *tfm) +{ + struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); + ctx->base.start = atmel_aes_gcm_start; + + return 0; +} + +static void atmel_aes_gcm_exit(struct crypto_aead *tfm) +{ + +} + +static struct aead_alg aes_gcm_alg = { + .setkey = atmel_aes_gcm_setkey, + .setauthsize = atmel_aes_gcm_setauthsize, + .encrypt = atmel_aes_gcm_encrypt, + .decrypt = atmel_aes_gcm_decrypt, + .init = atmel_aes_gcm_init, + .exit = atmel_aes_gcm_exit, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "atmel-gcm-aes", + .cra_priority = ATMEL_AES_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}; + + +/* Probe functions */ + +static int 
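
On decryption, atmel_aes_gcm_finalize() above compares the expected and received tags with crypto_memneq() rather than memcmp(), so the comparison time does not reveal where the first mismatching byte is. A simplified constant-time comparison in the same spirit (not the kernel's implementation):

/* Sketch: constant-time tag comparison -- no early exit on mismatch. */
#include <stddef.h>
#include <stdint.h>

static int tag_mismatch(const uint8_t *a, const uint8_t *b, size_t len)
{
        uint8_t diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
                diff |= a[i] ^ b[i];    /* accumulate differences, never break */

        return diff != 0;               /* non-zero means the tags differ */
}

/* Usage mirrors the decrypt path above:
 *      err = tag_mismatch(itag, otag, authsize) ? -EBADMSG : 0;
 */
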
atmel_aes_buff_init(struct atmel_aes_dev *dd) +{ + dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER); + dd->buflen = ATMEL_AES_BUFFER_SIZE; + dd->buflen &= ~(AES_BLOCK_SIZE - 1); + + if (!dd->buf) { + dev_err(dd->dev, "unable to alloc pages.\n"); + return -ENOMEM; + } + + return 0; +} + +static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) +{ + free_page((unsigned long)dd->buf); +} + +static bool atmel_aes_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *sl = slave; + + if (sl && sl->dma_dev == chan->device->dev) { + chan->private = sl; + return true; + } else { + return false; + } +} + +static int atmel_aes_dma_init(struct atmel_aes_dev *dd, + struct crypto_platform_data *pdata) +{ + struct at_dma_slave *slave; + int err = -ENOMEM; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Try to grab 2 DMA channels */ + slave = &pdata->dma_slave->rxdata; + dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, + slave, dd->dev, "tx"); + if (!dd->src.chan) + goto err_dma_in; + + slave = &pdata->dma_slave->txdata; + dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, + slave, dd->dev, "rx"); + if (!dd->dst.chan) + goto err_dma_out; + + return 0; + +err_dma_out: + dma_release_channel(dd->src.chan); +err_dma_in: + dev_warn(dd->dev, "no DMA channel available\n"); + return err; +} + +static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) +{ + dma_release_channel(dd->dst.chan); + dma_release_channel(dd->src.chan); +} + +static void atmel_aes_queue_task(unsigned long data) +{ + struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; + atmel_aes_handle_queue(dd, NULL); } +static void atmel_aes_done_task(unsigned long data) +{ + struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; + + dd->is_async = true; + (void)dd->resume(dd); +} + static irqreturn_t atmel_aes_irq(int irq, void *dev_id) { struct atmel_aes_dev *aes_dd = dev_id; @@ -1217,10 +1882,14 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) { int i; - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) - crypto_unregister_alg(&aes_algs[i]); + if (dd->caps.has_gcm) + crypto_unregister_aead(&aes_gcm_alg); + if (dd->caps.has_cfb64) crypto_unregister_alg(&aes_cfb64_alg); + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) + crypto_unregister_alg(&aes_algs[i]); } static int atmel_aes_register_algs(struct atmel_aes_dev *dd) @@ -1239,8 +1908,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) goto err_aes_cfb64_alg; } + if (dd->caps.has_gcm) { + err = crypto_register_aead(&aes_gcm_alg); + if (err) + goto err_aes_gcm_alg; + } + return 0; +err_aes_gcm_alg: + crypto_unregister_alg(&aes_cfb64_alg); err_aes_cfb64_alg: i = ARRAY_SIZE(aes_algs); err_aes_algs: @@ -1254,13 +1931,24 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) { dd->caps.has_dualbuff = 0; dd->caps.has_cfb64 = 0; + dd->caps.has_ctr32 = 0; + dd->caps.has_gcm = 0; dd->caps.max_burst_size = 1; /* keep only major version number */ switch (dd->hw_version & 0xff0) { + case 0x500: + dd->caps.has_dualbuff = 1; + dd->caps.has_cfb64 = 1; + dd->caps.has_ctr32 = 1; + dd->caps.has_gcm = 1; + dd->caps.max_burst_size = 4; + break; case 0x200: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; + dd->caps.has_ctr32 = 1; + dd->caps.has_gcm = 1; dd->caps.max_burst_size = 4; break; case 0x130: @@ -1402,7 +2090,9 @@ static int atmel_aes_probe(struct platform_device *pdev) goto res_err; } - atmel_aes_hw_version_init(aes_dd); + err = 
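
atmel_aes_buff_init() above rounds the bounce-buffer length down to a whole number of AES blocks with a mask, which works because the block size is a power of two. A trivial standalone sketch of that rounding:

/* Sketch: round a buffer length down to a whole number of AES blocks,
 * as done with "buflen &= ~(AES_BLOCK_SIZE - 1)" above. */
#include <stddef.h>

#define AES_BLOCK_SIZE 16

static size_t round_down_to_block(size_t len)
{
        return len & ~(size_t)(AES_BLOCK_SIZE - 1);
}

/* Example: round_down_to_block(1000) == 992, i.e. 62 full 16-byte blocks. */
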
atmel_aes_hw_version_init(aes_dd); + if (err) + goto res_err; atmel_aes_get_cap(aes_dd); @@ -1423,8 +2113,8 @@ static int atmel_aes_probe(struct platform_device *pdev) goto err_algs; dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", - dma_chan_name(aes_dd->dma_lch_in.chan), - dma_chan_name(aes_dd->dma_lch_out.chan)); + dma_chan_name(aes_dd->src.chan), + dma_chan_name(aes_dd->dst.chan)); return 0; @@ -1462,6 +2152,7 @@ static int atmel_aes_remove(struct platform_device *pdev) tasklet_kill(&aes_dd->queue_task); atmel_aes_dma_cleanup(aes_dd); + atmel_aes_buff_cleanup(aes_dd); return 0; } diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 660d8c06540b..20de861aa0ea 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -755,7 +755,6 @@ static int atmel_sha_finish(struct ahash_request *req) { struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); struct atmel_sha_dev *dd = ctx->dd; - int err = 0; if (ctx->digcnt[0] || ctx->digcnt[1]) atmel_sha_copy_ready_hash(req); @@ -763,7 +762,7 @@ static int atmel_sha_finish(struct ahash_request *req) dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt); - return err; + return 0; } static void atmel_sha_finish_req(struct ahash_request *req, int err) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 49106ea42887..5845d4a08797 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -803,6 +803,10 @@ static int ahash_update_ctx(struct ahash_request *req) if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - (*next_buflen)); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_src_index = 1 + (*buflen ? 1 : 0); sec4_sg_bytes = (sec4_sg_src_index + src_nents) * sizeof(struct sec4_sg_entry); @@ -1002,6 +1006,10 @@ static int ahash_finup_ctx(struct ahash_request *req) int sh_len; src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_src_index = 1 + (buflen ? 1 : 0); sec4_sg_bytes = (sec4_sg_src_index + src_nents) * sizeof(struct sec4_sg_entry); @@ -1086,6 +1094,10 @@ static int ahash_digest(struct ahash_request *req) int sh_len; src_nents = sg_count(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); @@ -1234,6 +1246,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - (*next_buflen)); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_bytes = (1 + src_nents) * sizeof(struct sec4_sg_entry); @@ -1342,6 +1358,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int ret = 0; src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_src_index = 2; sec4_sg_bytes = (sec4_sg_src_index + src_nents) * sizeof(struct sec4_sg_entry); @@ -1430,6 +1450,10 @@ static int ahash_update_first(struct ahash_request *req) if (to_hash) { src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } dma_map_sg(jrdev, req->src, src_nents ? 
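
The caam hunks above add checks for a negative return from sg_nents_for_len(), which reports how many scatterlist entries are needed to cover a byte count, or an error when the list is too short; the count is then used for allocation sizes and DMA mapping, so it must be validated first. A simplified model of those semantics (illustrative only, not the kernel implementation):

/* Simplified model of sg_nents_for_len(): number of entries needed to
 * cover 'len' bytes, or -EINVAL if the list runs out first -- which is
 * why the hunks above bail out on a negative return. */
#include <errno.h>
#include <stddef.h>

struct toy_sg { size_t length; };

static int toy_sg_nents_for_len(const struct toy_sg *sg, int nents, size_t len)
{
        int i;

        if (!len)
                return 0;

        for (i = 0; i < nents; i++) {
                if (sg[i].length >= len)
                        return i + 1;
                len -= sg[i].length;
        }

        return -EINVAL;                 /* list shorter than the requested length */
}
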
: 1, DMA_TO_DEVICE); sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); @@ -1572,7 +1596,7 @@ static int ahash_export(struct ahash_request *req, void *out) len = state->buflen_1; } else { buf = state->buf_0; - len = state->buflen_1; + len = state->buflen_0; } memcpy(export->buf, buf, len); diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 3cd8481065f8..6e37845abf8f 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD depends on CRYPTO_DEV_CCP default m select HW_RANDOM + select CRYPTO_SHA1 + select CRYPTO_SHA256 help Provides the interface to use the AMD Cryptographic Coprocessor which can be used to offload encryption operations such as SHA, diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c6e883b296a9..6613aee79b87 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; -/* The CCP cannot perform zero-length sha operations so the caller - * is required to buffer data for the final operation. However, a - * sha operation for a message with a total length of zero is valid - * so known values are required to supply the result. - */ -static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, - 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, - 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, - 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, - 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, - 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, -}; - static u32 ccp_addr_lo(struct ccp_dma_info *info) { return lower_32_bits(info->address + info->offset); @@ -1391,18 +1365,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sha->msg_bits) return -EINVAL; - /* A sha operation for a message with a total length of zero, - * return known result. + /* The CCP cannot perform zero-length sha operations so the + * caller is required to buffer data for the final operation. + * However, a sha operation for a message with a total length + * of zero is valid so known values are required to supply + * the result. 
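
The ccp driver above drops its private tables of zero-length digests in favour of the shared sha1/sha224/sha256_zero_message_hash constants: the engine cannot hash an empty message, so the known digest of "" is returned directly. A sketch of that shortcut for SHA-256 follows; the bytes match the ccp_sha256_zero table removed above, and the helper name is hypothetical, not a ccp function.

/* Sketch: satisfy a zero-length digest request with the well-known
 * SHA-256 hash of the empty message. */
#include <stdint.h>
#include <string.h>

static const uint8_t sha256_empty_digest[32] = {
        0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
        0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
        0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
        0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};

/* Returns 1 if the request was satisfied here, 0 if the hardware must run. */
static int sha256_zero_len_final(size_t msg_len, uint8_t out[32])
{
        if (msg_len != 0)
                return 0;

        memcpy(out, sha256_empty_digest, sizeof(sha256_empty_digest));
        return 1;
}
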
*/ switch (sha->type) { case CCP_SHA_TYPE_1: - sha_zero = ccp_sha1_zero; + sha_zero = sha1_zero_message_hash; break; case CCP_SHA_TYPE_224: - sha_zero = ccp_sha224_zero; + sha_zero = sha224_zero_message_hash; break; case CCP_SHA_TYPE_256: - sha_zero = ccp_sha256_zero; + sha_zero = sha256_zero_message_hash; break; default: return -EINVAL; diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 6ade02f04f91..7690467c42f8 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -44,7 +44,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp) { struct ccp_pci *ccp_pci = ccp->dev_specific; struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct msix_entry msix_entry[MSIX_VECTORS]; unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; int v, ret; @@ -86,7 +86,7 @@ e_irq: static int ccp_get_msi_irq(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); int ret; ret = pci_enable_msi(pdev); @@ -133,7 +133,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) { struct ccp_pci *ccp_pci = ccp->dev_specific; struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); if (ccp_pci->msix_count) { while (ccp_pci->msix_count--) @@ -149,7 +149,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) static int ccp_find_mmio_area(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); resource_size_t io_len; unsigned long io_flags; diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index 01b50cb4c982..66dd7c9d08c3 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c @@ -35,8 +35,7 @@ struct ccp_platform { static int ccp_get_irq(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); int ret; ret = platform_get_irq(pdev, 0); @@ -78,8 +77,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); struct resource *ior; ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index ca5c71ab4b4d..eee2c7e6c299 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> @@ -36,14 +32,6 @@ #include <crypto/algapi.h> #include <crypto/des.h> -//#define HIFN_DEBUG - -#ifdef HIFN_DEBUG -#define dprintk(f, a...) printk(f, ##a) -#else -#define dprintk(f, a...) 
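
The ccp hunks above replace open-coded container_of(dev, struct pci_dev, dev) with the to_pci_dev()/to_platform_device() wrappers. Both spell the same pointer arithmetic: recover the enclosing structure from a pointer to its embedded member. A toy, self-contained illustration (simplified macro without the kernel's type checking):

/* Sketch: what to_pci_dev()/to_platform_device() boil down to. */
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_device { int id; };

struct toy_pci_dev {
        int vendor;
        struct toy_device dev;          /* embedded generic device */
};

#define to_toy_pci_dev(d) container_of(d, struct toy_pci_dev, dev)

static int vendor_from_dev(struct toy_device *d)
{
        return to_toy_pci_dev(d)->vendor;
}
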
do {} while (0) -#endif - static char hifn_pll_ref[sizeof("extNNN")] = "ext"; module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); MODULE_PARM_DESC(hifn_pll_ref, @@ -79,12 +67,12 @@ static atomic_t hifn_dev_number; /* DMA registres */ -#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ -#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ +#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ +#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ #define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */ #define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */ #define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */ -#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ +#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ #define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */ #define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */ #define HIFN_CHIP_ID 0x98 /* Chip ID */ @@ -358,10 +346,10 @@ static atomic_t hifn_dev_number; #define HIFN_NAMESIZE 32 #define HIFN_MAX_RESULT_ORDER 5 -#define HIFN_D_CMD_RSIZE 24*1 -#define HIFN_D_SRC_RSIZE 80*1 -#define HIFN_D_DST_RSIZE 80*1 -#define HIFN_D_RES_RSIZE 24*1 +#define HIFN_D_CMD_RSIZE (24 * 1) +#define HIFN_D_SRC_RSIZE (80 * 1) +#define HIFN_D_DST_RSIZE (80 * 1) +#define HIFN_D_RES_RSIZE (24 * 1) #define HIFN_D_DST_DALIGN 4 @@ -386,17 +374,16 @@ static atomic_t hifn_dev_number; #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) #define HIFN_USED_RESULT 12 -struct hifn_desc -{ +struct hifn_desc { volatile __le32 l; volatile __le32 p; }; struct hifn_dma { - struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; - struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; - struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; - struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE + 1]; + struct hifn_desc srcr[HIFN_D_SRC_RSIZE + 1]; + struct hifn_desc dstr[HIFN_D_DST_RSIZE + 1]; + struct hifn_desc resr[HIFN_D_RES_RSIZE + 1]; u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; @@ -410,16 +397,15 @@ struct hifn_dma { int cmdk, srck, dstk, resk; }; -#define HIFN_FLAG_CMD_BUSY (1<<0) -#define HIFN_FLAG_SRC_BUSY (1<<1) -#define HIFN_FLAG_DST_BUSY (1<<2) -#define HIFN_FLAG_RES_BUSY (1<<3) -#define HIFN_FLAG_OLD_KEY (1<<4) +#define HIFN_FLAG_CMD_BUSY (1 << 0) +#define HIFN_FLAG_SRC_BUSY (1 << 1) +#define HIFN_FLAG_DST_BUSY (1 << 2) +#define HIFN_FLAG_RES_BUSY (1 << 3) +#define HIFN_FLAG_OLD_KEY (1 << 4) #define HIFN_DEFAULT_ACTIVE_NUM 5 -struct hifn_device -{ +struct hifn_device { char name[HIFN_NAMESIZE]; int irq; @@ -432,7 +418,7 @@ struct hifn_device u32 dmareg; - void *sa[HIFN_D_RES_RSIZE]; + void *sa[HIFN_D_RES_RSIZE]; spinlock_t lock; @@ -447,7 +433,7 @@ struct hifn_device struct tasklet_struct tasklet; - struct crypto_queue queue; + struct crypto_queue queue; struct list_head alg_list; unsigned int pk_clk_freq; @@ -468,8 +454,7 @@ struct hifn_device #define HIFN_D_JUMP 0x40000000 #define HIFN_D_VALID 0x80000000 -struct hifn_base_command -{ +struct hifn_base_command { volatile __le16 masks; volatile __le16 session_num; volatile __le16 total_source_count; @@ -491,12 +476,11 @@ struct hifn_base_command /* * Structure to help build up the command data structure. 
*/ -struct hifn_crypt_command -{ - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; +struct hifn_crypt_command { + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; }; #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ @@ -522,12 +506,11 @@ struct hifn_crypt_command /* * Structure to help build up the command data structure. */ -struct hifn_mac_command -{ - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; +struct hifn_mac_command { + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; }; #define HIFN_MAC_CMD_ALG_MASK 0x0001 @@ -551,12 +534,11 @@ struct hifn_mac_command #define HIFN_MAC_CMD_POS_IPSEC 0x0200 #define HIFN_MAC_CMD_NEW_KEY 0x0800 -struct hifn_comp_command -{ - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; +struct hifn_comp_command { + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; }; #define HIFN_COMP_CMD_SRCLEN_M 0xc000 @@ -570,12 +552,11 @@ struct hifn_comp_command #define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ #define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ -struct hifn_base_result -{ - volatile __le16 flags; - volatile __le16 session; - volatile __le16 src_cnt; /* 15:0 of source count */ - volatile __le16 dst_cnt; /* 15:0 of dest count */ +struct hifn_base_result { + volatile __le16 flags; + volatile __le16 session; + volatile __le16 src_cnt; /* 15:0 of source count */ + volatile __le16 dst_cnt; /* 15:0 of dest count */ }; #define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ @@ -584,8 +565,7 @@ struct hifn_base_result #define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ #define HIFN_BASE_RES_DSTLEN_S 12 -struct hifn_comp_result -{ +struct hifn_comp_result { volatile __le16 flags; volatile __le16 crc; }; @@ -596,18 +576,16 @@ struct hifn_comp_result #define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ #define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ -struct hifn_mac_result -{ - volatile __le16 flags; - volatile __le16 reserved; +struct hifn_mac_result { + volatile __le16 flags; + volatile __le16 reserved; /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ }; #define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ #define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ -struct hifn_crypt_result -{ +struct hifn_crypt_result { volatile __le16 flags; volatile __le16 reserved; }; @@ -622,11 +600,10 @@ struct hifn_crypt_result #define HIFN_POLL_SCALAR 0x0 #endif -#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ #define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ -struct hifn_crypto_alg -{ +struct hifn_crypto_alg { struct list_head entry; struct crypto_alg alg; struct hifn_device *dev; @@ -634,24 +611,21 @@ struct hifn_crypto_alg #define ASYNC_SCATTERLIST_CACHE 16 -#define ASYNC_FLAGS_MISALIGNED (1<<0) +#define ASYNC_FLAGS_MISALIGNED (1 << 0) -struct hifn_cipher_walk -{ +struct hifn_cipher_walk { struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; u32 flags; int num; }; -struct hifn_context -{ +struct hifn_context { u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; struct hifn_device *dev; unsigned int keysize; }; -struct hifn_request_context -{ +struct 
hifn_request_context { u8 *iv; unsigned int ivsize; u8 op, type, mode, unused; @@ -693,7 +667,7 @@ static void hifn_wait_puc(struct hifn_device *dev) int i; u32 ret; - for (i=10000; i > 0; --i) { + for (i = 10000; i > 0; --i) { ret = hifn_read_0(dev, HIFN_0_PUCTRL); if (!(ret & HIFN_PUCTRL_RESET)) break; @@ -702,7 +676,7 @@ static void hifn_wait_puc(struct hifn_device *dev) } if (!i) - dprintk("%s: Failed to reset PUC unit.\n", dev->name); + dev_err(&dev->pdev->dev, "Failed to reset PUC unit.\n"); } static void hifn_reset_puc(struct hifn_device *dev) @@ -749,13 +723,12 @@ static void hifn_reset_dma(struct hifn_device *dev, int full) hifn_reset_puc(dev); } -static u32 hifn_next_signature(u_int32_t a, u_int cnt) +static u32 hifn_next_signature(u32 a, u_int cnt) { int i; u32 v; for (i = 0; i < cnt; i++) { - /* get the parity */ v = a & 0x80080125; v ^= v >> 16; @@ -846,33 +819,28 @@ static int hifn_init_pubrng(struct hifn_device *dev) hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); - for (i=100; i > 0; --i) { + for (i = 100; i > 0; --i) { mdelay(1); if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } - if (!i) - dprintk("Chip %s: Failed to initialise public key engine.\n", - dev->name); - else { + if (!i) { + dev_err(&dev->pdev->dev, "Failed to initialise public key engine.\n"); + } else { hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); dev->dmareg |= HIFN_DMAIER_PUBDONE; hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); - dprintk("Chip %s: Public key engine has been successfully " - "initialised.\n", dev->name); + dev_dbg(&dev->pdev->dev, "Public key engine has been successfully initialised.\n"); } - /* - * Enable RNG engine. - */ + /* Enable RNG engine. */ hifn_write_1(dev, HIFN_1_RNG_CONFIG, hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); - dprintk("Chip %s: RNG engine has been successfully initialised.\n", - dev->name); + dev_dbg(&dev->pdev->dev, "RNG engine has been successfully initialised.\n"); #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG /* First value must be discarded */ @@ -896,8 +864,8 @@ static int hifn_enable_crypto(struct hifn_device *dev) } } - if (offtbl == NULL) { - dprintk("Chip %s: Unknown card!\n", dev->name); + if (!offtbl) { + dev_err(&dev->pdev->dev, "Unknown card!\n"); return -ENODEV; } @@ -912,7 +880,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0); mdelay(1); - for (i=0; i<12; ++i) { + for (i = 0; i < 12; ++i) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr); @@ -920,7 +888,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) } hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg); - dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev)); + dev_dbg(&dev->pdev->dev, "%s %s.\n", dev->name, pci_name(dev->pdev)); return 0; } @@ -931,16 +899,14 @@ static void hifn_init_dma(struct hifn_device *dev) u32 dptr = dev->desc_dma; int i; - for (i=0; i<HIFN_D_CMD_RSIZE; ++i) + for (i = 0; i < HIFN_D_CMD_RSIZE; ++i) dma->cmdr[i].p = __cpu_to_le32(dptr + offsetof(struct hifn_dma, command_bufs[i][0])); - for (i=0; i<HIFN_D_RES_RSIZE; ++i) + for (i = 0; i < HIFN_D_RES_RSIZE; ++i) dma->resr[i].p = __cpu_to_le32(dptr + offsetof(struct hifn_dma, result_bufs[i][0])); - /* - * Setup LAST descriptors. - */ + /* Setup LAST descriptors. 
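
hifn_next_signature() above folds the masked word with shifts and xors ("v ^= v >> 16", ...), the classic way to reduce a 32-bit value to its parity bit. A standalone sketch of that folding technique; the association is inferred from the removed "get the parity" comment, and only the first fold is visible in the hunk.

/* Sketch: parity of a 32-bit word by xor-folding.  Each fold halves the
 * width while preserving the xor of all bits; the final low bit is the
 * parity of the original word. */
#include <stdint.h>

static unsigned int parity32(uint32_t v)
{
        v ^= v >> 16;
        v ^= v >> 8;
        v ^= v >> 4;
        v ^= v >> 2;
        v ^= v >> 1;
        return v & 1;
}
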
*/ dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr + @@ -960,7 +926,7 @@ static void hifn_init_dma(struct hifn_device *dev) * to calculate the optimal multiplier. For PCI we assume 66MHz, since that * allows us to operate without the risk of overclocking the chip. If it * actually uses 33MHz, the chip will operate at half the speed, this can be - * overriden by specifying the frequency as module parameter (pci33). + * overridden by specifying the frequency as module parameter (pci33). * * Unfortunately the PCI clock is not very suitable since the HIFN needs a * stable clock and the PCI clock frequency may vary, so the default is the @@ -984,9 +950,8 @@ static void hifn_init_pll(struct hifn_device *dev) freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); else { freq = 66; - printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, " - "override with hifn_pll_ref=%.3s<frequency>\n", - freq, hifn_pll_ref); + dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n", + freq, hifn_pll_ref); } m = HIFN_PLL_FCK_MAX / freq; @@ -1174,17 +1139,17 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, mask = 0; switch (rctx->op) { - case ACRYPTO_OP_DECRYPT: - mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; - break; - case ACRYPTO_OP_ENCRYPT: - mask = HIFN_BASE_CMD_CRYPT; - break; - case ACRYPTO_OP_HMAC: - mask = HIFN_BASE_CMD_MAC; - break; - default: - goto err_out; + case ACRYPTO_OP_DECRYPT: + mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; + break; + case ACRYPTO_OP_ENCRYPT: + mask = HIFN_BASE_CMD_CRYPT; + break; + case ACRYPTO_OP_HMAC: + mask = HIFN_BASE_CMD_MAC; + break; + default: + goto err_out; } buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, @@ -1199,53 +1164,53 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, md |= HIFN_CRYPT_CMD_NEW_IV; switch (rctx->mode) { - case ACRYPTO_MODE_ECB: - md |= HIFN_CRYPT_CMD_MODE_ECB; - break; - case ACRYPTO_MODE_CBC: - md |= HIFN_CRYPT_CMD_MODE_CBC; - break; - case ACRYPTO_MODE_CFB: - md |= HIFN_CRYPT_CMD_MODE_CFB; - break; - case ACRYPTO_MODE_OFB: - md |= HIFN_CRYPT_CMD_MODE_OFB; - break; - default: - goto err_out; + case ACRYPTO_MODE_ECB: + md |= HIFN_CRYPT_CMD_MODE_ECB; + break; + case ACRYPTO_MODE_CBC: + md |= HIFN_CRYPT_CMD_MODE_CBC; + break; + case ACRYPTO_MODE_CFB: + md |= HIFN_CRYPT_CMD_MODE_CFB; + break; + case ACRYPTO_MODE_OFB: + md |= HIFN_CRYPT_CMD_MODE_OFB; + break; + default: + goto err_out; } switch (rctx->type) { - case ACRYPTO_TYPE_AES_128: - if (ctx->keysize != 16) - goto err_out; - md |= HIFN_CRYPT_CMD_KSZ_128 | - HIFN_CRYPT_CMD_ALG_AES; - break; - case ACRYPTO_TYPE_AES_192: - if (ctx->keysize != 24) - goto err_out; - md |= HIFN_CRYPT_CMD_KSZ_192 | - HIFN_CRYPT_CMD_ALG_AES; - break; - case ACRYPTO_TYPE_AES_256: - if (ctx->keysize != 32) - goto err_out; - md |= HIFN_CRYPT_CMD_KSZ_256 | - HIFN_CRYPT_CMD_ALG_AES; - break; - case ACRYPTO_TYPE_3DES: - if (ctx->keysize != 24) - goto err_out; - md |= HIFN_CRYPT_CMD_ALG_3DES; - break; - case ACRYPTO_TYPE_DES: - if (ctx->keysize != 8) - goto err_out; - md |= HIFN_CRYPT_CMD_ALG_DES; - break; - default: + case ACRYPTO_TYPE_AES_128: + if (ctx->keysize != 16) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_128 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_192: + if (ctx->keysize != 24) goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_192 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_256: + if (ctx->keysize != 32) + goto 
err_out; + md |= HIFN_CRYPT_CMD_KSZ_256 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_3DES: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_3DES; + break; + case ACRYPTO_TYPE_DES: + if (ctx->keysize != 8) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_DES; + break; + default: + goto err_out; } buf_pos += hifn_setup_crypto_command(dev, buf_pos, @@ -1265,8 +1230,9 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); dma->cmdi = 0; - } else - dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID); + } else { + dma->cmdr[dma->cmdi - 1].l |= __cpu_to_le32(HIFN_D_VALID); + } if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); @@ -1424,7 +1390,7 @@ static int hifn_cipher_walk_init(struct hifn_cipher_walk *w, sg_init_table(w->cache, num); w->num = 0; - for (i=0; i<num; ++i) { + for (i = 0; i < num; ++i) { struct page *page = alloc_page(gfp_flags); struct scatterlist *s; @@ -1444,7 +1410,7 @@ static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w) { int i; - for (i=0; i<w->num; ++i) { + for (i = 0; i < w->num; ++i) { struct scatterlist *s = &w->cache[i]; __free_page(sg_page(s)); @@ -1471,8 +1437,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, drest -= copy; nbytes -= copy; - dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", - __func__, copy, size, drest, nbytes); + pr_debug("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", + __func__, copy, size, drest, nbytes); dst++; idx++; @@ -1499,8 +1465,8 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, dst = &req->dst[idx]; - dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", - __func__, dst->length, dst->offset, offset, nbytes); + pr_debug("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", + __func__, dst->length, dst->offset, offset, nbytes); if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || @@ -1525,10 +1491,10 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, * to put there additional blocksized chunk, * so we mark that page as containing only * blocksize aligned chunks: - * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1)); + * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1)); * and increase number of bytes to be processed * in next chunk: - * nbytes += diff; + * nbytes += diff; */ nbytes += diff; @@ -1536,14 +1502,13 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, * Temporary of course... * Kick author if you will catch this one. 
*/ - printk(KERN_ERR "%s: dlen: %u, nbytes: %u," - "slen: %u, offset: %u.\n", - __func__, dlen, nbytes, slen, offset); - printk(KERN_ERR "%s: please contact author to fix this " - "issue, generally you should not catch " - "this path under any condition but who " - "knows how did you use crypto code.\n" - "Thank you.\n", __func__); + pr_err("%s: dlen: %u, nbytes: %u, slen: %u, offset: %u.\n", + __func__, dlen, nbytes, slen, offset); + pr_err("%s: please contact author to fix this " + "issue, generally you should not catch " + "this path under any condition but who " + "knows how did you use crypto code.\n" + "Thank you.\n", __func__); BUG(); } else { copy += diff + nbytes; @@ -1630,70 +1595,16 @@ err_out: spin_unlock_irqrestore(&dev->lock, flags); err_out_exit: if (err) { - printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " - "type: %u, err: %d.\n", - dev->name, rctx->iv, rctx->ivsize, - ctx->key, ctx->keysize, - rctx->mode, rctx->op, rctx->type, err); + dev_info(&dev->pdev->dev, "iv: %p [%d], key: %p [%d], mode: %u, op: %u, " + "type: %u, err: %d.\n", + rctx->iv, rctx->ivsize, + ctx->key, ctx->keysize, + rctx->mode, rctx->op, rctx->type, err); } return err; } -static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) -{ - int n, err; - u8 src[16]; - struct hifn_context ctx; - struct hifn_request_context rctx; - u8 fips_aes_ecb_from_zero[16] = { - 0x66, 0xE9, 0x4B, 0xD4, - 0xEF, 0x8A, 0x2C, 0x3B, - 0x88, 0x4C, 0xFA, 0x59, - 0xCA, 0x34, 0x2B, 0x2E}; - struct scatterlist sg; - - memset(src, 0, sizeof(src)); - memset(ctx.key, 0, sizeof(ctx.key)); - - ctx.dev = dev; - ctx.keysize = 16; - rctx.ivsize = 0; - rctx.iv = NULL; - rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; - rctx.mode = ACRYPTO_MODE_ECB; - rctx.type = ACRYPTO_TYPE_AES_128; - rctx.walk.cache[0].length = 0; - - sg_init_one(&sg, &src, sizeof(src)); - - err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL); - if (err) - goto err_out; - - dev->started = 0; - msleep(200); - - dprintk("%s: decoded: ", dev->name); - for (n=0; n<sizeof(src); ++n) - dprintk("%02x ", src[n]); - dprintk("\n"); - dprintk("%s: FIPS : ", dev->name); - for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n) - dprintk("%02x ", fips_aes_ecb_from_zero[n]); - dprintk("\n"); - - if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) { - printk(KERN_INFO "%s: AES 128 ECB test has been successfully " - "passed.\n", dev->name); - return 0; - } - -err_out: - printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name); - return -1; -} - static int hifn_start_device(struct hifn_device *dev) { int err; @@ -1739,8 +1650,8 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset saddr += copy; offset = 0; - dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", - __func__, copy, size, srest, nbytes); + pr_debug("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", + __func__, copy, size, srest, nbytes); dst++; idx++; @@ -1760,7 +1671,8 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i) dev->sa[i] = NULL; dev->started--; if (dev->started < 0) - printk("%s: started: %d.\n", __func__, dev->started); + dev_info(&dev->pdev->dev, "%s: started: %d.\n", __func__, + dev->started); spin_unlock_irqrestore(&dev->lock, flags); BUG_ON(dev->started < 0); } @@ -1779,7 +1691,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error) t = &rctx->walk.cache[idx]; dst = &req->dst[idx]; - dprintk("\n%s: sg_page(t): %p, t->length: %u, " + pr_debug("\n%s: sg_page(t): 
%p, t->length: %u, " "sg_page(dst): %p, dst->length: %u, " "nbytes: %u.\n", __func__, sg_page(t), t->length, @@ -1815,9 +1727,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int i, u; - dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " "k: %d.%d.%d.%d.\n", - dev->name, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdu, dma->srcu, dma->dstu, dma->resu, dma->cmdk, dma->srck, dma->dstk, dma->resk); @@ -1870,9 +1781,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) } dma->dstk = i; dma->dstu = u; - dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + dev_dbg(&dev->pdev->dev, "ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " "k: %d.%d.%d.%d.\n", - dev->name, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdu, dma->srcu, dma->dstu, dma->resu, dma->cmdk, dma->srck, dma->dstk, dma->resk); @@ -1921,21 +1831,22 @@ static void hifn_work(struct work_struct *work) int i; struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; - printk("%s: r: %08x, active: %d, started: %d, " - "success: %lu: qlen: %u/%u, reset: %d.\n", - dev->name, r, dev->active, dev->started, - dev->success, dev->queue.qlen, dev->queue.max_qlen, - reset); + dev_info(&dev->pdev->dev, + "r: %08x, active: %d, started: %d, " + "success: %lu: qlen: %u/%u, reset: %d.\n", + r, dev->active, dev->started, + dev->success, dev->queue.qlen, dev->queue.max_qlen, + reset); - printk("%s: res: ", __func__); - for (i=0; i<HIFN_D_RES_RSIZE; ++i) { - printk("%x.%p ", dma->resr[i].l, dev->sa[i]); + dev_info(&dev->pdev->dev, "%s: res: ", __func__); + for (i = 0; i < HIFN_D_RES_RSIZE; ++i) { + pr_info("%x.%p ", dma->resr[i].l, dev->sa[i]); if (dev->sa[i]) { hifn_process_ready(dev->sa[i], -ENODEV); hifn_complete_sa(dev, i); } } - printk("\n"); + pr_info("\n"); hifn_reset_dma(dev, 1); hifn_stop_device(dev); @@ -1957,9 +1868,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data) dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR); - dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " + dev_dbg(&dev->pdev->dev, "1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", - dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, + dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdu, dma->srcu, dma->dstu, dma->resu); @@ -1978,9 +1889,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data) if (restart) { u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); - printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", - dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), - !!(dmacsr & HIFN_DMACSR_D_OVER), + dev_warn(&dev->pdev->dev, "overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", + !!(dmacsr & HIFN_DMACSR_R_OVER), + !!(dmacsr & HIFN_DMACSR_D_OVER), puisr, !!(puisr & HIFN_PUISR_DSTOVER)); if (!!(puisr & HIFN_PUISR_DSTOVER)) hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); @@ -1991,18 +1902,18 @@ static irqreturn_t hifn_interrupt(int irq, void *data) restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { - printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n", - dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), - !!(dmacsr & HIFN_DMACSR_S_ABORT), - !!(dmacsr & HIFN_DMACSR_D_ABORT), - !!(dmacsr & HIFN_DMACSR_R_ABORT)); + dev_warn(&dev->pdev->dev, "abort: c: %d, s: %d, d: %d, r: %d.\n", + 
!!(dmacsr & HIFN_DMACSR_C_ABORT), + !!(dmacsr & HIFN_DMACSR_S_ABORT), + !!(dmacsr & HIFN_DMACSR_D_ABORT), + !!(dmacsr & HIFN_DMACSR_R_ABORT)); hifn_reset_dma(dev, 1); hifn_init_dma(dev); hifn_init_registers(dev); } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { - dprintk("%s: wait on command.\n", dev->name); + dev_dbg(&dev->pdev->dev, "wait on command.\n"); dev->dmareg &= ~(HIFN_DMAIER_C_WAIT); hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); } @@ -2020,19 +1931,19 @@ static void hifn_flush(struct hifn_device *dev) struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int i; - for (i=0; i<HIFN_D_RES_RSIZE; ++i) { + for (i = 0; i < HIFN_D_RES_RSIZE; ++i) { struct hifn_desc *d = &dma->resr[i]; if (dev->sa[i]) { hifn_process_ready(dev->sa[i], - (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); + (d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0); hifn_complete_sa(dev, i); } } spin_lock_irqsave(&dev->lock, flags); while ((async_req = crypto_dequeue_request(&dev->queue))) { - req = container_of(async_req, struct ablkcipher_request, base); + req = ablkcipher_request_cast(async_req); spin_unlock_irqrestore(&dev->lock, flags); hifn_process_ready(req, -ENODEV); @@ -2057,7 +1968,7 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, if (len == HIFN_DES_KEY_LENGTH) { u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); - + if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; @@ -2151,7 +2062,7 @@ static int hifn_process_queue(struct hifn_device *dev) if (backlog) backlog->complete(backlog, -EINPROGRESS); - req = container_of(async_req, struct ablkcipher_request, base); + req = ablkcipher_request_cast(async_req); err = hifn_handle_req(req); if (err) @@ -2298,9 +2209,7 @@ static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req) ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); } -/* - * 3DES decryption functions. - */ +/* 3DES decryption functions. 
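
hifn_process_queue() above uses the standard crypto_queue backlog handshake: when a request that had been backlogged is finally dequeued, its submitter is first completed with -EINPROGRESS so it can stop waiting, and only then is the request handed to the engine. A schematic of that sequence, simplified and assuming the usual crypto_get_backlog() companion helper (error handling omitted):

/* Schematic only: one iteration of the queue/backlog handling above.
 * Field names follow the driver's struct hifn_device. */
static void example_dequeue_one(struct hifn_device *dev)
{
        struct crypto_async_request *async_req, *backlog;
        struct ablkcipher_request *req;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        backlog = crypto_get_backlog(&dev->queue);
        async_req = crypto_dequeue_request(&dev->queue);
        spin_unlock_irqrestore(&dev->lock, flags);

        /* Tell the backlogged submitter it may stop waiting. */
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (async_req) {
                req = ablkcipher_request_cast(async_req);
                /* hand 'req' to the engine here */
        }
}
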
*/ static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req) { return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, @@ -2322,8 +2231,7 @@ static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req) ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); } -struct hifn_alg_template -{ +struct hifn_alg_template { char name[CRYPTO_MAX_ALG_NAME]; char drv_name[CRYPTO_MAX_ALG_NAME]; unsigned int bsize; @@ -2483,7 +2391,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t) struct hifn_crypto_alg *alg; int err; - alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL); + alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (!alg) return -ENOMEM; @@ -2530,7 +2438,7 @@ static int hifn_register_alg(struct hifn_device *dev) { int i, err; - for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) { + for (i = 0; i < ARRAY_SIZE(hifn_alg_templates); ++i) { err = hifn_alg_alloc(dev, &hifn_alg_templates[i]); if (err) goto err_out_exit; @@ -2575,7 +2483,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_out_disable_pci_device; snprintf(name, sizeof(name), "hifn%d", - atomic_inc_return(&hifn_dev_number)-1); + atomic_inc_return(&hifn_dev_number) - 1); err = pci_request_regions(pdev, name); if (err) @@ -2584,8 +2492,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE || pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE || pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) { - dprintk("%s: Broken hardware - I/O regions are too small.\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Broken hardware - I/O regions are too small.\n"); err = -ENODEV; goto err_out_free_regions; } @@ -2602,7 +2509,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) snprintf(dev->name, sizeof(dev->name), "%s", name); spin_lock_init(&dev->lock); - for (i=0; i<3; ++i) { + for (i = 0; i < 3; ++i) { unsigned long addr, size; addr = pci_resource_start(pdev, i); @@ -2618,7 +2525,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma), &dev->desc_dma); if (!dev->desc_virt) { - dprintk("Failed to allocate descriptor rings.\n"); + dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n"); err = -ENOMEM; goto err_out_unmap_bars; } @@ -2626,7 +2533,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->pdev = pdev; dev->irq = pdev->irq; - for (i=0; i<HIFN_D_RES_RSIZE; ++i) + for (i = 0; i < HIFN_D_RES_RSIZE; ++i) dev->sa[i] = NULL; pci_set_drvdata(pdev, dev); @@ -2637,7 +2544,8 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev); if (err) { - dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err); + dev_err(&pdev->dev, "Failed to request IRQ%d: err: %d.\n", + dev->irq, err); dev->irq = 0; goto err_out_free_desc; } @@ -2646,10 +2554,6 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_out_free_irq; - err = hifn_test(dev, 1, 0); - if (err) - goto err_out_stop_device; - err = hifn_register_rng(dev); if (err) goto err_out_stop_device; @@ -2661,9 +2565,9 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_DELAYED_WORK(&dev->work, hifn_work); schedule_delayed_work(&dev->work, HZ); - dprintk("HIFN crypto accelerator card at %s has been " - "successfully registered as %s.\n", - pci_name(pdev), 
dev->name); + dev_dbg(&pdev->dev, "HIFN crypto accelerator card at %s has been " + "successfully registered as %s.\n", + pci_name(pdev), dev->name); return 0; @@ -2680,7 +2584,7 @@ err_out_free_desc: dev->desc_virt, dev->desc_dma); err_out_unmap_bars: - for (i=0; i<3; ++i) + for (i = 0; i < 3; ++i) if (dev->bar[i]) iounmap(dev->bar[i]); @@ -2715,7 +2619,7 @@ static void hifn_remove(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct hifn_dma), dev->desc_virt, dev->desc_dma); - for (i=0; i<3; ++i) + for (i = 0; i < 3; ++i) if (dev->bar[i]) iounmap(dev->bar[i]); @@ -2750,8 +2654,7 @@ static int __init hifn_init(void) if (strncmp(hifn_pll_ref, "ext", 3) && strncmp(hifn_pll_ref, "pci", 3)) { - printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " - "must be pci or ext"); + pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext"); return -EINVAL; } @@ -2763,22 +2666,21 @@ static int __init hifn_init(void) if (hifn_pll_ref[3] != '\0') { freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); if (freq < 20 || freq > 100) { - printk(KERN_ERR "hifn795x: invalid hifn_pll_ref " - "frequency, must be in the range " - "of 20-100"); + pr_err("hifn795x: invalid hifn_pll_ref frequency, must" + "be in the range of 20-100"); return -EINVAL; } } err = pci_register_driver(&hifn_pci_driver); if (err < 0) { - dprintk("Failed to register PCI driver for %s device.\n", - hifn_pci_driver.name); + pr_err("Failed to register PCI driver for %s device.\n", + hifn_pci_driver.name); return -ENODEV; } - printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " - "has been successfully registered.\n"); + pr_info("Driver for HIFN 795x crypto accelerator chip " + "has been successfully registered.\n"); return 0; } @@ -2787,8 +2689,8 @@ static void __exit hifn_fini(void) { pci_unregister_driver(&hifn_pci_driver); - printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " - "has been successfully unregistered.\n"); + pr_info("Driver for HIFN 795x crypto accelerator chip " + "has been successfully unregistered.\n"); } module_init(hifn_init); diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8f2790353281..e52496a172d0 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -510,10 +510,8 @@ npe_error: printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); ret = -EIO; err: - if (ctx_pool) - dma_pool_destroy(ctx_pool); - if (buffer_pool) - dma_pool_destroy(buffer_pool); + dma_pool_destroy(ctx_pool); + dma_pool_destroy(buffer_pool); npe_release(npe_c); return ret; } diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 6edae64bb387..dcf1fceb9336 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -401,7 +401,15 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, return -EINVAL; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + if (creq->src_nents < 0) { + dev_err(cesa_dev->dev, "Invalid number of src SG"); + return creq->src_nents; + } creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); + if (creq->dst_nents < 0) { + dev_err(cesa_dev->dev, "Invalid number of dst SG"); + return creq->dst_nents; + } mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, CESA_SA_DESC_CFG_OP_MSK); diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 6ec55b4a087b..683cca9ac3c4 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -712,6 +712,10 @@ static int mv_cesa_ahash_req_init(struct 
ahash_request *req, bool *cached) creq->req.base.type = CESA_STD_REQ; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + if (creq->src_nents < 0) { + dev_err(cesa_dev->dev, "Invalid number of src SG"); + return creq->src_nents; + } ret = mv_cesa_ahash_cache_req(req, cached); if (ret) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 5450880abb7b..b85a7a7dbf63 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -241,7 +241,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) struct n2_ahash_alg { struct list_head entry; - const char *hash_zero; + const u8 *hash_zero; const u32 *hash_init; u8 hw_op_hashsz; u8 digest_size; @@ -1267,7 +1267,7 @@ static LIST_HEAD(cipher_algs); struct n2_hash_tmpl { const char *name; - const char *hash_zero; + const u8 *hash_zero; const u32 *hash_init; u8 hw_op_hashsz; u8 digest_size; @@ -1276,40 +1276,19 @@ struct n2_hash_tmpl { u8 hmac_type; }; -static const char md5_zero[MD5_DIGEST_SIZE] = { - 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, - 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, -}; static const u32 md5_init[MD5_HASH_WORDS] = { cpu_to_le32(MD5_H0), cpu_to_le32(MD5_H1), cpu_to_le32(MD5_H2), cpu_to_le32(MD5_H3), }; -static const char sha1_zero[SHA1_DIGEST_SIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, - 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, - 0x07, 0x09 -}; static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, }; -static const char sha256_zero[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, - 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, - 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, - 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, }; -static const char sha224_zero[SHA224_DIGEST_SIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, - 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, - 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, - 0x2f -}; static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, @@ -1317,7 +1296,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { static const struct n2_hash_tmpl hash_tmpls[] = { { .name = "md5", - .hash_zero = md5_zero, + .hash_zero = md5_zero_message_hash, .hash_init = md5_init, .auth_type = AUTH_TYPE_MD5, .hmac_type = AUTH_TYPE_HMAC_MD5, @@ -1325,7 +1304,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .digest_size = MD5_DIGEST_SIZE, .block_size = MD5_HMAC_BLOCK_SIZE }, { .name = "sha1", - .hash_zero = sha1_zero, + .hash_zero = sha1_zero_message_hash, .hash_init = sha1_init, .auth_type = AUTH_TYPE_SHA1, .hmac_type = AUTH_TYPE_HMAC_SHA1, @@ -1333,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .digest_size = SHA1_DIGEST_SIZE, .block_size = SHA1_BLOCK_SIZE }, { .name = "sha256", - .hash_zero = sha256_zero, + .hash_zero = sha256_zero_message_hash, .hash_init = sha256_init, .auth_type = AUTH_TYPE_SHA256, .hmac_type = AUTH_TYPE_HMAC_SHA256, @@ -1341,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE }, { .name = "sha224", - .hash_zero = sha224_zero, + .hash_zero = sha224_zero_message_hash, .hash_init = sha224_init, .auth_type = AUTH_TYPE_SHA256, .hmac_type = 
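
The ixp4xx cleanup above removes the "if (pool)" guards because dma_pool_destroy(), like kfree(), is a no-op when passed NULL, so error paths can call it unconditionally. A toy illustration of that NULL-tolerant destructor pattern (the toy_pool type is hypothetical):

/* Sketch: a destructor that tolerates NULL, so callers need no guard. */
#include <stdlib.h>

struct toy_pool { void *mem; };

static void toy_pool_destroy(struct toy_pool *p)
{
        if (!p)                         /* NULL is a no-op, like dma_pool_destroy(NULL) */
                return;
        free(p->mem);
        free(p);
}

/* The error path can then be unconditional:
 *      toy_pool_destroy(ctx_pool);
 *      toy_pool_destroy(buffer_pool);
 */
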
AUTH_TYPE_RESERVED, @@ -2243,22 +2222,19 @@ static struct platform_driver n2_mau_driver = { .remove = n2_mau_remove, }; +static struct platform_driver * const drivers[] = { + &n2_crypto_driver, + &n2_mau_driver, +}; + static int __init n2_init(void) { - int err = platform_driver_register(&n2_crypto_driver); - - if (!err) { - err = platform_driver_register(&n2_mau_driver); - if (err) - platform_driver_unregister(&n2_crypto_driver); - } - return err; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit n2_exit(void) { - platform_driver_unregister(&n2_mau_driver); - platform_driver_unregister(&n2_crypto_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(n2_init); diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 9ef51fafdbff..1710f80a09ec 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, (unsigned int)ccw, (unsigned int)be32_to_cpu(crb->ccw)); + /* + * NX842 coprocessor sets 3rd bit in CR register with XER[S0]. + * XER[S0] is the integer summary overflow bit which is nothing + * to do NX. Since this bit can be set with other return values, + * mask this bit. + */ + ret &= ~ICSWX_XERS0; + switch (ret) { case ICSWX_INITIATED: ret = wait_for_csb(wmem, csb); @@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, pr_err_ratelimited("ICSWX rejected\n"); ret = -EPROTO; break; - default: - pr_err_ratelimited("Invalid ICSWX return code %x\n", ret); - ret = -EPROTO; - break; } if (!ret) @@ -525,7 +529,6 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, static int __init nx842_powernv_probe(struct device_node *dn) { struct nx842_coproc *coproc; - struct property *ct_prop, *ci_prop; unsigned int ct, ci; int chip_id; @@ -534,18 +537,16 @@ static int __init nx842_powernv_probe(struct device_node *dn) pr_err("ibm,chip-id missing\n"); return -EINVAL; } - ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL); - if (!ct_prop) { + + if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { pr_err("ibm,842-coprocessor-type missing\n"); return -EINVAL; } - ct = be32_to_cpu(*(unsigned int *)ct_prop->value); - ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL); - if (!ci_prop) { + + if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) { pr_err("ibm,842-coprocessor-instance missing\n"); return -EINVAL; } - ci = be32_to_cpu(*(unsigned int *)ci_prop->value); coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); if (!coproc) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index eba23147c0ee..dd355bd19474 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -539,8 +539,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { - int err = 0; - pr_debug("total: %d\n", dd->total); omap_aes_dma_stop(dd); @@ -548,7 +546,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - return err; + return 0; } static int omap_aes_check_aligned(struct scatterlist *sg, int total) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 0a70e46d5416..dd7b93f2f94c 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -527,8 +527,6 @@ 
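
n2_init() above switches from hand-rolled register/unregister of its two platform drivers to platform_register_drivers()/platform_unregister_drivers(). A simplified model of what the helper does (not the kernel source): register in order and unwind on failure, mirroring the open-coded logic being deleted.

/* Simplified model of platform_register_drivers(). */
#include <linux/platform_device.h>

static int register_drivers_model(struct platform_driver * const drv[],
                                  unsigned int count)
{
        unsigned int i;
        int err;

        for (i = 0; i < count; i++) {
                err = platform_driver_register(drv[i]);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        while (i-- > 0)                 /* undo only what succeeded */
                platform_driver_unregister(drv[i]);
        return err;
}
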
static void omap_des_finish_req(struct omap_des_dev *dd, int err) static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) { - int err = 0; - pr_debug("total: %d\n", dd->total); omap_des_dma_stop(dd); @@ -536,7 +534,7 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - return err; + return 0; } static int omap_des_copy_needed(struct scatterlist *sg) @@ -1086,6 +1084,7 @@ static int omap_des_probe(struct platform_device *pdev) dd->phys_base = res->start; pm_runtime_enable(dev); + pm_runtime_irq_safe(dev); err = pm_runtime_get_sync(dev); if (err < 0) { pm_runtime_put_noidle(dev); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index da2d6777bd09..441e86b23571 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -238,7 +238,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. * We could avoid some copying here but it's probably not worth it. */ - if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) { + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { ecb_crypt_copy(in, out, key, cword, count); return; } @@ -250,7 +250,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) { /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ - if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE)) + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) return cbc_crypt_copy(in, out, key, iv, cword, count); return rep_xcrypt_cbc(in, out, key, iv, cword, count); @@ -515,7 +515,7 @@ static int __init padlock_init(void) if (!x86_match_cpu(padlock_cpu_id)) return -ENODEV; - if (!cpu_has_xcrypt_enabled) { + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); return -ENODEV; } diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 4e154c9b9206..8c5f90647b7a 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -540,7 +540,7 @@ static int __init padlock_init(void) struct shash_alg *sha1; struct shash_alg *sha256; - if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled) + if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) return -ENODEV; /* Register the newly added algorithm module if on * diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 615da961c4d8..3b1c7ecf078f 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -272,12 +272,6 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, return indx; } -/* Count the number of scatterlist entries in a scatterlist. 
*/ -static inline int sg_count(struct scatterlist *sg_list, int nbytes) -{ - return sg_nents_for_len(sg_list, nbytes); -} - static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) { ddt->p = phys; @@ -295,12 +289,17 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, enum dma_data_direction dir, dma_addr_t *ddt_phys) { - unsigned nents, mapped_ents; + unsigned mapped_ents; struct scatterlist *cur; struct spacc_ddt *ddt; int i; + int nents; - nents = sg_count(payload, nbytes); + nents = sg_nents_for_len(payload, nbytes); + if (nents < 0) { + dev_err(engine->dev, "Invalid numbers of SG.\n"); + return NULL; + } mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); if (mapped_ents + 1 > MAX_DDT_LEN) @@ -328,7 +327,7 @@ static int spacc_aead_make_ddts(struct aead_request *areq) struct spacc_engine *engine = req->engine; struct spacc_ddt *src_ddt, *dst_ddt; unsigned total; - unsigned int src_nents, dst_nents; + int src_nents, dst_nents; struct scatterlist *cur; int i, dst_ents, src_ents; @@ -336,13 +335,21 @@ static int spacc_aead_make_ddts(struct aead_request *areq) if (req->is_encrypt) total += crypto_aead_authsize(aead); - src_nents = sg_count(areq->src, total); + src_nents = sg_nents_for_len(areq->src, total); + if (src_nents < 0) { + dev_err(engine->dev, "Invalid numbers of src SG.\n"); + return src_nents; + } if (src_nents + 1 > MAX_DDT_LEN) return -E2BIG; dst_nents = 0; if (areq->src != areq->dst) { - dst_nents = sg_count(areq->dst, total); + dst_nents = sg_nents_for_len(areq->dst, total); + if (dst_nents < 0) { + dev_err(engine->dev, "Invalid numbers of dst SG.\n"); + return dst_nents; + } if (src_nents + 1 > MAX_DDT_LEN) return -E2BIG; } @@ -422,13 +429,22 @@ static void spacc_aead_free_ddts(struct spacc_req *req) (req->is_encrypt ? 
crypto_aead_authsize(aead) : 0); struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead); struct spacc_engine *engine = aead_ctx->generic.engine; - unsigned nents = sg_count(areq->src, total); + int nents = sg_nents_for_len(areq->src, total); + + /* sg_nents_for_len should not fail since it works when mapping sg */ + if (unlikely(nents < 0)) { + dev_err(engine->dev, "Invalid numbers of src SG.\n"); + return; + } if (areq->src != areq->dst) { dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); - dma_unmap_sg(engine->dev, areq->dst, - sg_count(areq->dst, total), - DMA_FROM_DEVICE); + nents = sg_nents_for_len(areq->dst, total); + if (unlikely(nents < 0)) { + dev_err(engine->dev, "Invalid numbers of dst SG.\n"); + return; + } + dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE); } else dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); @@ -440,7 +456,12 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, dma_addr_t ddt_addr, struct scatterlist *payload, unsigned nbytes, enum dma_data_direction dir) { - unsigned nents = sg_count(payload, nbytes); + int nents = sg_nents_for_len(payload, nbytes); + + if (nents < 0) { + dev_err(req->engine->dev, "Invalid numbers of SG.\n"); + return; + } dma_unmap_sg(req->engine->dev, payload, nents, dir); dma_pool_free(req->engine->req_pool, ddt, ddt_addr); @@ -835,8 +856,7 @@ static int spacc_ablk_need_fallback(struct spacc_req *req) static void spacc_ablk_complete(struct spacc_req *req) { - struct ablkcipher_request *ablk_req = - container_of(req->req, struct ablkcipher_request, base); + struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); if (ablk_req->src != ablk_req->dst) { spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index eefccf7b8be7..85b44e577684 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -22,6 +22,28 @@ config CRYPTO_DEV_QAT_DH895xCC To compile this as a module, choose M here: the module will be called qat_dh895xcc. +config CRYPTO_DEV_QAT_C3XXX + tristate "Support for Intel(R) C3XXX" + depends on X86 && PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c3xxx. + +config CRYPTO_DEV_QAT_C62X + tristate "Support for Intel(R) C62X" + depends on X86 && PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) C62x with Intel(R) QuickAssist Technology + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c62x. + config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on X86 && PCI @@ -34,3 +56,27 @@ config CRYPTO_DEV_QAT_DH895xCCVF To compile this as a module, choose M here: the module will be called qat_dh895xccvf. + +config CRYPTO_DEV_QAT_C3XXXVF + tristate "Support for Intel(R) C3XXX Virtual Function" + depends on X86 && PCI + select PCI_IOV + select CRYPTO_DEV_QAT + help + Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology + Virtual Function for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c3xxxvf. 
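A minimal sketch of the sg_nents_for_len() error-handling pattern that the cesa and picoxcell hunks above switch to. The helper name example_map_payload and its surrounding driver context are hypothetical; sg_nents_for_len(), dma_map_sg() and dev_err() are the real kernel interfaces involved:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper: map a scatterlist only after validating that
 * sg_nents_for_len() did not return a negative errno (it returns
 * -EINVAL when the scatterlist is shorter than nbytes).
 */
static int example_map_payload(struct device *dev, struct scatterlist *sg,
			       unsigned int nbytes,
			       enum dma_data_direction dir)
{
	int nents = sg_nents_for_len(sg, nbytes);

	if (nents < 0) {
		dev_err(dev, "Invalid number of SG entries\n");
		return nents;		/* propagate the errno to the caller */
	}

	if (!dma_map_sg(dev, sg, nents, dir))
		return -ENOMEM;

	return nents;	/* callers pass this count back to dma_unmap_sg() */
}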
+ +config CRYPTO_DEV_QAT_C62XVF + tristate "Support for Intel(R) C62X Virtual Function" + depends on X86 && PCI + select PCI_IOV + select CRYPTO_DEV_QAT + help + Support for Intel(R) C62x with Intel(R) QuickAssist Technology + Virtual Function for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c62xvf. diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index a3ce0b70e32f..8265106f1c8e 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -1,3 +1,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile new file mode 100644 index 000000000000..8f5fd4838a96 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o +qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c new file mode 100644 index 000000000000..c5bd5a9abc4d --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -0,0 +1,238 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_pf2vf_msg.h> +#include "adf_c3xxx_hw_data.h" + +/* Worker thread to service arbiter mappings based on dev SKUs */ +static const u32 thrd_to_arb_map_6_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA +}; + +static struct adf_hw_device_class c3xxx_class = { + .name = ADF_C3XXX_DEVICE_NAME, + .type = DEV_C3XXX, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET & + ADF_C3XXX_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->accel_mask) + return 0; + + for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) { + if (self->accel_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->ae_mask) + return 0; + + for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) { + if (self->ae_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXX_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXX_ETR_BAR; +} + +static u32 get_sram_bar_id(struct adf_hw_device_data *self) +{ + return 0; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + int aes = get_num_aes(self); + + if (aes == 6) + return DEV_SKU_4; + + return DEV_SKU_UNKNOWN; +} + +static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) +{ + switch (accel_dev->accel_pci_dev.sku) { + case DEV_SKU_4: + *arb_map_config = thrd_to_arb_map_6_me_sku; + break; + default: + dev_err(&GET_DEV(accel_dev), + "The configuration doesn't match any SKU"); + *arb_map_config = NULL; + } +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C3XXX_PF2VF_OFFSET(i); +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C3XXX_VINTMSK_OFFSET(i); +} + +static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + unsigned int val, i; + + /* Enable Accel Engine error detection & correction */ + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i)); + val |= ADF_C3XXX_ENABLE_AE_ECC_ERR; + ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val); + val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i)); + val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR; + ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val); + } + + /* Enable shared memory error detection & correction */ + for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i)); + val |= ADF_C3XXX_ERRSSMSH_EN; + 
ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val); + val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i)); + val |= ADF_C3XXX_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val); + } +} + +static void adf_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr; + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET, + ADF_C3XXX_SMIA0_MASK); + ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET, + ADF_C3XXX_SMIA1_MASK); +} + +static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c3xxx_class; + hw_data->instance_id = c3xxx_class.instances++; + hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->fw_name = ADF_C3XXX_FW; + hw_data->fw_mmp_name = ADF_C3XXX_MMP; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->disable_iov = adf_disable_sriov; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_enable_ints; + hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; +} + +void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h new file mode 100644 index 000000000000..2f2681d3458a --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -0,0 +1,83 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_C3XXX_HW_DATA_H_ +#define ADF_C3XXX_HW_DATA_H_ + +/* PCIe configuration space */ +#define ADF_C3XXX_PMISC_BAR 0 +#define ADF_C3XXX_ETR_BAR 1 +#define ADF_C3XXX_RX_RINGS_OFFSET 8 +#define ADF_C3XXX_TX_RINGS_MASK 0xFF +#define ADF_C3XXX_MAX_ACCELERATORS 3 +#define ADF_C3XXX_MAX_ACCELENGINES 6 +#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 +#define ADF_C3XXX_ACCELERATORS_MASK 0x3 +#define ADF_C3XXX_ACCELENGINES_MASK 0x3F +#define ADF_C3XXX_ETR_MAX_BANKS 16 +#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_C3XXX_SMIA0_MASK 0xFFFF +#define ADF_C3XXX_SMIA1_MASK 0x1 +/* Error detection and correction */ +#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) +#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) +#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28) +#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) +#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18) +#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10) +#define ADF_C3XXX_ERRSSMSH_EN BIT(3) + +#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) +#define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) + +/* Firmware Binary */ +#define ADF_C3XXX_FW "qat_c3xxx.bin" +#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin" + +void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data); +#endif diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c new file mode 100644 index 000000000000..e13bd08ddd1e --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -0,0 +1,335 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_cfg.h> +#include "adf_c3xxx_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C3XXX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C3XXX_PCI_DEVICE_ID: + adf_clean_hw_data_c3xxx(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C3XXX_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table. 
+ * This should be called before adf_cleanup_accel is called */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_c3xxx(accel_dev->hw_device); + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, + &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it. */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + ((~hw_data->ae_mask) & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found"); + ret = -EFAULT; + goto out_err; + } + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, + &hw_data->accel_capabilities_mask); + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + + if (adf_enable_aer(accel_dev, &adf_driver)) { + dev_err(&pdev->dev, "Failed to enable aer\n"); + ret = -EFAULT; + goto out_err_free_reg; + } + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state\n"); + ret = -ENOMEM; + goto out_err_free_reg; + } + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + 
adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_disable_aer(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile new file mode 100644 index 000000000000..16d178e2eaa2 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o +qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c new file mode 100644 index 000000000000..1af321c2ce1a --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -0,0 +1,173 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <adf_accel_devices.h> +#include <adf_pf2vf_msg.h> +#include <adf_common_drv.h> +#include "adf_c3xxxvf_hw_data.h" + +static struct adf_hw_device_class c3xxxiov_class = { + .name = ADF_C3XXXVF_DEVICE_NAME, + .type = DEV_C3XXXVF, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return ADF_C3XXXIOV_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return ADF_C3XXXIOV_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_MAX_ACCELENGINES; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_VF; +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C3XXXIOV_PF2VF_OFFSET; +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C3XXXIOV_VINTMSK_OFFSET; +} + +static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) +{ +} + +static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) { + dev_err(&GET_DEV(accel_dev), + "Failed to send Init event to PF\n"); + return -EFAULT; + } + return 0; +} + +static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Shutdown event to PF\n"); +} + +void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c3xxxiov_class; + hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK; + hw_data->alloc_irq = adf_vf_isr_resource_alloc; + hw_data->free_irq = adf_vf_isr_resource_free; + hw_data->enable_error_correction = adf_vf_void_noop; + hw_data->init_admin_comms = adf_vf_int_noop; + hw_data->exit_admin_comms = adf_vf_void_noop; + hw_data->send_admin_init = adf_vf2pf_init; + hw_data->init_arb = adf_vf_int_noop; + hw_data->exit_arb = adf_vf_void_noop; + hw_data->disable_iov = adf_vf2pf_shutdown; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + 
hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->enable_ints = adf_vf_void_noop; + hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->dev_class->instances++; + adf_devmgr_update_class_index(hw_data); +} + +void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; + adf_devmgr_update_class_index(hw_data); +} diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h index e270e4a63d14..934f216acf39 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h @@ -3,7 +3,7 @@ redistributing this file, you may do so under either license. GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. + Copyright(c) 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. @@ -17,7 +17,7 @@ qat-linux@intel.com BSD LICENSE - Copyright(c) 2014 Intel Corporation. + Copyright(c) 2015 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -44,14 +44,21 @@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef ADF_DH895xVF_DRV_H_ -#define ADF_DH895xVF_DRV_H_ -#include <adf_accel_devices.h> -#include <adf_transport.h> - -void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); -void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); -int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); -void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); -void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); +#ifndef ADF_C3XXXVF_HW_DATA_H_ +#define ADF_C3XXXVF_HW_DATA_H_ + +#define ADF_C3XXXIOV_PMISC_BAR 1 +#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1 +#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1 +#define ADF_C3XXXIOV_MAX_ACCELERATORS 1 +#define ADF_C3XXXIOV_MAX_ACCELENGINES 1 +#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8 +#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF +#define ADF_C3XXXIOV_ETR_BAR 0 +#define ADF_C3XXXIOV_ETR_MAX_BANKS 1 +#define ADF_C3XXXIOV_PF2VF_OFFSET 0x200 +#define ADF_C3XXXIOV_VINTMSK_OFFSET 0x208 + +void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data); #endif diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c new file mode 100644 index 000000000000..1ac4ae90e072 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -0,0 +1,305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_cfg.h> +#include "adf_c3xxxvf_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C3XXXVF_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + struct adf_accel_dev *pf; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C3XXXIOV_PCI_DEVICE_ID: + adf_clean_hw_data_c3xxxiov(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); + adf_devmgr_rm_dev(accel_dev, pf); +} + 
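Both the PF and VF probe routines in this patch apply the same DMA-mask fallback before mapping any BARs: try a 64-bit mask, drop to 32-bit, and only then fail. A sketch of that pattern follows; the helper name example_set_dma_mask is invented for illustration, and the -EFAULT return simply mirrors what the probe code above does:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	/* Prefer 64-bit DMA; pci_set_dma_mask() returns 0 on success. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		return 0;
	}

	/* Fall back to 32-bit addressing. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		return 0;
	}

	dev_err(&pdev->dev, "No usable DMA configuration\n");
	return -EFAULT;
}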
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_dev *pf; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C3XXXIOV_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + accel_dev->is_vf = true; + pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table */ + if (adf_devmgr_add_dev(accel_dev, pf)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + INIT_LIST_HEAD(&accel_dev->crypto_list); + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + accel_dev->hw_device = hw_data; + adf_init_hw_data_c3xxxiov(accel_dev->hw_device); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + /* Completion for VF2PF request/response message exchange */ + init_completion(&accel_dev->vf.iov_msg_completion); + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + 
if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + adf_clean_vf_map(true); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile new file mode 100644 index 000000000000..bd75ace59b76 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o +qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c new file mode 100644 index 000000000000..879e04cae714 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -0,0 +1,248 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_pf2vf_msg.h> +#include "adf_c62x_hw_data.h" + +/* Worker thread to service arbiter mappings based on dev SKUs */ +static const u32 thrd_to_arb_map_8_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0 +}; + +static const u32 thrd_to_arb_map_10_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA +}; + +static struct adf_hw_device_class c62x_class = { + .name = ADF_C62X_DEVICE_NAME, + .type = DEV_C62X, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET & + ADF_C62X_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return (~fuse) & ADF_C62X_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->accel_mask) + return 0; + + for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) { + if (self->accel_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->ae_mask) + return 0; + + for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) { + if (self->ae_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62X_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62X_ETR_BAR; +} + +static u32 get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62X_SRAM_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + int aes = get_num_aes(self); + + if (aes == 8) + return DEV_SKU_2; + else if (aes == 10) + return DEV_SKU_4; + + return DEV_SKU_UNKNOWN; +} + +static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) +{ + switch (accel_dev->accel_pci_dev.sku) { + case DEV_SKU_2: + *arb_map_config = thrd_to_arb_map_8_me_sku; + break; + case DEV_SKU_4: + *arb_map_config = thrd_to_arb_map_10_me_sku; + break; + default: + dev_err(&GET_DEV(accel_dev), + "The configuration doesn't match any SKU"); + *arb_map_config = NULL; + } +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C62X_PF2VF_OFFSET(i); +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C62X_VINTMSK_OFFSET(i); +} + +static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + unsigned int val, i; + + /* Enable Accel Engine error detection & 
correction */ + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i)); + val |= ADF_C62X_ENABLE_AE_ECC_ERR; + ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val); + val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i)); + val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR; + ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val); + } + + /* Enable shared memory error detection & correction */ + for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i)); + val |= ADF_C62X_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val); + val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i)); + val |= ADF_C62X_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val); + } +} + +static void adf_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr; + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET, + ADF_C62X_SMIA0_MASK); + ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET, + ADF_C62X_SMIA1_MASK); +} + +static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c62x_class; + hw_data->instance_id = c62x_class.instances++; + hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->fw_name = ADF_C62X_FW; + hw_data->fw_mmp_name = ADF_C62X_MMP; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->disable_iov = adf_disable_sriov; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_enable_ints; + hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; +} + +void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h new file mode 100644 index 000000000000..17a8a32d5c63 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h @@ -0,0 +1,84 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_C62X_HW_DATA_H_ +#define ADF_C62X_HW_DATA_H_ + +/* PCIe configuration space */ +#define ADF_C62X_SRAM_BAR 0 +#define ADF_C62X_PMISC_BAR 1 +#define ADF_C62X_ETR_BAR 2 +#define ADF_C62X_RX_RINGS_OFFSET 8 +#define ADF_C62X_TX_RINGS_MASK 0xFF +#define ADF_C62X_MAX_ACCELERATORS 5 +#define ADF_C62X_MAX_ACCELENGINES 10 +#define ADF_C62X_ACCELERATORS_REG_OFFSET 16 +#define ADF_C62X_ACCELERATORS_MASK 0x1F +#define ADF_C62X_ACCELENGINES_MASK 0x3FF +#define ADF_C62X_ETR_MAX_BANKS 16 +#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_C62X_SMIA0_MASK 0xFFFF +#define ADF_C62X_SMIA1_MASK 0x1 +/* Error detection and correction */ +#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) +#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) +#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28) +#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) +#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18) +#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10) +#define ADF_C62X_ERRSSMSH_EN BIT(3) + +#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) +#define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) + +/* Firmware Binary */ +#define ADF_C62X_FW "qat_c62x.bin" +#define ADF_C62X_MMP "qat_c62x_mmp.bin" + +void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data); +#endif diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c new file mode 100644 index 000000000000..512c56509718 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -0,0 +1,335 @@ +/* + This file is provided under a dual BSD/GPLv2 license. 
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_cfg.h> +#include "adf_c62x_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C62X_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C62X_PCI_DEVICE_ID: + adf_clean_hw_data_c62x(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C62X_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table. 
+ * This should be called before adf_cleanup_accel is called */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_c62x(accel_dev->hw_device); + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, + &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it. */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + ((~hw_data->ae_mask) & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found"); + ret = -EFAULT; + goto out_err; + } + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, + &hw_data->accel_capabilities_mask); + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + + if (adf_enable_aer(accel_dev, &adf_driver)) { + dev_err(&pdev->dev, "Failed to enable aer\n"); + ret = -EFAULT; + goto out_err_free_reg; + } + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state\n"); + ret = -ENOMEM; + goto out_err_free_reg; + } + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + 
adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_disable_aer(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile new file mode 100644 index 000000000000..ecd708c213b2 --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o +qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c new file mode 100644 index 000000000000..baf4b509c892 --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -0,0 +1,173 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <adf_accel_devices.h> +#include <adf_pf2vf_msg.h> +#include <adf_common_drv.h> +#include "adf_c62xvf_hw_data.h" + +static struct adf_hw_device_class c62xiov_class = { + .name = ADF_C62XVF_DEVICE_NAME, + .type = DEV_C62XVF, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return ADF_C62XIOV_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return ADF_C62XIOV_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_MAX_ACCELENGINES; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_VF; +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C62XIOV_PF2VF_OFFSET; +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C62XIOV_VINTMSK_OFFSET; +} + +static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) +{ +} + +static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) { + dev_err(&GET_DEV(accel_dev), + "Failed to send Init event to PF\n"); + return -EFAULT; + } + return 0; +} + +static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Shutdown event to PF\n"); +} + +void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c62xiov_class; + hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK; + hw_data->alloc_irq = adf_vf_isr_resource_alloc; + hw_data->free_irq = adf_vf_isr_resource_free; + hw_data->enable_error_correction = adf_vf_void_noop; + hw_data->init_admin_comms = adf_vf_int_noop; + hw_data->exit_admin_comms = adf_vf_void_noop; + hw_data->send_admin_init = adf_vf2pf_init; + hw_data->init_arb = adf_vf_int_noop; + hw_data->exit_arb = adf_vf_void_noop; + hw_data->disable_iov = adf_vf2pf_shutdown; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = 
get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->enable_ints = adf_vf_void_noop; + hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->dev_class->instances++; + adf_devmgr_update_class_index(hw_data); +} + +void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; + adf_devmgr_update_class_index(hw_data); +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h index 85ff245bd1d8..a28d83e77422 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h @@ -3,7 +3,7 @@ redistributing this file, you may do so under either license. GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. + Copyright(c) 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. @@ -17,7 +17,7 @@ qat-linux@intel.com BSD LICENSE - Copyright(c) 2014 Intel Corporation. + Copyright(c) 2015 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -44,15 +44,21 @@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef ADF_DH895x_DRV_H_ -#define ADF_DH895x_DRV_H_ -#include <adf_accel_devices.h> -#include <adf_transport.h> - -void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); -void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); -int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); -void adf_isr_resource_free(struct adf_accel_dev *accel_dev); -void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, - uint32_t const **arb_map_config); +#ifndef ADF_C62XVF_HW_DATA_H_ +#define ADF_C62XVF_HW_DATA_H_ + +#define ADF_C62XIOV_PMISC_BAR 1 +#define ADF_C62XIOV_ACCELERATORS_MASK 0x1 +#define ADF_C62XIOV_ACCELENGINES_MASK 0x1 +#define ADF_C62XIOV_MAX_ACCELERATORS 1 +#define ADF_C62XIOV_MAX_ACCELENGINES 1 +#define ADF_C62XIOV_RX_RINGS_OFFSET 8 +#define ADF_C62XIOV_TX_RINGS_MASK 0xFF +#define ADF_C62XIOV_ETR_BAR 0 +#define ADF_C62XIOV_ETR_MAX_BANKS 1 +#define ADF_C62XIOV_PF2VF_OFFSET 0x200 +#define ADF_C62XIOV_VINTMSK_OFFSET 0x208 + +void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data); #endif diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c new file mode 100644 index 000000000000..d2e4b928f3be --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -0,0 +1,305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_cfg.h> +#include "adf_c62xvf_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C62XVF_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + struct adf_accel_dev *pf; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C62XIOV_PCI_DEVICE_ID: + adf_clean_hw_data_c62xiov(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); + adf_devmgr_rm_dev(accel_dev, pf); +} + +static 
int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_dev *pf; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C62XIOV_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + accel_dev->is_vf = true; + pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table */ + if (adf_devmgr_add_dev(accel_dev, pf)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + INIT_LIST_HEAD(&accel_dev->crypto_list); + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + accel_dev->hw_device = hw_data; + adf_init_hw_data_c62xiov(accel_dev->hw_device); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + /* Completion for VF2PF request/response message exchange */ + init_completion(&accel_dev->vf.iov_msg_completion); + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + 
goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + adf_clean_vf_map(true); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 9e9e196c6d51..29c7c53d2845 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -4,10 +4,12 @@ $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \ $(obj)/qat_rsaprivkey-asn1.h clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h -clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h +clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o intel_qat-objs := adf_cfg.o \ + adf_isr.o \ + adf_vf_isr.o \ adf_ctl_drv.o \ adf_dev_mgr.o \ adf_init.o \ diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index ca853d50b4b7..f96d427e502c 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -55,8 +55,20 @@ #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" +#define ADF_C62X_DEVICE_NAME "c62x" +#define ADF_C62XVF_DEVICE_NAME "c62xvf" +#define ADF_C3XXX_DEVICE_NAME "c3xxx" +#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 +#define ADF_C62X_PCI_DEVICE_ID 0x37c8 +#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9 +#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2 +#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 +#define ADF_ERRSOU3 (0x3A000 + 0x0C) +#define ADF_ERRSOU5 (0x3A000 + 0xD8) +#define ADF_DEVICE_FUSECTL_OFFSET 0x40 +#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 @@ -168,11 +180,11 @@ struct adf_hw_device_data { const char *fw_mmp_name; uint32_t fuses; uint32_t accel_capabilities_mask; + uint32_t instance_id; uint16_t accel_mask; uint16_t ae_mask; uint16_t tx_rings_mask; uint8_t tx_rx_gap; - uint8_t instance_id; uint8_t num_banks; uint8_t num_accel; uint8_t num_logical_accel; @@ -239,6 +251,6 @@ struct adf_accel_dev { } vf; }; bool is_vf; - uint8_t accel_id; + u32 accel_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 
20b08bdcb146..a42fc42704be 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -78,9 +78,12 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev) uof_addr = (void *)loader_data->uof_fw->data; mmp_size = loader_data->mmp_fw->size; mmp_addr = (void *)loader_data->mmp_fw->data; - qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size); - if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { - dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); + if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) { + dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n"); + goto out_err; + } + if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) { + dev_err(&GET_DEV(accel_dev), "Failed to map FW\n"); goto out_err; } if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 147d755fed97..eb557f69e367 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -51,6 +51,7 @@ #include <linux/pci.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" +#include "adf_common_drv.h" #include "icp_qat_fw_init_admin.h" /* Admin Messages Registers */ @@ -234,7 +235,8 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; void __iomem *csr = pmisc->virt_addr; - void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; + void __iomem *mailbox = (void __iomem *)((uintptr_t)csr + + ADF_DH895XCC_MAILBOX_BASE_OFFSET); u64 reg_val; admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 0a5ca0ba5d64..e78a1d7d88fc 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -82,7 +82,7 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; -static void adf_dev_restore(struct adf_accel_dev *accel_dev) +void adf_dev_restore(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); struct pci_dev *parent = pdev->bus->self; @@ -197,7 +197,7 @@ static void adf_resume(struct pci_dev *pdev) dev_info(&pdev->dev, "Device is up and runnig\n"); } -static struct pci_error_handlers adf_err_handler = { +static const struct pci_error_handlers adf_err_handler = { .error_detected = adf_error_detected, .slot_reset = adf_slot_reset, .resume = adf_resume, diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index c697fb1cdfb5..8c4f6573ce59 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -72,12 +72,16 @@ enum adf_device_type { DEV_UNKNOWN = 0, DEV_DH895XCC, DEV_DH895XCCVF, + DEV_C62X, + DEV_C62XVF, + DEV_C3XXX, + DEV_C3XXXVF }; struct adf_dev_status_info { enum adf_device_type type; - uint8_t accel_id; - uint8_t instance_id; + u32 accel_id; + u32 instance_id; uint8_t num_ae; uint8_t num_accel; uint8_t num_logical_accel; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 3f76bd495bcb..0e82ce3c383e 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -54,7 +54,7 @@ #include "icp_qat_hal.h" #define ADF_MAJOR_VERSION 0 -#define ADF_MINOR_VERSION 2 +#define 
ADF_MINOR_VERSION 6 #define ADF_BUILD_VERSION 0 #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ __stringify(ADF_MINOR_VERSION) "." \ @@ -106,8 +106,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev); int adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); -void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); -void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); @@ -143,6 +141,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); +void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); @@ -159,6 +158,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev); void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); int qat_crypto_register(void); int qat_crypto_unregister(void); +int qat_crypto_dev_config(struct adf_accel_dev *accel_dev); struct qat_crypto_instance *qat_crypto_get_instance_node(int node); void qat_crypto_put_instance(struct qat_crypto_instance *inst); void qat_alg_callback(void *resp); @@ -168,6 +168,11 @@ void qat_algs_unregister(void); int qat_asym_algs_register(void); void qat_asym_algs_unregister(void); +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_isr_resource_free(struct adf_accel_dev *accel_dev); +int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); + int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, @@ -178,6 +183,8 @@ void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask); +int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, + unsigned int ae); int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, enum icp_qat_uof_regtype lm_type, unsigned char mode); @@ -216,10 +223,10 @@ int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short lm_addr, unsigned int value); int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); -int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size); -void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size); +int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, + int mem_size); +int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size); #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); @@ -227,6 +234,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); +void 
adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); +void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); #else static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) { @@ -236,5 +245,13 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } + +static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) +{ +} #endif #endif diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 473d36d91644..5c897e6e7994 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -255,12 +255,9 @@ out: static int adf_ctl_is_device_in_use(int id) { - struct list_head *itr, *head = adf_devmgr_get_head(); - - list_for_each(itr, head) { - struct adf_accel_dev *dev = - list_entry(itr, struct adf_accel_dev, list); + struct adf_accel_dev *dev; + list_for_each_entry(dev, adf_devmgr_get_head(), list) { if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { dev_info(&GET_DEV(dev), @@ -275,12 +272,10 @@ static int adf_ctl_is_device_in_use(int id) static int adf_ctl_stop_devices(uint32_t id) { - struct list_head *itr, *head = adf_devmgr_get_head(); + struct adf_accel_dev *accel_dev; int ret = 0; - list_for_each(itr, head) { - struct adf_accel_dev *accel_dev = - list_entry(itr, struct adf_accel_dev, list); + list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) { if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (!adf_dev_started(accel_dev)) continue; @@ -342,12 +337,10 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, if (ret) return ret; + ret = -ENODEV; accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); - if (!accel_dev) { - pr_err("QAT: Device %d not found\n", ctl_data->device_id); - ret = -ENODEV; + if (!accel_dev) goto out; - } if (!adf_dev_started(accel_dev)) { dev_info(&GET_DEV(accel_dev), diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 8dfdb8f90797..b3ebb25f9ca7 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -53,6 +53,7 @@ static LIST_HEAD(accel_table); static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); static uint32_t num_devices; +static u8 id_map[ADF_MAX_DEVICES]; struct vf_id_map { u32 bdf; @@ -116,8 +117,10 @@ void adf_clean_vf_map(bool vf) mutex_lock(&table_lock); list_for_each_safe(ptr, tmp, &vfs_table) { map = list_entry(ptr, struct vf_id_map, list); - if (map->bdf != -1) + if (map->bdf != -1) { + id_map[map->id] = 0; num_devices--; + } if (vf && map->bdf == -1) continue; @@ -154,6 +157,19 @@ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) } EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); +static unsigned int adf_find_free_id(void) +{ + unsigned int i; + + for (i = 0; i < ADF_MAX_DEVICES; i++) { + if (!id_map[i]) { + id_map[i] = 1; + return i; + } + } + return ADF_MAX_DEVICES + 1; +} + /** * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework * @accel_dev: Pointer to acceleration device. 
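The adf_dev_mgr.c hunks above replace the old monotonically increasing accel_id counter with a fixed-size id_map, so an ID released by adf_devmgr_rm_dev() or adf_clean_vf_map() can be handed out again on the next probe instead of the counter growing forever. Below is a minimal standalone sketch of that allocation pattern for illustration only; it is not part of the patch, the ADF_MAX_DEVICES value is a placeholder (the real constant is defined elsewhere in qat_common and is not visible in this diff), and the demo main() is an assumption added purely to show the reuse behaviour.

/* Illustrative sketch, not kernel code: first-free-slot ID allocation
 * mirroring the id_map / adf_find_free_id() logic added by this patch.
 */
#include <stdio.h>

#define ADF_MAX_DEVICES 8                 /* placeholder for the demo */

static unsigned char id_map[ADF_MAX_DEVICES];

/* First clear slot wins and is marked used; out-of-range means "no ID". */
static unsigned int find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;       /* caller turns this into an error */
}

/* Removal paths simply clear the slot so the ID becomes reusable. */
static void release_id(unsigned int id)
{
	if (id < ADF_MAX_DEVICES)
		id_map[id] = 0;
}

int main(void)
{
	unsigned int a = find_free_id();  /* gets 0 */
	unsigned int b = find_free_id();  /* gets 1 */

	release_id(a);                    /* device 0 removed */
	printf("reused id: %u\n", find_free_id());  /* prints 0, not 2 */
	(void)b;
	return 0;
}

With the old counter, removing and re-adding a device would have produced an ever-growing accel_id; with the map, the freed slot is the one returned, which is what the added bounds check against ADF_MAX_DEVICES in adf_devmgr_add_dev() relies on.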
@@ -194,8 +210,12 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, } list_add_tail(&accel_dev->list, &accel_table); - accel_dev->accel_id = num_devices++; - + accel_dev->accel_id = adf_find_free_id(); + if (accel_dev->accel_id > ADF_MAX_DEVICES) { + ret = -EFAULT; + goto unlock; + } + num_devices++; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { ret = -ENOMEM; @@ -236,8 +256,13 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, ret = -ENOMEM; goto unlock; } - - accel_dev->accel_id = num_devices++; + accel_dev->accel_id = adf_find_free_id(); + if (accel_dev->accel_id > ADF_MAX_DEVICES) { + kfree(map); + ret = -EFAULT; + goto unlock; + } + num_devices++; list_add_tail(&accel_dev->list, &accel_table); map->bdf = adf_get_vf_num(accel_dev); map->id = accel_dev->accel_id; @@ -271,6 +296,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, { mutex_lock(&table_lock); if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { + id_map[accel_dev->accel_id] = 0; num_devices--; } else if (accel_dev->is_vf && pf) { struct vf_id_map *map, *next; diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index 6849422e04bb..f267d9e42e0b 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -45,6 +45,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "adf_accel_devices.h" +#include "adf_common_drv.h" #include "adf_transport_internal.h" #define ADF_ARB_NUM 4 @@ -124,19 +125,12 @@ int adf_init_arb(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_init_arb); -/** - * adf_update_ring_arb() - update ring arbitration rgister - * @accel_dev: Pointer to ring data. - * - * Function enables or disables rings for/from arbitration. - */ void adf_update_ring_arb(struct adf_etr_ring_data *ring) { WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, ring->bank->bank_number, ring->bank->ring_mask & 0xFF); } -EXPORT_SYMBOL_GPL(adf_update_ring_arb); void adf_exit_arb(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index d873eeecc363..ef5575e4a215 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -62,15 +62,6 @@ static void adf_service_add(struct service_hndl *service) mutex_unlock(&service_lock); } -/** - * adf_service_register() - Register acceleration service in the accel framework - * @service: Pointer to the service - * - * Function adds the acceleration service to the acceleration framework. - * To be used by QAT device specific drivers. - * - * Return: 0 on success, error code otherwise. - */ int adf_service_register(struct service_hndl *service) { service->init_status = 0; @@ -78,7 +69,6 @@ int adf_service_register(struct service_hndl *service) adf_service_add(service); return 0; } -EXPORT_SYMBOL_GPL(adf_service_register); static void adf_service_remove(struct service_hndl *service) { @@ -87,15 +77,6 @@ static void adf_service_remove(struct service_hndl *service) mutex_unlock(&service_lock); } -/** - * adf_service_unregister() - Unregister acceleration service from the framework - * @service: Pointer to the service - * - * Function remove the acceleration service from the acceleration framework. - * To be used by QAT device specific drivers. - * - * Return: 0 on success, error code otherwise. 
- */ int adf_service_unregister(struct service_hndl *service) { if (service->init_status || service->start_status) { @@ -105,7 +86,6 @@ int adf_service_unregister(struct service_hndl *service) adf_service_remove(service); return 0; } -EXPORT_SYMBOL_GPL(adf_service_unregister); /** * adf_dev_init() - Init data structures and services for the given accel device @@ -366,6 +346,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) hw_data->disable_iov(accel_dev); adf_cleanup_etr_data(accel_dev); + adf_dev_restore(accel_dev); } EXPORT_SYMBOL_GPL(adf_dev_shutdown); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index 5570f78795c1..b81f79acc4ea 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -51,15 +51,13 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/interrupt.h> -#include <adf_accel_devices.h> -#include <adf_common_drv.h> -#include <adf_cfg.h> -#include <adf_cfg_strings.h> -#include <adf_cfg_common.h> -#include <adf_transport_access_macros.h> -#include <adf_transport_internal.h> -#include "adf_drv.h" -#include "adf_dh895xcc_hw_data.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "adf_cfg_common.h" +#include "adf_transport_access_macros.h" +#include "adf_transport_internal.h" static int adf_enable_msix(struct adf_accel_dev *accel_dev) { @@ -109,14 +107,16 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) #ifdef CONFIG_PCI_IOV /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ if (accel_dev->pf.vf_info) { - void __iomem *pmisc_bar_addr = - (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *pmisc_bar_addr = pmisc->virt_addr; u32 vf_mask; /* Get the interrupt sources triggered by VFs */ - vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) & + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) & 0x0000FFFF) << 16) | - ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) & + ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) & 0x01FFFE00) >> 9); if (vf_mask) { @@ -301,6 +301,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) } } +/** + * adf_vf_isr_resource_free() - Free IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function frees interrupts for acceleration device. + */ void adf_isr_resource_free(struct adf_accel_dev *accel_dev) { adf_free_irqs(accel_dev); @@ -308,7 +314,16 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev) adf_disable_msix(&accel_dev->accel_pci_dev); adf_isr_free_msix_entry_table(accel_dev); } - +EXPORT_SYMBOL_GPL(adf_isr_resource_free); + +/** + * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function allocates interrupts for acceleration device. + * + * Return: 0 on success, error code otherwise. 
+ */ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) { int ret; @@ -330,3 +345,4 @@ err_out: adf_isr_resource_free(accel_dev); return -EFAULT; } +EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 5fdbad809343..b3875fdf6cd7 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -45,8 +45,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <linux/pci.h> -#include <linux/mutex.h> #include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" @@ -58,12 +56,6 @@ #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC) #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16) -/** - * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts - * @accel_dev: Pointer to acceleration device. - * - * Function enables PF to VF interrupts - */ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; @@ -73,14 +65,7 @@ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0); } -EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts); -/** - * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts - * @accel_dev: Pointer to acceleration device. - * - * Function disables PF to VF interrupts - */ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; @@ -90,7 +75,6 @@ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2); } -EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) @@ -116,12 +100,6 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, } } -/** - * adf_disable_pf2vf_interrupts() - Disable VF to PF interrupts - * @accel_dev: Pointer to acceleration device. 
- * - * Function disables VF to PF interrupts - */ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; @@ -144,7 +122,6 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); } } -EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts); static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) { diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 3865ae8d96d9..57d2622728a5 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -122,7 +122,7 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) return -EAGAIN; } spin_lock_bh(&ring->lock); - memcpy(ring->base_addr + ring->tail, msg, + memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg, ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); ring->tail = adf_modulo(ring->tail + @@ -137,23 +137,22 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) static int adf_handle_response(struct adf_etr_ring_data *ring) { uint32_t msg_counter = 0; - uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); + uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); while (*msg != ADF_RING_EMPTY_SIG) { ring->callback((uint32_t *)msg); + atomic_dec(ring->inflights); *msg = ADF_RING_EMPTY_SIG; ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); msg_counter++; - msg = (uint32_t *)(ring->base_addr + ring->head); + msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); } - if (msg_counter > 0) { + if (msg_counter > 0) WRITE_CSR_RING_HEAD(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring->head); - atomic_sub(msg_counter, ring->inflights); - } return 0; } @@ -342,27 +341,15 @@ static void adf_ring_response_handler(struct adf_etr_bank_data *bank) } } -/** - * adf_response_handler() - Bottom half handler response handler - * @bank_addr: Address of a ring bank for with the BH was scheduled. - * - * Function is the bottom half handler for the response from acceleration - * device. There is one handler for every ring bank. Function checks all - * communication rings in the bank. - * To be used by QAT device specific drivers. 
- * - * Return: void - */ -void adf_response_handler(unsigned long bank_addr) +void adf_response_handler(uintptr_t bank_addr) { struct adf_etr_bank_data *bank = (void *)bank_addr; - /* Handle all the responses nad reenable IRQs */ + /* Handle all the responses and reenable IRQs */ adf_ring_response_handler(bank); WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, bank->irq_mask); } -EXPORT_SYMBOL_GPL(adf_response_handler); static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, @@ -447,6 +434,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, goto err; } + WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK); WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); return 0; err: diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 6ad7e4e1edca..80e02a2a0a09 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -50,12 +50,14 @@ #include "adf_accel_devices.h" #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF #define ADF_RING_CSR_RING_CONFIG 0x000 #define ADF_RING_CSR_RING_LBASE 0x040 #define ADF_RING_CSR_RING_UBASE 0x080 #define ADF_RING_CSR_RING_HEAD 0x0C0 #define ADF_RING_CSR_RING_TAIL 0x100 #define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 #define ADF_RING_CSR_INT_SRCSEL 0x174 #define ADF_RING_CSR_INT_SRCSEL_2 0x178 #define ADF_RING_CSR_INT_COL_EN 0x17C @@ -144,6 +146,9 @@ do { \ #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ ADF_RING_CSR_RING_TAIL + (ring << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG, value) #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ do { \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index a4869627fd57..bb883368ac01 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -91,7 +91,7 @@ struct adf_etr_data { struct dentry *debug; }; -void adf_response_handler(unsigned long bank_addr); +void adf_response_handler(uintptr_t bank_addr); #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 87c5d8adb125..09427b3d4d55 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -51,16 +51,18 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/interrupt.h> -#include <adf_accel_devices.h> -#include <adf_common_drv.h> -#include <adf_cfg.h> -#include <adf_cfg_strings.h> -#include <adf_cfg_common.h> -#include <adf_transport_access_macros.h> -#include <adf_transport_internal.h> -#include <adf_pf2vf_msg.h> -#include "adf_drv.h" -#include "adf_dh895xccvf_hw_data.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "adf_cfg_common.h" +#include "adf_transport_access_macros.h" +#include "adf_transport_internal.h" +#include 
"adf_pf2vf_msg.h" + +#define ADF_VINTSOU_OFFSET 0x204 +#define ADF_VINTSOU_BUN BIT(0) +#define ADF_VINTSOU_PF2VF BIT(1) static int adf_enable_msi(struct adf_accel_dev *accel_dev) { @@ -91,12 +93,14 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev) static void adf_pf2vf_bh_handler(void *data) { struct adf_accel_dev *accel_dev = data; - void __iomem *pmisc_bar_addr = - (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *pmisc_bar_addr = pmisc->virt_addr; u32 msg; /* Read the message from PF */ - msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET); + msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0)); if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) /* Ignore legacy non-system (non-kernel) PF2VF messages */ @@ -124,8 +128,8 @@ static void adf_pf2vf_bh_handler(void *data) } /* To ack, clear the PF2VFINT bit */ - msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT; - ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg); + msg &= ~BIT(0); + ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg); /* Re-enable PF2VF interrupts */ adf_enable_pf2vf_interrupts(accel_dev); @@ -155,15 +159,17 @@ static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) static irqreturn_t adf_isr(int irq, void *privdata) { struct adf_accel_dev *accel_dev = privdata; - void __iomem *pmisc_bar_addr = - (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *pmisc_bar_addr = pmisc->virt_addr; u32 v_int; /* Read VF INT source CSR to determine the source of VF interrupt */ - v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET); + v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET); /* Check for PF2VF interrupt */ - if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) { + if (v_int & ADF_VINTSOU_PF2VF) { /* Disable PF to VF interrupt */ adf_disable_pf2vf_interrupts(accel_dev); @@ -173,7 +179,7 @@ static irqreturn_t adf_isr(int irq, void *privdata) } /* Check bundle interrupt */ - if (v_int & ADF_DH895XCC_VINTSOU_BUN) { + if (v_int & ADF_VINTSOU_BUN) { struct adf_etr_data *etr_data = accel_dev->transport; struct adf_etr_bank_data *bank = &etr_data->banks[0]; @@ -226,6 +232,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) tasklet_kill(&priv_data->banks[0].resp_handler); } +/** + * adf_vf_isr_resource_free() - Free IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function frees interrupts for acceleration device virtual function. + */ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); @@ -236,7 +248,16 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) adf_cleanup_pf2vf_bh(accel_dev); adf_disable_msi(accel_dev); } - +EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free); + +/** + * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function allocates interrupts for acceleration device virtual function. + * + * Return: 0 on success, error code otherwise. 
+ */ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) { if (adf_enable_msi(accel_dev)) @@ -256,3 +277,4 @@ err_out: adf_vf_isr_resource_free(accel_dev); return -EFAULT; } +EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h index 5e1aa40c0404..2ffef3e4fd68 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -68,11 +68,21 @@ struct icp_qat_fw_loader_hal_handle { struct icp_qat_fw_loader_handle { struct icp_qat_fw_loader_hal_handle *hal_handle; + struct pci_dev *pci_dev; void *obj_handle; + void *sobj_handle; + bool fw_auth; void __iomem *hal_sram_addr_v; void __iomem *hal_cap_g_ctl_csr_addr_v; void __iomem *hal_cap_ae_xfer_csr_addr_v; void __iomem *hal_cap_ae_local_csr_addr_v; void __iomem *hal_ep_csr_addr_v; }; + +struct icp_firml_dram_desc { + void __iomem *dram_base_addr; + void *dram_base_addr_v; + dma_addr_t dram_bus_addr; + u64 dram_size; +}; #endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h index 85b6d241ea82..7187917533d0 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hal.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -81,6 +81,31 @@ enum hal_ae_csr { LOCAL_CSR_STATUS = 0x180, }; +enum fcu_csr { + FCU_CONTROL = 0x8c0, + FCU_STATUS = 0x8c4, + FCU_STATUS1 = 0x8c8, + FCU_DRAM_ADDR_LO = 0x8cc, + FCU_DRAM_ADDR_HI = 0x8d0, + FCU_RAMBASE_ADDR_HI = 0x8d4, + FCU_RAMBASE_ADDR_LO = 0x8d8 +}; + +enum fcu_cmd { + FCU_CTRL_CMD_NOOP = 0, + FCU_CTRL_CMD_AUTH = 1, + FCU_CTRL_CMD_LOAD = 2, + FCU_CTRL_CMD_START = 3 +}; + +enum fcu_sts { + FCU_STS_NO_STS = 0, + FCU_STS_VERI_DONE = 1, + FCU_STS_LOAD_DONE = 2, + FCU_STS_VERI_FAIL = 3, + FCU_STS_LOAD_FAIL = 4, + FCU_STS_BUSY = 5 +}; #define UA_ECS (0x1 << 31) #define ACS_ABO_BITPOS 31 #define ACS_ACNO 0x7 @@ -98,6 +123,13 @@ enum hal_ae_csr { #define LCS_STATUS (0x1) #define MMC_SHARE_CS_BITPOS 2 #define GLOBAL_CSR 0xA00 +#define FCU_CTRL_AE_POS 0x8 +#define FCU_AUTH_STS_MASK 0x7 +#define FCU_STS_DONE_POS 0x9 +#define FCU_STS_AUTHFWLD_POS 0X8 +#define FCU_LOADED_AE_POS 0x16 +#define FW_AUTH_WAIT_PERIOD 10 +#define FW_AUTH_MAX_RETRY 300 #define SET_CAP_CSR(handle, csr, val) \ ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) @@ -106,14 +138,14 @@ enum hal_ae_csr { #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) #define AE_CSR(handle, ae) \ - (handle->hal_cap_ae_local_csr_addr_v + \ + ((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \ ((ae & handle->hal_handle->ae_mask) << 12)) #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) #define SET_AE_CSR(handle, ae, csr, val) \ ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) #define AE_XFER(handle, ae) \ - (handle->hal_cap_ae_xfer_csr_addr_v + \ + ((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \ ((ae & handle->hal_handle->ae_mask) << 12)) #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ ((reg & 0xff) << 2)) @@ -121,5 +153,4 @@ enum hal_ae_csr { ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) #define SRAM_WRITE(handle, addr, val) \ ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) -#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) #endif diff --git 
a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 2132a8cbc4ec..d97db990955d 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -47,32 +47,55 @@ #ifndef __ICP_QAT_UCLO_H__ #define __ICP_QAT_UCLO_H__ -#define ICP_QAT_AC_C_CPU_TYPE 0x00400000 +#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000 +#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 +#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_UCLO_MAX_AE 12 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 #define ICP_QAT_UCLO_MAX_XFER_REG 128 #define ICP_QAT_UCLO_MAX_GPR_REG 128 -#define ICP_QAT_UCLO_MAX_NN_REG 128 #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 #define ICP_QAT_UCLO_AE_ALL_CTX 0xff #define ICP_QAT_UOF_OBJID_LEN 8 #define ICP_QAT_UOF_FID 0xc6c2 #define ICP_QAT_UOF_MAJVER 0x4 #define ICP_QAT_UOF_MINVER 0x11 -#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff #define ICP_QAT_UOF_OBJS "UOF_OBJS" #define ICP_QAT_UOF_STRT "UOF_STRT" -#define ICP_QAT_UOF_GTID "UOF_GTID" #define ICP_QAT_UOF_IMAG "UOF_IMAG" #define ICP_QAT_UOF_IMEM "UOF_IMEM" -#define ICP_QAT_UOF_MSEG "UOF_MSEG" #define ICP_QAT_UOF_LOCAL_SCOPE 1 #define ICP_QAT_UOF_INIT_EXPR 0 #define ICP_QAT_UOF_INIT_REG 1 #define ICP_QAT_UOF_INIT_REG_CTX 2 #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 +#define ICP_QAT_SUOF_OBJ_ID_LEN 8 +#define ICP_QAT_SUOF_FID 0x53554f46 +#define ICP_QAT_SUOF_MAJVER 0x0 +#define ICP_QAT_SUOF_MINVER 0x1 +#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) +#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) +#define ICP_QAT_CSS_FWSK_MODULUS_LEN 256 +#define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4 +#define ICP_QAT_CSS_FWSK_PAD_LEN 252 +#define ICP_QAT_CSS_FWSK_PUB_LEN (ICP_QAT_CSS_FWSK_MODULUS_LEN + \ + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ + ICP_QAT_CSS_FWSK_PAD_LEN) +#define ICP_QAT_CSS_SIGNATURE_LEN 256 +#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ + ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ + ICP_QAT_SIMG_AE_INSTS_LEN) +#define ICP_QAT_CSS_AE_SIMG_LEN (sizeof(struct icp_qat_css_hdr) + \ + ICP_QAT_CSS_FWSK_PUB_LEN + \ + ICP_QAT_CSS_SIGNATURE_LEN + \ + ICP_QAT_CSS_AE_IMG_LEN) +#define ICP_QAT_AE_IMG_OFFSET (sizeof(struct icp_qat_css_hdr) + \ + ICP_QAT_CSS_FWSK_MODULUS_LEN + \ + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ + ICP_QAT_CSS_SIGNATURE_LEN) +#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) @@ -112,6 +135,11 @@ enum icp_qat_uof_regtype { ICP_NEIGH_REL, }; +enum icp_qat_css_fwtype { + CSS_AE_FIRMWARE = 0, + CSS_MMP_FIRMWARE = 1 +}; + struct icp_qat_uclo_page { struct icp_qat_uclo_encap_page *encap_page; struct icp_qat_uclo_region *region; @@ -235,7 +263,7 @@ struct icp_qat_uof_filechunkhdr { }; struct icp_qat_uof_objhdr { - unsigned int cpu_type; + unsigned int ac_dev_type; unsigned short min_cpu_ver; unsigned short max_cpu_ver; short max_chunks; @@ -326,7 +354,7 @@ struct icp_qat_uof_image { unsigned int img_name; unsigned int ae_assigned; unsigned int ctx_assigned; - unsigned int cpu_type; + unsigned int ac_dev_type; unsigned int entry_address; unsigned int fill_pattern[2]; unsigned int reloadable_size; @@ -374,4 +402,127 @@ struct icp_qat_uof_batch_init { unsigned int size; struct icp_qat_uof_batch_init *next; }; + +struct icp_qat_suof_img_hdr { + char *simg_buf; + unsigned long simg_len; + char *css_header; + char 
*css_key; + char *css_signature; + char *css_simg; + unsigned long simg_size; + unsigned int ae_num; + unsigned int ae_mask; + unsigned int fw_type; + unsigned long simg_name; + unsigned long appmeta_data; +}; + +struct icp_qat_suof_img_tbl { + unsigned int num_simgs; + struct icp_qat_suof_img_hdr *simg_hdr; +}; + +struct icp_qat_suof_handle { + unsigned int file_id; + unsigned int check_sum; + char min_ver; + char maj_ver; + char fw_type; + char *suof_buf; + unsigned int suof_size; + char *sym_str; + unsigned int sym_size; + struct icp_qat_suof_img_tbl img_table; +}; + +struct icp_qat_fw_auth_desc { + unsigned int img_len; + unsigned int reserved; + unsigned int css_hdr_high; + unsigned int css_hdr_low; + unsigned int img_high; + unsigned int img_low; + unsigned int signature_high; + unsigned int signature_low; + unsigned int fwsk_pub_high; + unsigned int fwsk_pub_low; + unsigned int img_ae_mode_data_high; + unsigned int img_ae_mode_data_low; + unsigned int img_ae_init_data_high; + unsigned int img_ae_init_data_low; + unsigned int img_ae_insts_high; + unsigned int img_ae_insts_low; +}; + +struct icp_qat_auth_chunk { + struct icp_qat_fw_auth_desc fw_auth_desc; + u64 chunk_size; + u64 chunk_bus_addr; +}; + +struct icp_qat_css_hdr { + unsigned int module_type; + unsigned int header_len; + unsigned int header_ver; + unsigned int module_id; + unsigned int module_vendor; + unsigned int date; + unsigned int size; + unsigned int key_size; + unsigned int module_size; + unsigned int exponent_size; + unsigned int fw_type; + unsigned int reserved[21]; +}; + +struct icp_qat_simg_ae_mode { + unsigned int file_id; + unsigned short maj_ver; + unsigned short min_ver; + unsigned int dev_type; + unsigned short devmax_ver; + unsigned short devmin_ver; + unsigned int ae_mask; + unsigned int ctx_enables; + char fw_type; + char ctx_mode; + char nn_mode; + char lm0_mode; + char lm1_mode; + char scs_mode; + char lm2_mode; + char lm3_mode; + char tindex_mode; + unsigned char reserved[7]; + char simg_name[256]; + char appmeta_data[256]; +}; + +struct icp_qat_suof_filehdr { + unsigned int file_id; + unsigned int check_sum; + char min_ver; + char maj_ver; + char fw_type; + char reserved; + unsigned short max_chunks; + unsigned short num_chunks; +}; + +struct icp_qat_suof_chunk_hdr { + char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN]; + u64 offset; + u64 size; +}; + +struct icp_qat_suof_strtable { + unsigned int tab_length; + unsigned int strings; +}; + +struct icp_qat_suof_objhdr { + unsigned int img_length; + unsigned int reserved; +}; #endif diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 9cab15497f04..3852d31ce0a4 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -49,6 +49,7 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport.h" +#include "adf_transport_access_macros.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "qat_crypto.h" @@ -66,13 +67,10 @@ void qat_crypto_put_instance(struct qat_crypto_instance *inst) static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) { - struct qat_crypto_instance *inst; - struct list_head *list_ptr, *tmp; + struct qat_crypto_instance *inst, *tmp; int i; - list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) { - inst = list_entry(list_ptr, struct qat_crypto_instance, list); - + list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) { for (i = 0; i < atomic_read(&inst->refctr); i++) 
qat_crypto_put_instance(inst); @@ -88,7 +86,7 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) if (inst->pke_rx) adf_remove_ring(inst->pke_rx); - list_del(list_ptr); + list_del(&inst->list); kfree(inst); } return 0; @@ -96,17 +94,13 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) struct qat_crypto_instance *qat_crypto_get_instance_node(int node) { - struct adf_accel_dev *accel_dev = NULL; - struct qat_crypto_instance *inst = NULL; - struct list_head *itr; + struct adf_accel_dev *accel_dev = NULL, *tmp_dev; + struct qat_crypto_instance *inst = NULL, *tmp_inst; unsigned long best = ~0; - list_for_each(itr, adf_devmgr_get_head()) { - struct adf_accel_dev *tmp_dev; + list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { unsigned long ctr; - tmp_dev = list_entry(itr, struct adf_accel_dev, list); - if ((node == dev_to_node(&GET_DEV(tmp_dev)) || dev_to_node(&GET_DEV(tmp_dev)) < 0) && adf_dev_started(tmp_dev) && @@ -118,19 +112,16 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } } } - if (!accel_dev) - pr_info("QAT: Could not find a device on node %d\n", node); - - /* Get any started device */ - list_for_each(itr, adf_devmgr_get_head()) { - struct adf_accel_dev *tmp_dev; - tmp_dev = list_entry(itr, struct adf_accel_dev, list); - - if (adf_dev_started(tmp_dev) && - !list_empty(&tmp_dev->crypto_list)) { - accel_dev = tmp_dev; - break; + if (!accel_dev) { + pr_info("QAT: Could not find a device on node %d\n", node); + /* Get any started device */ + list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { + if (adf_dev_started(tmp_dev) && + !list_empty(&tmp_dev->crypto_list)) { + accel_dev = tmp_dev; + break; + } } } @@ -138,11 +129,9 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) return NULL; best = ~0; - list_for_each(itr, &accel_dev->crypto_list) { - struct qat_crypto_instance *tmp_inst; + list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) { unsigned long ctr; - tmp_inst = list_entry(itr, struct qat_crypto_instance, list); ctr = atomic_read(&tmp_inst->refctr); if (best > ctr) { inst = tmp_inst; @@ -159,6 +148,97 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) return inst; } +/** + * qat_crypto_dev_config() - create dev config required to create crypto inst. + * + * @accel_dev: Pointer to acceleration device. + * + * Function creates device configuration required to create crypto instances + * + * Return: 0 on success, error code otherwise. 
+ */ +int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + int cpus = num_online_cpus(); + int banks = GET_MAX_BANKS(accel_dev); + int instances = min(cpus, banks); + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int i; + unsigned long val; + + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) + goto err; + if (adf_cfg_section_add(accel_dev, "Accelerator0")) + goto err; + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 8; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 10; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, (void *)&val, ADF_DEC)) + goto err; + } + + val = i; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + ADF_NUM_CY, (void *)&val, ADF_DEC)) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(qat_crypto_dev_config); + static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) { int i; diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 380e761801a7..0ac0ba867611 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -45,21 +45,22 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/slab.h> +#include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_hal.h" #include "icp_qat_uclo.h" -#define BAD_REGADDR 0xffff -#define MAX_RETRY_TIMES 10000 -#define INIT_CTX_ARB_VALUE 0x0 +#define BAD_REGADDR 0xffff +#define MAX_RETRY_TIMES 10000 +#define INIT_CTX_ARB_VALUE 0x0 #define INIT_CTX_ENABLE_VALUE 0x0 -#define INIT_PC_VALUE 0x0 +#define INIT_PC_VALUE 0x0 #define INIT_WAKEUP_EVENTS_VALUE 0x1 #define INIT_SIG_EVENTS_VALUE 0x1 #define INIT_CCENABLE_VALUE 0x2000 -#define RST_CSR_QAT_LSB 20 +#define RST_CSR_QAT_LSB 20 #define RST_CSR_AE_LSB 0 #define MC_TIMESTAMP_ENABLE (0x1 << 7) @@ -185,7 +186,7 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) return 0; } - if (!times) { + if (times < 0) { pr_err("QAT: wait_num_cycles time out\n"); return -EFAULT; } @@ -391,9 +392,6 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) unsigned int times = MAX_RETRY_TIMES; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; - qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, (unsigned int *)&base_cnt); base_cnt &= 0xffff; @@ -413,6 +411,20 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) return 0; } +int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, + unsigned int ae) +{ + unsigned int enable = 0, active = 0; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); + if ((enable & (0xff << CE_ENABLE_BITPOS)) || + (active & (1 << ACS_ABO_BITPOS))) + return 1; + else + return 0; +} + static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) { unsigned int misc_ctl; @@ -425,8 +437,6 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) (~MC_TIMESTAMP_ENABLE)); for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); } @@ -440,8 +450,9 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) { - void __iomem *csr_addr = handle->hal_ep_csr_addr_v + - ESRAM_AUTO_INIT_CSR_OFFSET; + void __iomem *csr_addr = + (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v + + ESRAM_AUTO_INIT_CSR_OFFSET); unsigned int csr_val, times = 30; csr_val = ADF_CSR_RD(csr_addr, 0); @@ -493,8 +504,6 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) /* Set undefined power-up/reset states to reasonable default values */ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, INIT_CTX_ENABLE_VALUE); qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, @@ -598,25 +607,31 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } -static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) +static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle) { unsigned char ae; - unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; - int times = MAX_RETRY_TIMES; - unsigned int csr_val = 0; unsigned short reg; - unsigned int savctx = 0; - int ret = 0; for (ae 
= 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, reg, 0); qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, reg, 0); } + } +} + +static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) +{ + unsigned char ae; + unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; + int times = MAX_RETRY_TIMES; + unsigned int csr_val = 0; + unsigned int savctx = 0; + int ret = 0; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); @@ -638,8 +653,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) qat_hal_enable_ctx(handle, ae, ctx_mask); } for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; /* wait for AE to finish */ do { ret = qat_hal_wait_cycles(handle, ae, 20, 1); @@ -667,10 +680,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) return 0; } -#define ICP_DH895XCC_AE_OFFSET 0x20000 -#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) +#define ICP_QAT_AE_OFFSET 0x20000 +#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000) #define LOCAL_TO_XFER_REG_OFFSET 0x800 -#define ICP_DH895XCC_EP_OFFSET 0x3a000 +#define ICP_QAT_EP_OFFSET 0x3a000 int qat_hal_init(struct adf_accel_dev *accel_dev) { unsigned char ae; @@ -687,15 +700,22 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) if (!handle) return -ENOMEM; - handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr + - ICP_DH895XCC_CAP_OFFSET; - handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr + - ICP_DH895XCC_AE_OFFSET; - handle->hal_ep_csr_addr_v = misc_bar->virt_addr + - ICP_DH895XCC_EP_OFFSET; - handle->hal_cap_ae_local_csr_addr_v = - handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; handle->hal_sram_addr_v = sram_bar->virt_addr; + handle->hal_cap_g_ctl_csr_addr_v = + (void __iomem *)((uintptr_t)misc_bar->virt_addr + + ICP_QAT_CAP_OFFSET); + handle->hal_cap_ae_xfer_csr_addr_v = + (void __iomem *)((uintptr_t)misc_bar->virt_addr + + ICP_QAT_AE_OFFSET); + handle->hal_ep_csr_addr_v = + (void __iomem *)((uintptr_t)misc_bar->virt_addr + + ICP_QAT_EP_OFFSET); + handle->hal_cap_ae_local_csr_addr_v = + (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + + LOCAL_TO_XFER_REG_OFFSET); + handle->pci_dev = pci_info->pci_dev; + handle->fw_auth = (handle->pci_dev->device == + ADF_DH895XCC_PCI_DEVICE_ID) ? 
false : true; handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); if (!handle->hal_handle) goto out_hal_handle; @@ -723,14 +743,16 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); goto out_err; } - if (qat_hal_clear_gpr(handle)) - goto out_err; + qat_hal_clear_xfer(handle); + if (!handle->fw_auth) { + if (qat_hal_clear_gpr(handle)) + goto out_err; + } + /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { unsigned int csr_val = 0; - if (!(hw_data->ae_mask & (1 << ae))) - continue; qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); csr_val |= 0x1; qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); @@ -756,15 +778,31 @@ void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { - qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & + int retry = 0; + unsigned int fcu_sts = 0; + + if (handle->fw_auth) { + SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START); + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); + if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1)) + return; + } while (retry++ < FW_AUTH_MAX_RETRY); + pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae, + fcu_sts); + } else { + qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); - qat_hal_enable_ctx(handle, ae, ctx_mask); + qat_hal_enable_ctx(handle, ae, ctx_mask); + } } void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { - qat_hal_disable_ctx(handle, ae, ctx_mask); + if (!handle->fw_auth) + qat_hal_disable_ctx(handle, ae, ctx_mask); } void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index c48f181e8941..25d15f19c2b3 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -47,7 +47,7 @@ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> - +#include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" @@ -119,10 +119,10 @@ static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, { if ((!str_table->table_len) || (str_offset > str_table->table_len)) return NULL; - return (char *)(((unsigned long)(str_table->strings)) + str_offset); + return (char *)(((uintptr_t)(str_table->strings)) + str_offset); } -static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) +static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) { int maj = hdr->maj_ver & 0xff; int min = hdr->min_ver & 0xff; @@ -139,6 +139,31 @@ static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) return 0; } +static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) +{ + int maj = suof_hdr->maj_ver & 0xff; + int min = suof_hdr->min_ver & 0xff; + + if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { + pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); + return -EINVAL; + } + if (suof_hdr->fw_type != 0) { + pr_err("QAT: unsupported firmware type\n"); + return -EINVAL; + } + if (suof_hdr->num_chunks <= 0x1) { + pr_err("QAT: SUOF chunk amount is incorrect\n"); + return -EINVAL; + } + if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { + pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", + maj, 
min); + return -EINVAL; + } + return 0; +} + static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, unsigned int addr, unsigned int *val, unsigned int num_in_bytes) @@ -275,7 +300,7 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle unsigned int i, flag = 0; mem_val_attr = - (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + + (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + sizeof(struct icp_qat_uof_initmem)); init_header = *init_tab_base; @@ -425,8 +450,8 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) if (qat_uclo_init_ae_memory(handle, initmem)) return -EINVAL; } - initmem = (struct icp_qat_uof_initmem *)((unsigned long)( - (unsigned long)initmem + + initmem = (struct icp_qat_uof_initmem *)((uintptr_t)( + (uintptr_t)initmem + sizeof(struct icp_qat_uof_initmem)) + (sizeof(struct icp_qat_uof_memvar_attr) * initmem->val_attr_num)); @@ -454,7 +479,7 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, int i; struct icp_qat_uof_chunkhdr *chunk_hdr = (struct icp_qat_uof_chunkhdr *) - ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); + ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); for (i = 0; i < obj_hdr->num_chunks; i++) { if ((cur < (void *)&chunk_hdr[i]) && @@ -596,7 +621,7 @@ static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; for (i = 0; i < uword_block_tab->entry_num; i++) page->uwblock[i].micro_words = - (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; + (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; } static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, @@ -697,7 +722,7 @@ qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, memcpy(&str_table->table_len, obj_hdr->file_buff + chunk_hdr->offset, sizeof(str_table->table_len)); hdr_size = (char *)&str_table->strings - (char *)str_table; - str_table->strings = (unsigned long)obj_hdr->file_buff + + str_table->strings = (uintptr_t)obj_hdr->file_buff + chunk_hdr->offset + hdr_size; return str_table; } @@ -721,13 +746,31 @@ qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, } } +static unsigned int +qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) +{ + switch (handle->pci_dev->device) { + case ADF_DH895XCC_PCI_DEVICE_ID: + return ICP_QAT_AC_895XCC_DEV_TYPE; + case ADF_C62X_PCI_DEVICE_ID: + return ICP_QAT_AC_C62X_DEV_TYPE; + case ADF_C3XXX_PCI_DEVICE_ID: + return ICP_QAT_AC_C3XXX_DEV_TYPE; + default: + pr_err("QAT: unsupported device 0x%x\n", + handle->pci_dev->device); + return 0; + } +} + static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) { unsigned int maj_ver, prod_type = obj_handle->prod_type; - if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { - pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", - obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); + if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { + pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", + obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, + prod_type); return -EINVAL; } maj_ver = obj_handle->prod_rev & 0xff; @@ -932,7 +975,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) obj_handle->obj_hdr->file_buff; obj_handle->uword_in_bytes = 6; - obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; + 
obj_handle->prod_type = qat_uclo_get_dev_type(handle); obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { @@ -969,23 +1012,435 @@ out_err: return -EFAULT; } -void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size) +static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_suof_filehdr *suof_ptr, + int suof_size) { - qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4)); + unsigned int check_sum = 0; + unsigned int min_ver_offset = 0; + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + + suof_handle->file_id = ICP_QAT_SUOF_FID; + suof_handle->suof_buf = (char *)suof_ptr; + suof_handle->suof_size = suof_size; + min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, + min_ver); + check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, + min_ver_offset); + if (check_sum != suof_ptr->check_sum) { + pr_err("QAT: incorrect SUOF checksum\n"); + return -EINVAL; + } + suof_handle->check_sum = suof_ptr->check_sum; + suof_handle->min_ver = suof_ptr->min_ver; + suof_handle->maj_ver = suof_ptr->maj_ver; + suof_handle->fw_type = suof_ptr->fw_type; + return 0; } -int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size) +static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle, + struct icp_qat_suof_img_hdr *suof_img_hdr, + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { - struct icp_qat_uof_filehdr *filehdr; - struct icp_qat_uclo_objhandle *objhdl; + struct icp_qat_simg_ae_mode *ae_mode; + struct icp_qat_suof_objhdr *suof_objhdr; + + suof_img_hdr->simg_buf = (suof_handle->suof_buf + + suof_chunk_hdr->offset + + sizeof(*suof_objhdr)); + suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t) + (suof_handle->suof_buf + + suof_chunk_hdr->offset))->img_length; + + suof_img_hdr->css_header = suof_img_hdr->simg_buf; + suof_img_hdr->css_key = (suof_img_hdr->css_header + + sizeof(struct icp_qat_css_hdr)); + suof_img_hdr->css_signature = suof_img_hdr->css_key + + ICP_QAT_CSS_FWSK_MODULUS_LEN + + ICP_QAT_CSS_FWSK_EXPONENT_LEN; + suof_img_hdr->css_simg = suof_img_hdr->css_signature + + ICP_QAT_CSS_SIGNATURE_LEN; + + ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); + suof_img_hdr->ae_mask = ae_mode->ae_mask; + suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; + suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; + suof_img_hdr->fw_type = ae_mode->fw_type; +} - BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= - (sizeof(handle->hal_handle->ae_mask) * 8)); +static void +qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) +{ + char **sym_str = (char **)&suof_handle->sym_str; + unsigned int *sym_size = &suof_handle->sym_size; + struct icp_qat_suof_strtable *str_table_obj; + + *sym_size = *(unsigned int *)(uintptr_t) + (suof_chunk_hdr->offset + suof_handle->suof_buf); + *sym_str = (char *)(uintptr_t) + (suof_handle->suof_buf + suof_chunk_hdr->offset + + sizeof(str_table_obj->tab_length)); +} - if (!handle || !addr_ptr || mem_size < 24) +static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_suof_img_hdr *img_hdr) +{ + struct icp_qat_simg_ae_mode *img_ae_mode = NULL; + unsigned int prod_rev, maj_ver, prod_type; + + prod_type = qat_uclo_get_dev_type(handle); + img_ae_mode = (struct icp_qat_simg_ae_mode 
*)img_hdr->css_simg; + prod_rev = PID_MAJOR_REV | + (PID_MINOR_REV & handle->hal_handle->revision_id); + if (img_ae_mode->dev_type != prod_type) { + pr_err("QAT: incompatible product type %x\n", + img_ae_mode->dev_type); return -EINVAL; + } + maj_ver = prod_rev & 0xff; + if ((maj_ver > img_ae_mode->devmax_ver) || + (maj_ver < img_ae_mode->devmin_ver)) { + pr_err("QAT: incompatible device majver 0x%x\n", maj_ver); + return -EINVAL; + } + return 0; +} + +static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; + + kfree(sobj_handle->img_table.simg_hdr); + sobj_handle->img_table.simg_hdr = NULL; + kfree(handle->sobj_handle); + handle->sobj_handle = NULL; +} + +static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, + unsigned int img_id, unsigned int num_simgs) +{ + struct icp_qat_suof_img_hdr img_header; + + if (img_id != num_simgs - 1) { + memcpy(&img_header, &suof_img_hdr[num_simgs - 1], + sizeof(*suof_img_hdr)); + memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], + sizeof(*suof_img_hdr)); + memcpy(&suof_img_hdr[img_id], &img_header, + sizeof(*suof_img_hdr)); + } +} + +static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_suof_filehdr *suof_ptr, + int suof_size) +{ + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; + struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; + int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE; + unsigned int i = 0; + struct icp_qat_suof_img_hdr img_header; + + if (!suof_ptr || (suof_size == 0)) { + pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); + return -EINVAL; + } + if (qat_uclo_check_suof_format(suof_ptr)) + return -EINVAL; + ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); + if (ret) + return ret; + suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *) + ((uintptr_t)suof_ptr + sizeof(*suof_ptr)); + + qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); + suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; + + if (suof_handle->img_table.num_simgs != 0) { + suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs * + sizeof(img_header), GFP_KERNEL); + if (!suof_img_hdr) + return -ENOMEM; + suof_handle->img_table.simg_hdr = suof_img_hdr; + } + + for (i = 0; i < suof_handle->img_table.num_simgs; i++) { + qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i], + &suof_chunk_hdr[1 + i]); + ret = qat_uclo_check_simg_compat(handle, + &suof_img_hdr[i]); + if (ret) + return ret; + if ((suof_img_hdr[i].ae_mask & 0x1) != 0) + ae0_img = i; + } + qat_uclo_tail_img(suof_img_hdr, ae0_img, + suof_handle->img_table.num_simgs); + return 0; +} + +#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) +#define BITS_IN_DWORD 32 + +static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc *desc) +{ + unsigned int fcu_sts, retry = 0; + u64 bus_addr; + + bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) + - sizeof(struct icp_qat_auth_chunk); + SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD)); + SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr); + SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH); + + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) + goto auth_fail; + if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) + return 0; + } 
while (retry++ < FW_AUTH_MAX_RETRY); +auth_fail: + pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", + fcu_sts & FCU_AUTH_STS_MASK, retry); + return -EINVAL; +} + +static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, + struct icp_firml_dram_desc *dram_desc, + unsigned int size) +{ + void *vptr; + dma_addr_t ptr; + + vptr = dma_alloc_coherent(&handle->pci_dev->dev, + size, &ptr, GFP_KERNEL); + if (!vptr) + return -ENOMEM; + dram_desc->dram_base_addr_v = vptr; + dram_desc->dram_bus_addr = ptr; + dram_desc->dram_size = size; + return 0; +} + +static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, + struct icp_firml_dram_desc *dram_desc) +{ + dma_free_coherent(&handle->pci_dev->dev, + (size_t)(dram_desc->dram_size), + (dram_desc->dram_base_addr_v), + dram_desc->dram_bus_addr); + memset(dram_desc, 0, sizeof(*dram_desc)); +} + +static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_firml_dram_desc dram_desc; + + dram_desc.dram_base_addr_v = *desc; + dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *) + (*desc))->chunk_bus_addr; + dram_desc.dram_size = ((struct icp_qat_auth_chunk *) + (*desc))->chunk_size; + qat_uclo_simg_free(handle, &dram_desc); +} + +static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; + struct icp_qat_fw_auth_desc *auth_desc; + struct icp_qat_auth_chunk *auth_chunk; + u64 virt_addr, bus_addr, virt_base; + unsigned int length, simg_offset = sizeof(*auth_chunk); + struct icp_firml_dram_desc img_desc; + + if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) { + pr_err("QAT: error, input image size overflow %d\n", size); + return -EINVAL; + } + length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 
+ ICP_QAT_CSS_AE_SIMG_LEN + simg_offset : + size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset; + if (qat_uclo_simg_alloc(handle, &img_desc, length)) { + pr_err("QAT: error, allocate continuous dram fail\n"); + return -ENOMEM; + } + + auth_chunk = img_desc.dram_base_addr_v; + auth_chunk->chunk_size = img_desc.dram_size; + auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; + virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset; + bus_addr = img_desc.dram_bus_addr + simg_offset; + auth_desc = img_desc.dram_base_addr_v; + auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->css_hdr_low = (unsigned int)bus_addr; + virt_addr = virt_base; + + memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); + /* pub key */ + bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + + sizeof(*css_hdr); + virt_addr = virt_addr + sizeof(*css_hdr); + + auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->fwsk_pub_low = (unsigned int)bus_addr; + + memcpy((void *)(uintptr_t)virt_addr, + (void *)(image + sizeof(*css_hdr)), + ICP_QAT_CSS_FWSK_MODULUS_LEN); + /* padding */ + memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN), + 0, ICP_QAT_CSS_FWSK_PAD_LEN); + + /* exponent */ + memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN + + ICP_QAT_CSS_FWSK_PAD_LEN), + (void *)(image + sizeof(*css_hdr) + + ICP_QAT_CSS_FWSK_MODULUS_LEN), + sizeof(unsigned int)); + + /* signature */ + bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, + auth_desc->fwsk_pub_low) + + ICP_QAT_CSS_FWSK_PUB_LEN; + virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN; + auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->signature_low = (unsigned int)bus_addr; + + memcpy((void *)(uintptr_t)virt_addr, + (void *)(image + sizeof(*css_hdr) + + ICP_QAT_CSS_FWSK_MODULUS_LEN + + ICP_QAT_CSS_FWSK_EXPONENT_LEN), + ICP_QAT_CSS_SIGNATURE_LEN); + + bus_addr = ADD_ADDR(auth_desc->signature_high, + auth_desc->signature_low) + + ICP_QAT_CSS_SIGNATURE_LEN; + virt_addr += ICP_QAT_CSS_SIGNATURE_LEN; + + auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->img_low = (unsigned int)bus_addr; + auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET; + memcpy((void *)(uintptr_t)virt_addr, + (void *)(image + ICP_QAT_AE_IMG_OFFSET), + auth_desc->img_len); + virt_addr = virt_base; + /* AE firmware */ + if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == + CSS_AE_FIRMWARE) { + auth_desc->img_ae_mode_data_high = auth_desc->img_high; + auth_desc->img_ae_mode_data_low = auth_desc->img_low; + bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, + auth_desc->img_ae_mode_data_low) + + sizeof(struct icp_qat_simg_ae_mode); + + auth_desc->img_ae_init_data_high = (unsigned int) + (bus_addr >> BITS_IN_DWORD); + auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; + bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; + auth_desc->img_ae_insts_high = (unsigned int) + (bus_addr >> BITS_IN_DWORD); + auth_desc->img_ae_insts_low = (unsigned int)bus_addr; + } else { + auth_desc->img_ae_insts_high = auth_desc->img_high; + auth_desc->img_ae_insts_low = auth_desc->img_low; + } + *desc = auth_desc; + return 0; +} + +static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc *desc) +{ + unsigned int i; + unsigned int fcu_sts; + struct icp_qat_simg_ae_mode *virt_addr; + unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS; + + virt_addr = (void *)((uintptr_t)desc + + sizeof(struct 
icp_qat_auth_chunk) + + sizeof(struct icp_qat_css_hdr) + + ICP_QAT_CSS_FWSK_PUB_LEN + + ICP_QAT_CSS_SIGNATURE_LEN); + for (i = 0; i < handle->hal_handle->ae_max_num; i++) { + int retry = 0; + + if (!((virt_addr->ae_mask >> i) & 0x1)) + continue; + if (qat_hal_check_ae_active(handle, i)) { + pr_err("QAT: AE %d is active\n", i); + return -EINVAL; + } + SET_CAP_CSR(handle, FCU_CONTROL, + (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS))); + + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); + if (((fcu_sts & FCU_AUTH_STS_MASK) == + FCU_STS_LOAD_DONE) && + ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i))) + break; + } while (retry++ < FW_AUTH_MAX_RETRY); + if (retry > FW_AUTH_MAX_RETRY) { + pr_err("QAT: firmware load failed timeout %x\n", retry); + return -EINVAL; + } + } + return 0; +} + +static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_suof_handle *suof_handle; + + suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL); + if (!suof_handle) + return -ENOMEM; + handle->sobj_handle = suof_handle; + if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { + qat_uclo_del_suof(handle); + pr_err("QAT: map SUOF failed\n"); + return -EINVAL; + } + return 0; +} + +int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_fw_auth_desc *desc = NULL; + int status = 0; + + if (handle->fw_auth) { + if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc)) + status = qat_uclo_auth_fw(handle, desc); + qat_uclo_ummap_auth_fw(handle, &desc); + } else { + if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) { + pr_err("QAT: C3XXX doesn't support unsigned MMP\n"); + return -EINVAL; + } + qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size); + } + return status; +} + +static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_uof_filehdr *filehdr; + struct icp_qat_uclo_objhandle *objhdl; + objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); if (!objhdl) return -ENOMEM; @@ -993,7 +1448,7 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, if (!objhdl->obj_buf) goto out_objbuf_err; filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; - if (qat_uclo_check_format(filehdr)) + if (qat_uclo_check_uof_format(filehdr)) goto out_objhdr_err; objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, ICP_QAT_UOF_OBJS); @@ -1016,11 +1471,27 @@ out_objbuf_err: return -ENOMEM; } +int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= + (sizeof(handle->hal_handle->ae_mask) * 8)); + + if (!handle || !addr_ptr || mem_size < 24) + return -EINVAL; + + return (handle->fw_auth) ? 
+ qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) : + qat_uclo_map_uof_obj(handle, addr_ptr, mem_size); +} + void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int a; + if (handle->sobj_handle) + qat_uclo_del_suof(handle); if (!obj_handle) return; @@ -1055,7 +1526,7 @@ static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, encap_page->uwblock[i].words_num - 1) { raddr -= encap_page->uwblock[i].start_addr; raddr *= obj_handle->uword_in_bytes; - memcpy(&uwrd, (void *)(((unsigned long) + memcpy(&uwrd, (void *)(((uintptr_t) encap_page->uwblock[i].micro_words) + raddr), obj_handle->uword_in_bytes); uwrd = uwrd & 0xbffffffffffull; @@ -1147,7 +1618,33 @@ static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, } } -int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) +static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int i; + struct icp_qat_fw_auth_desc *desc = NULL; + struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; + struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; + + for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { + if (qat_uclo_map_auth_fw(handle, + (char *)simg_hdr[i].simg_buf, + (unsigned int) + (simg_hdr[i].simg_len), + &desc)) + goto wr_err; + if (qat_uclo_auth_fw(handle, desc)) + goto wr_err; + if (qat_uclo_load_fw(handle, desc)) + goto wr_err; + qat_uclo_ummap_auth_fw(handle, &desc); + } + return 0; +wr_err: + qat_uclo_ummap_auth_fw(handle, &desc); + return -EINVAL; +} + +static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int i; @@ -1164,3 +1661,9 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) } return 0; } + +int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) +{ + return (handle->fw_auth) ? 
qat_uclo_wr_suof_img(handle) : + qat_uclo_wr_uof_img(handle); +} diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 8c79c543740f..180a00ed7f89 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -1,5 +1,3 @@ ccflags-y := -I$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o -qat_dh895xcc-objs := adf_drv.o \ - adf_isr.o \ - adf_dh895xcc_hw_data.o +qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index ff54257eced4..6e1d5e185526 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -48,7 +48,6 @@ #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> #include "adf_dh895xcc_hw_data.h" -#include "adf_drv.h" /* Worker thread to service arbiter mappings based on dev SKUs */ static const uint32_t thrd_to_arb_map_sku4[] = { @@ -143,8 +142,8 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self) return DEV_SKU_UNKNOWN; } -void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, - uint32_t const **arb_map_config) +static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) { switch (accel_dev->accel_pci_dev.sku) { case DEV_SKU_1: diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 88dffb297346..092f7353ed23 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -53,7 +53,6 @@ #define ADF_DH895XCC_ETR_BAR 2 #define ADF_DH895XCC_RX_RINGS_OFFSET 8 #define ADF_DH895XCC_TX_RINGS_MASK 0xFF -#define ADF_DH895XCC_FUSECTL_OFFSET 0x40 #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 #define ADF_DH895XCC_FUSECTL_SKU_1 0x0 @@ -65,7 +64,6 @@ #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF -#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C #define ADF_DH895XCC_ETR_MAX_BANKS 32 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) @@ -80,11 +78,12 @@ #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) -#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C) -#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8) #define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) /* FW names */ #define ADF_DH895XCC_FW "qat_895xcc.bin" -#define ADF_DH895XCC_MMP "qat_mmp.bin" +#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin" + +void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); #endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index f8dd14f232c8..a8c4b92a7cbd 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -60,11 +60,7 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_cfg.h> -#include <adf_transport_access_macros.h> #include "adf_dh895xcc_hw_data.h" -#include "adf_drv.h" - -static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; #define ADF_SYSTEM_DEVICE(device_id) \ 
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} @@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev); static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, - .name = adf_driver_name, + .name = ADF_DH895XCC_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, .sriov_configure = adf_sriov_configure, @@ -120,87 +116,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } -static int adf_dev_configure(struct adf_accel_dev *accel_dev) -{ - int cpus = num_online_cpus(); - int banks = GET_MAX_BANKS(accel_dev); - int instances = min(cpus, banks); - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int i; - unsigned long val; - - if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) - goto err; - if (adf_cfg_section_add(accel_dev, "Accelerator0")) - goto err; - for (i = 0; i < instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 8; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 10; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, (void *)&val, ADF_DEC)) - goto err; - } - - val = i; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - ADF_NUM_CY, (void *)&val, ADF_DEC)) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); - return -EINVAL; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -253,15 +168,9 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } accel_dev->hw_device = hw_data; - switch (ent->device) { - case ADF_DH895XCC_PCI_DEVICE_ID: - adf_init_hw_data_dh895xcc(accel_dev->hw_device); - break; - default: - return -ENODEV; - } + adf_init_hw_data_dh895xcc(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, + pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, &hw_data->fuses); /* Get Accelerators and Accelerators Engines masks */ @@ -316,13 +225,13 @@ static int adf_probe(struct 
pci_dev *pdev, const struct pci_device_id *ent) pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } - if (pci_request_regions(pdev, adf_driver_name)) { + if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) { ret = -EFAULT; goto out_err_disable; } /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, + pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &hw_data->accel_capabilities_mask); /* Find and map all the device's BARS */ @@ -357,7 +266,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err_free_reg; } - ret = adf_dev_configure(accel_dev); + ret = qat_crypto_dev_config(accel_dev); if (ret) goto out_err_free_reg; diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile index 85399fcbbad4..5c3ccf8267eb 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile @@ -1,5 +1,3 @@ ccflags-y := -I$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o -qat_dh895xccvf-objs := adf_drv.o \ - adf_isr.o \ - adf_dh895xccvf_hw_data.o +qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a9a27eff41fb..dc04ab68d24d 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -48,7 +48,6 @@ #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> #include "adf_dh895xccvf_hw_data.h" -#include "adf_drv.h" static struct adf_hw_device_class dh895xcciov_class = { .name = ADF_DH895XCCVF_DEVICE_NAME, @@ -136,7 +135,6 @@ static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &dh895xcciov_class; - hw_data->instance_id = dh895xcciov_class.instances++; hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS; hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; @@ -164,9 +162,12 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) hw_data->enable_ints = adf_vf_void_noop; hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->dev_class->instances++; + adf_devmgr_update_class_index(hw_data); } void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; + adf_devmgr_update_class_index(hw_data); } diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h index 8f6babfef629..6ddc19bd4410 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h @@ -56,13 +56,9 @@ #define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF #define ADF_DH895XCCIOV_ETR_BAR 0 #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1 - #define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200 -#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0) - -#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204 -#define ADF_DH895XCC_VINTSOU_BUN BIT(0) -#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1) - #define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208 + +void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); #endif diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c 
b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 789426f21882..f8cc4bf0a50c 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -60,11 +60,7 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_cfg.h> -#include <adf_transport_access_macros.h> #include "adf_dh895xccvf_hw_data.h" -#include "adf_drv.h" - -static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME; #define ADF_SYSTEM_DEVICE(device_id) \ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} @@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev); static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, - .name = adf_driver_name, + .name = ADF_DH895XCCVF_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, }; @@ -121,83 +117,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, pf); } -static int adf_dev_configure(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - unsigned long val, bank = 0; - - if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) - goto err; - if (adf_cfg_section_add(accel_dev, "Accelerator0")) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, - (void *)&bank, ADF_DEC)) - goto err; - - val = bank; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, - (void *)&val, ADF_DEC)) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0); - - val = 128; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, - (void *)&val, ADF_DEC)) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 8; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 10; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, - (int)bank); - if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, (void *)&val, ADF_DEC)) - goto err; - - val = 1; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - ADF_NUM_CY, (void *)&val, ADF_DEC)) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n"); - return -EINVAL; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -243,14 +162,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } accel_dev->hw_device = hw_data; - switch (ent->device) { - case ADF_DH895XCCIOV_PCI_DEVICE_ID: - adf_init_hw_data_dh895xcciov(accel_dev->hw_device); - break; - default: - ret = -ENODEV; - goto out_err; - 
} + adf_init_hw_data_dh895xcciov(accel_dev->hw_device); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); @@ -295,7 +207,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } - if (pci_request_regions(pdev, adf_driver_name)) { + if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) { ret = -EFAULT; goto out_err_disable; } @@ -322,7 +234,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Completion for VF2PF request/response message exchange */ init_completion(&accel_dev->vf.iov_msg_completion); - ret = adf_dev_configure(accel_dev); + ret = qat_crypto_dev_config(accel_dev); if (ret) goto out_err_free_reg; diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index 2c0d63d48747..dbcbbe242bd6 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c @@ -83,6 +83,14 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); else rctx->dst_nents = rctx->src_nents; + if (rctx->src_nents < 0) { + dev_err(qce->dev, "Invalid numbers of src SG.\n"); + return rctx->src_nents; + } + if (rctx->dst_nents < 0) { + dev_err(qce->dev, "Invalid numbers of dst SG.\n"); + return -rctx->dst_nents; + } rctx->dst_nents += 1; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 0c9973ec80eb..47e114ac09d0 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -92,6 +92,11 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) } rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); + if (rctx->src_nents < 0) { + dev_err(qce->dev, "Invalid numbers of src SG.\n"); + return rctx->src_nents; + } + ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); if (ret < 0) return ret; diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile new file mode 100644 index 000000000000..7051c6c715f3 --- /dev/null +++ b/drivers/crypto/rockchip/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o +rk_crypto-objs := rk3288_crypto.o \ + rk3288_crypto_ablkcipher.o \ diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c new file mode 100644 index 000000000000..da9c73dce4af --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -0,0 +1,394 @@ +/* + * Crypto acceleration support for Rockchip RK3288 + * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Zain Wang <zain.wang@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
+ */ + +#include "rk3288_crypto.h" +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/reset.h> + +static int rk_crypto_enable_clk(struct rk_crypto_info *dev) +{ + int err; + + err = clk_prepare_enable(dev->sclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n", + __func__, __LINE__); + goto err_return; + } + err = clk_prepare_enable(dev->aclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n", + __func__, __LINE__); + goto err_aclk; + } + err = clk_prepare_enable(dev->hclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n", + __func__, __LINE__); + goto err_hclk; + } + err = clk_prepare_enable(dev->dmaclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n", + __func__, __LINE__); + goto err_dmaclk; + } + return err; +err_dmaclk: + clk_disable_unprepare(dev->hclk); +err_hclk: + clk_disable_unprepare(dev->aclk); +err_aclk: + clk_disable_unprepare(dev->sclk); +err_return: + return err; +} + +static void rk_crypto_disable_clk(struct rk_crypto_info *dev) +{ + clk_disable_unprepare(dev->dmaclk); + clk_disable_unprepare(dev->hclk); + clk_disable_unprepare(dev->aclk); + clk_disable_unprepare(dev->sclk); +} + +static int check_alignment(struct scatterlist *sg_src, + struct scatterlist *sg_dst, + int align_mask) +{ + int in, out, align; + + in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && + IS_ALIGNED((uint32_t)sg_src->length, align_mask); + if (!sg_dst) + return in; + out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && + IS_ALIGNED((uint32_t)sg_dst->length, align_mask); + align = in && out; + + return (align && (sg_src->length == sg_dst->length)); +} + +static int rk_load_data(struct rk_crypto_info *dev, + struct scatterlist *sg_src, + struct scatterlist *sg_dst) +{ + unsigned int count; + + dev->aligned = dev->aligned ? + check_alignment(sg_src, sg_dst, dev->align_size) : + dev->aligned; + if (dev->aligned) { + count = min(dev->left_bytes, sg_src->length); + dev->left_bytes -= count; + + if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { + dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", + __func__, __LINE__); + return -EINVAL; + } + dev->addr_in = sg_dma_address(sg_src); + + if (sg_dst) { + if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { + dev_err(dev->dev, + "[%s:%d] dma_map_sg(dst) error\n", + __func__, __LINE__); + dma_unmap_sg(dev->dev, sg_src, 1, + DMA_TO_DEVICE); + return -EINVAL; + } + dev->addr_out = sg_dma_address(sg_dst); + } + } else { + count = (dev->left_bytes > PAGE_SIZE) ? 
+ PAGE_SIZE : dev->left_bytes; + + if (!sg_pcopy_to_buffer(dev->first, dev->nents, + dev->addr_vir, count, + dev->total - dev->left_bytes)) { + dev_err(dev->dev, "[%s:%d] pcopy err\n", + __func__, __LINE__); + return -EINVAL; + } + dev->left_bytes -= count; + sg_init_one(&dev->sg_tmp, dev->addr_vir, count); + if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { + dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", + __func__, __LINE__); + return -ENOMEM; + } + dev->addr_in = sg_dma_address(&dev->sg_tmp); + + if (sg_dst) { + if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, + DMA_FROM_DEVICE)) { + dev_err(dev->dev, + "[%s:%d] dma_map_sg(sg_tmp) error\n", + __func__, __LINE__); + dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, + DMA_TO_DEVICE); + return -ENOMEM; + } + dev->addr_out = sg_dma_address(&dev->sg_tmp); + } + } + dev->count = count; + return 0; +} + +static void rk_unload_data(struct rk_crypto_info *dev) +{ + struct scatterlist *sg_in, *sg_out; + + sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; + dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); + + if (dev->sg_dst) { + sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; + dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); + } +} + +static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) +{ + struct rk_crypto_info *dev = platform_get_drvdata(dev_id); + u32 interrupt_status; + int err = 0; + + spin_lock(&dev->lock); + interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); + CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + if (interrupt_status & 0x0a) { + dev_warn(dev->dev, "DMA Error\n"); + err = -EFAULT; + } else if (interrupt_status & 0x05) { + err = dev->update(dev); + } + if (err) + dev->complete(dev, err); + spin_unlock(&dev->lock); + return IRQ_HANDLED; +} + +static void rk_crypto_tasklet_cb(unsigned long data) +{ + struct rk_crypto_info *dev = (struct rk_crypto_info *)data; + struct crypto_async_request *async_req, *backlog; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&dev->lock, flags); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); + spin_unlock_irqrestore(&dev->lock, flags); + if (!async_req) { + dev_err(dev->dev, "async_req is NULL !!\n"); + return; + } + if (backlog) { + backlog->complete(backlog, -EINPROGRESS); + backlog = NULL; + } + + if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) + dev->ablk_req = ablkcipher_request_cast(async_req); + err = dev->start(dev); + if (err) + dev->complete(dev, err); +} + +static struct rk_crypto_tmp *rk_cipher_algs[] = { + &rk_ecb_aes_alg, + &rk_cbc_aes_alg, + &rk_ecb_des_alg, + &rk_cbc_des_alg, + &rk_ecb_des3_ede_alg, + &rk_cbc_des3_ede_alg, +}; + +static int rk_crypto_register(struct rk_crypto_info *crypto_info) +{ + unsigned int i, k; + int err = 0; + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { + rk_cipher_algs[i]->dev = crypto_info; + err = crypto_register_alg(&rk_cipher_algs[i]->alg); + if (err) + goto err_cipher_algs; + } + return 0; + +err_cipher_algs: + for (k = 0; k < i; k++) + crypto_unregister_alg(&rk_cipher_algs[k]->alg); + return err; +} + +static void rk_crypto_unregister(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) + crypto_unregister_alg(&rk_cipher_algs[i]->alg); +} + +static void rk_crypto_action(void *data) +{ + struct rk_crypto_info *crypto_info = data; + + reset_control_assert(crypto_info->rst); +} + +static const struct of_device_id crypto_of_id_table[] = { + { .compatible = "rockchip,rk3288-crypto" }, + {} +}; 
+MODULE_DEVICE_TABLE(of, crypto_of_id_table); + +static int rk_crypto_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct rk_crypto_info *crypto_info; + int err = 0; + + crypto_info = devm_kzalloc(&pdev->dev, + sizeof(*crypto_info), GFP_KERNEL); + if (!crypto_info) { + err = -ENOMEM; + goto err_crypto; + } + + crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); + if (IS_ERR(crypto_info->rst)) { + err = PTR_ERR(crypto_info->rst); + goto err_crypto; + } + + reset_control_assert(crypto_info->rst); + usleep_range(10, 20); + reset_control_deassert(crypto_info->rst); + + err = devm_add_action(dev, rk_crypto_action, crypto_info); + if (err) { + reset_control_assert(crypto_info->rst); + goto err_crypto; + } + + spin_lock_init(&crypto_info->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(crypto_info->reg)) { + err = PTR_ERR(crypto_info->reg); + goto err_crypto; + } + + crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); + if (IS_ERR(crypto_info->aclk)) { + err = PTR_ERR(crypto_info->aclk); + goto err_crypto; + } + + crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(crypto_info->hclk)) { + err = PTR_ERR(crypto_info->hclk); + goto err_crypto; + } + + crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); + if (IS_ERR(crypto_info->sclk)) { + err = PTR_ERR(crypto_info->sclk); + goto err_crypto; + } + + crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(crypto_info->dmaclk)) { + err = PTR_ERR(crypto_info->dmaclk); + goto err_crypto; + } + + crypto_info->irq = platform_get_irq(pdev, 0); + if (crypto_info->irq < 0) { + dev_warn(crypto_info->dev, + "control Interrupt is not available.\n"); + err = crypto_info->irq; + goto err_crypto; + } + + err = devm_request_irq(&pdev->dev, crypto_info->irq, + rk_crypto_irq_handle, IRQF_SHARED, + "rk-crypto", pdev); + + if (err) { + dev_err(crypto_info->dev, "irq request failed.\n"); + goto err_crypto; + } + + crypto_info->dev = &pdev->dev; + platform_set_drvdata(pdev, crypto_info); + + tasklet_init(&crypto_info->crypto_tasklet, + rk_crypto_tasklet_cb, (unsigned long)crypto_info); + crypto_init_queue(&crypto_info->queue, 50); + + crypto_info->enable_clk = rk_crypto_enable_clk; + crypto_info->disable_clk = rk_crypto_disable_clk; + crypto_info->load_data = rk_load_data; + crypto_info->unload_data = rk_unload_data; + + err = rk_crypto_register(crypto_info); + if (err) { + dev_err(dev, "err in register alg"); + goto err_register_alg; + } + + dev_info(dev, "Crypto Accelerator successfully registered\n"); + return 0; + +err_register_alg: + tasklet_kill(&crypto_info->crypto_tasklet); +err_crypto: + return err; +} + +static int rk_crypto_remove(struct platform_device *pdev) +{ + struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); + + rk_crypto_unregister(); + tasklet_kill(&crypto_tmp->crypto_tasklet); + return 0; +} + +static struct platform_driver crypto_driver = { + .probe = rk_crypto_probe, + .remove = rk_crypto_remove, + .driver = { + .name = "rk3288-crypto", + .of_match_table = crypto_of_id_table, + }, +}; + +module_platform_driver(crypto_driver); + +MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>"); +MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h new file mode 100644 index 000000000000..e499c2c6c903 --- /dev/null +++ 
b/drivers/crypto/rockchip/rk3288_crypto.h @@ -0,0 +1,216 @@ +#ifndef __RK3288_CRYPTO_H__ +#define __RK3288_CRYPTO_H__ + +#include <crypto/aes.h> +#include <crypto/des.h> +#include <crypto/algapi.h> +#include <linux/interrupt.h> +#include <linux/delay.h> + +#define _SBF(v, f) ((v) << (f)) + +/* Crypto control registers*/ +#define RK_CRYPTO_INTSTS 0x0000 +#define RK_CRYPTO_PKA_DONE_INT BIT(5) +#define RK_CRYPTO_HASH_DONE_INT BIT(4) +#define RK_CRYPTO_HRDMA_ERR_INT BIT(3) +#define RK_CRYPTO_HRDMA_DONE_INT BIT(2) +#define RK_CRYPTO_BCDMA_ERR_INT BIT(1) +#define RK_CRYPTO_BCDMA_DONE_INT BIT(0) + +#define RK_CRYPTO_INTENA 0x0004 +#define RK_CRYPTO_PKA_DONE_ENA BIT(5) +#define RK_CRYPTO_HASH_DONE_ENA BIT(4) +#define RK_CRYPTO_HRDMA_ERR_ENA BIT(3) +#define RK_CRYPTO_HRDMA_DONE_ENA BIT(2) +#define RK_CRYPTO_BCDMA_ERR_ENA BIT(1) +#define RK_CRYPTO_BCDMA_DONE_ENA BIT(0) + +#define RK_CRYPTO_CTRL 0x0008 +#define RK_CRYPTO_WRITE_MASK _SBF(0xFFFF, 16) +#define RK_CRYPTO_TRNG_FLUSH BIT(9) +#define RK_CRYPTO_TRNG_START BIT(8) +#define RK_CRYPTO_PKA_FLUSH BIT(7) +#define RK_CRYPTO_HASH_FLUSH BIT(6) +#define RK_CRYPTO_BLOCK_FLUSH BIT(5) +#define RK_CRYPTO_PKA_START BIT(4) +#define RK_CRYPTO_HASH_START BIT(3) +#define RK_CRYPTO_BLOCK_START BIT(2) +#define RK_CRYPTO_TDES_START BIT(1) +#define RK_CRYPTO_AES_START BIT(0) + +#define RK_CRYPTO_CONF 0x000c +/* HASH Receive DMA Address Mode: fix | increment */ +#define RK_CRYPTO_HR_ADDR_MODE BIT(8) +/* Block Transmit DMA Address Mode: fix | increment */ +#define RK_CRYPTO_BT_ADDR_MODE BIT(7) +/* Block Receive DMA Address Mode: fix | increment */ +#define RK_CRYPTO_BR_ADDR_MODE BIT(6) +#define RK_CRYPTO_BYTESWAP_HRFIFO BIT(5) +#define RK_CRYPTO_BYTESWAP_BTFIFO BIT(4) +#define RK_CRYPTO_BYTESWAP_BRFIFO BIT(3) +/* AES = 0 OR DES = 1 */ +#define RK_CRYPTO_DESSEL BIT(2) +#define RK_CYYPTO_HASHINSEL_INDEPENDENT_SOURCE _SBF(0x00, 0) +#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_INPUT _SBF(0x01, 0) +#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT _SBF(0x02, 0) + +/* Block Receiving DMA Start Address Register */ +#define RK_CRYPTO_BRDMAS 0x0010 +/* Block Transmitting DMA Start Address Register */ +#define RK_CRYPTO_BTDMAS 0x0014 +/* Block Receiving DMA Length Register */ +#define RK_CRYPTO_BRDMAL 0x0018 +/* Hash Receiving DMA Start Address Register */ +#define RK_CRYPTO_HRDMAS 0x001c +/* Hash Receiving DMA Length Register */ +#define RK_CRYPTO_HRDMAL 0x0020 + +/* AES registers */ +#define RK_CRYPTO_AES_CTRL 0x0080 +#define RK_CRYPTO_AES_BYTESWAP_CNT BIT(11) +#define RK_CRYPTO_AES_BYTESWAP_KEY BIT(10) +#define RK_CRYPTO_AES_BYTESWAP_IV BIT(9) +#define RK_CRYPTO_AES_BYTESWAP_DO BIT(8) +#define RK_CRYPTO_AES_BYTESWAP_DI BIT(7) +#define RK_CRYPTO_AES_KEY_CHANGE BIT(6) +#define RK_CRYPTO_AES_ECB_MODE _SBF(0x00, 4) +#define RK_CRYPTO_AES_CBC_MODE _SBF(0x01, 4) +#define RK_CRYPTO_AES_CTR_MODE _SBF(0x02, 4) +#define RK_CRYPTO_AES_128BIT_key _SBF(0x00, 2) +#define RK_CRYPTO_AES_192BIT_key _SBF(0x01, 2) +#define RK_CRYPTO_AES_256BIT_key _SBF(0x02, 2) +/* Slave = 0 / fifo = 1 */ +#define RK_CRYPTO_AES_FIFO_MODE BIT(1) +/* Encryption = 0 , Decryption = 1 */ +#define RK_CRYPTO_AES_DEC BIT(0) + +#define RK_CRYPTO_AES_STS 0x0084 +#define RK_CRYPTO_AES_DONE BIT(0) + +/* AES Input Data 0-3 Register */ +#define RK_CRYPTO_AES_DIN_0 0x0088 +#define RK_CRYPTO_AES_DIN_1 0x008c +#define RK_CRYPTO_AES_DIN_2 0x0090 +#define RK_CRYPTO_AES_DIN_3 0x0094 + +/* AES output Data 0-3 Register */ +#define RK_CRYPTO_AES_DOUT_0 0x0098 +#define RK_CRYPTO_AES_DOUT_1 0x009c +#define RK_CRYPTO_AES_DOUT_2 0x00a0 
+#define RK_CRYPTO_AES_DOUT_3 0x00a4 + +/* AES IV Data 0-3 Register */ +#define RK_CRYPTO_AES_IV_0 0x00a8 +#define RK_CRYPTO_AES_IV_1 0x00ac +#define RK_CRYPTO_AES_IV_2 0x00b0 +#define RK_CRYPTO_AES_IV_3 0x00b4 + +/* AES Key Data 0-3 Register */ +#define RK_CRYPTO_AES_KEY_0 0x00b8 +#define RK_CRYPTO_AES_KEY_1 0x00bc +#define RK_CRYPTO_AES_KEY_2 0x00c0 +#define RK_CRYPTO_AES_KEY_3 0x00c4 +#define RK_CRYPTO_AES_KEY_4 0x00c8 +#define RK_CRYPTO_AES_KEY_5 0x00cc +#define RK_CRYPTO_AES_KEY_6 0x00d0 +#define RK_CRYPTO_AES_KEY_7 0x00d4 + +/* des/tdes */ +#define RK_CRYPTO_TDES_CTRL 0x0100 +#define RK_CRYPTO_TDES_BYTESWAP_KEY BIT(8) +#define RK_CRYPTO_TDES_BYTESWAP_IV BIT(7) +#define RK_CRYPTO_TDES_BYTESWAP_DO BIT(6) +#define RK_CRYPTO_TDES_BYTESWAP_DI BIT(5) +/* 0: ECB, 1: CBC */ +#define RK_CRYPTO_TDES_CHAINMODE_CBC BIT(4) +/* TDES Key Mode, 0 : EDE, 1 : EEE */ +#define RK_CRYPTO_TDES_EEE BIT(3) +/* 0: DES, 1:TDES */ +#define RK_CRYPTO_TDES_SELECT BIT(2) +/* 0: Slave, 1:Fifo */ +#define RK_CRYPTO_TDES_FIFO_MODE BIT(1) +/* Encryption = 0 , Decryption = 1 */ +#define RK_CRYPTO_TDES_DEC BIT(0) + +#define RK_CRYPTO_TDES_STS 0x0104 +#define RK_CRYPTO_TDES_DONE BIT(0) + +#define RK_CRYPTO_TDES_DIN_0 0x0108 +#define RK_CRYPTO_TDES_DIN_1 0x010c +#define RK_CRYPTO_TDES_DOUT_0 0x0110 +#define RK_CRYPTO_TDES_DOUT_1 0x0114 +#define RK_CRYPTO_TDES_IV_0 0x0118 +#define RK_CRYPTO_TDES_IV_1 0x011c +#define RK_CRYPTO_TDES_KEY1_0 0x0120 +#define RK_CRYPTO_TDES_KEY1_1 0x0124 +#define RK_CRYPTO_TDES_KEY2_0 0x0128 +#define RK_CRYPTO_TDES_KEY2_1 0x012c +#define RK_CRYPTO_TDES_KEY3_0 0x0130 +#define RK_CRYPTO_TDES_KEY3_1 0x0134 + +#define CRYPTO_READ(dev, offset) \ + readl_relaxed(((dev)->reg + (offset))) +#define CRYPTO_WRITE(dev, offset, val) \ + writel_relaxed((val), ((dev)->reg + (offset))) + +struct rk_crypto_info { + struct device *dev; + struct clk *aclk; + struct clk *hclk; + struct clk *sclk; + struct clk *dmaclk; + struct reset_control *rst; + void __iomem *reg; + int irq; + struct crypto_queue queue; + struct tasklet_struct crypto_tasklet; + struct ablkcipher_request *ablk_req; + /* device lock */ + spinlock_t lock; + + /* the public variable */ + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + struct scatterlist sg_tmp; + struct scatterlist *first; + unsigned int left_bytes; + void *addr_vir; + int aligned; + int align_size; + size_t nents; + unsigned int total; + unsigned int count; + u32 mode; + dma_addr_t addr_in; + dma_addr_t addr_out; + int (*start)(struct rk_crypto_info *dev); + int (*update)(struct rk_crypto_info *dev); + void (*complete)(struct rk_crypto_info *dev, int err); + int (*enable_clk)(struct rk_crypto_info *dev); + void (*disable_clk)(struct rk_crypto_info *dev); + int (*load_data)(struct rk_crypto_info *dev, + struct scatterlist *sg_src, + struct scatterlist *sg_dst); + void (*unload_data)(struct rk_crypto_info *dev); +}; + +/* the private variable of cipher */ +struct rk_cipher_ctx { + struct rk_crypto_info *dev; + unsigned int keylen; +}; + +struct rk_crypto_tmp { + struct rk_crypto_info *dev; + struct crypto_alg alg; +}; + +extern struct rk_crypto_tmp rk_ecb_aes_alg; +extern struct rk_crypto_tmp rk_cbc_aes_alg; +extern struct rk_crypto_tmp rk_ecb_des_alg; +extern struct rk_crypto_tmp rk_cbc_des_alg; +extern struct rk_crypto_tmp rk_ecb_des3_ede_alg; +extern struct rk_crypto_tmp rk_cbc_des3_ede_alg; + +#endif diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c new file mode 100644 index 
000000000000..d98b681f6c06 --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -0,0 +1,505 @@ +/* + * Crypto acceleration support for Rockchip RK3288 + * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Zain Wang <zain.wang@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Some ideas are from marvell-cesa.c and s5p-sss.c driver. + */ +#include "rk3288_crypto.h" + +#define RK_CRYPTO_DEC BIT(0) + +static void rk_crypto_complete(struct rk_crypto_info *dev, int err) +{ + if (dev->ablk_req->base.complete) + dev->ablk_req->base.complete(&dev->ablk_req->base, err); +} + +static int rk_handle_req(struct rk_crypto_info *dev, + struct ablkcipher_request *req) +{ + unsigned long flags; + int err; + + if (!IS_ALIGNED(req->nbytes, dev->align_size)) + return -EINVAL; + + dev->left_bytes = req->nbytes; + dev->total = req->nbytes; + dev->sg_src = req->src; + dev->first = req->src; + dev->nents = sg_nents(req->src); + dev->sg_dst = req->dst; + dev->aligned = 1; + dev->ablk_req = req; + + spin_lock_irqsave(&dev->lock, flags); + err = ablkcipher_enqueue_request(&dev->queue, req); + spin_unlock_irqrestore(&dev->lock, flags); + tasklet_schedule(&dev->crypto_tasklet); + return err; +} + +static int rk_aes_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); + return 0; +} + +static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + + if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (keylen == DES_KEY_SIZE) { + if (!des_ekey(tmp, key) && + (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + } + + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); + return 0; +} + +static int rk_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_ECB_MODE; + return rk_handle_req(dev, req); +} + +static int rk_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_CBC_MODE; + return rk_handle_req(dev, req); +} + 
+static int rk_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = 0; + return rk_handle_req(dev, req); +} + +static int rk_des_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_handle_req(dev, req); +} + +static int rk_des_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static void rk_ablk_hw_init(struct rk_crypto_info *dev) +{ + struct crypto_ablkcipher *cipher = + crypto_ablkcipher_reqtfm(dev->ablk_req); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 ivsize, block, conf_reg = 0; + + block = crypto_tfm_alg_blocksize(tfm); + ivsize = crypto_ablkcipher_ivsize(cipher); + + if (block == DES_BLOCK_SIZE) { + dev->mode |= RK_CRYPTO_TDES_FIFO_MODE | + RK_CRYPTO_TDES_BYTESWAP_KEY | + RK_CRYPTO_TDES_BYTESWAP_IV; + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, + dev->ablk_req->info, 
ivsize); + conf_reg = RK_CRYPTO_DESSEL; + } else { + dev->mode |= RK_CRYPTO_AES_FIFO_MODE | + RK_CRYPTO_AES_KEY_CHANGE | + RK_CRYPTO_AES_BYTESWAP_KEY | + RK_CRYPTO_AES_BYTESWAP_IV; + if (ctx->keylen == AES_KEYSIZE_192) + dev->mode |= RK_CRYPTO_AES_192BIT_key; + else if (ctx->keylen == AES_KEYSIZE_256) + dev->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode); + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, + dev->ablk_req->info, ivsize); + } + conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | + RK_CRYPTO_BYTESWAP_BRFIFO; + CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg); + CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, + RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); +} + +static void crypto_dma_start(struct rk_crypto_info *dev) +{ + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | + _SBF(RK_CRYPTO_BLOCK_START, 16)); +} + +static int rk_set_data_start(struct rk_crypto_info *dev) +{ + int err; + + err = dev->load_data(dev, dev->sg_src, dev->sg_dst); + if (!err) + crypto_dma_start(dev); + return err; +} + +static int rk_ablk_start(struct rk_crypto_info *dev) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&dev->lock, flags); + rk_ablk_hw_init(dev); + err = rk_set_data_start(dev); + spin_unlock_irqrestore(&dev->lock, flags); + return err; +} + +static void rk_iv_copyback(struct rk_crypto_info *dev) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + + if (ivsize == DES_BLOCK_SIZE) + memcpy_fromio(dev->ablk_req->info, + dev->reg + RK_CRYPTO_TDES_IV_0, ivsize); + else if (ivsize == AES_BLOCK_SIZE) + memcpy_fromio(dev->ablk_req->info, + dev->reg + RK_CRYPTO_AES_IV_0, ivsize); +} + +/* return: + * true some err was occurred + * fault no err, continue + */ +static int rk_ablk_rx(struct rk_crypto_info *dev) +{ + int err = 0; + + dev->unload_data(dev); + if (!dev->aligned) { + if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents, + dev->addr_vir, dev->count, + dev->total - dev->left_bytes - + dev->count)) { + err = -EINVAL; + goto out_rx; + } + } + if (dev->left_bytes) { + if (dev->aligned) { + if (sg_is_last(dev->sg_src)) { + dev_err(dev->dev, "[%s:%d] Lack of data\n", + __func__, __LINE__); + err = -ENOMEM; + goto out_rx; + } + dev->sg_src = sg_next(dev->sg_src); + dev->sg_dst = sg_next(dev->sg_dst); + } + err = rk_set_data_start(dev); + } else { + rk_iv_copyback(dev); + /* here show the calculation is over without any err */ + dev->complete(dev, 0); + } +out_rx: + return err; +} + +static int rk_ablk_cra_init(struct crypto_tfm *tfm) +{ + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct rk_crypto_tmp *algt; + + algt = container_of(alg, struct rk_crypto_tmp, alg); + + ctx->dev = algt->dev; + ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1; + ctx->dev->start = rk_ablk_start; + ctx->dev->update = rk_ablk_rx; + ctx->dev->complete = rk_crypto_complete; + ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); + + return ctx->dev->addr_vir ? 
ctx->dev->enable_clk(ctx->dev) : -ENOMEM; +} + +static void rk_ablk_cra_exit(struct crypto_tfm *tfm) +{ + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + free_page((unsigned long)ctx->dev->addr_vir); + ctx->dev->disable_clk(ctx->dev); +} + +struct rk_crypto_tmp rk_ecb_aes_alg = { + .alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = rk_aes_setkey, + .encrypt = rk_aes_ecb_encrypt, + .decrypt = rk_aes_ecb_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_cbc_aes_alg = { + .alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = rk_aes_setkey, + .encrypt = rk_aes_cbc_encrypt, + .decrypt = rk_aes_cbc_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_ecb_des_alg = { + .alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des_ecb_encrypt, + .decrypt = rk_des_ecb_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_cbc_des_alg = { + .alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des_cbc_encrypt, + .decrypt = rk_des_cbc_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_ecb_des3_ede_alg = { + .alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-ede-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des3_ede_ecb_encrypt, + .decrypt = rk_des3_ede_ecb_decrypt, + } + } +}; + +struct 
rk_crypto_tmp rk_cbc_des3_ede_alg = { + .alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-ede-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des3_ede_cbc_encrypt, + .decrypt = rk_des3_ede_cbc_decrypt, + } + } +}; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index f68c24a98277..6c4f91c5e6b3 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -130,18 +130,18 @@ #define SAHARA_REG_IDAR 0x20 struct sahara_hw_desc { - u32 hdr; - u32 len1; - dma_addr_t p1; - u32 len2; - dma_addr_t p2; - dma_addr_t next; + u32 hdr; + u32 len1; + u32 p1; + u32 len2; + u32 p2; + u32 next; }; struct sahara_hw_link { - u32 len; - dma_addr_t p; - dma_addr_t next; + u32 len; + u32 p; + u32 next; }; struct sahara_ctx { @@ -228,9 +228,9 @@ struct sahara_dev { size_t total; struct scatterlist *in_sg; - unsigned int nb_in_sg; + int nb_in_sg; struct scatterlist *out_sg; - unsigned int nb_out_sg; + int nb_out_sg; u32 error; }; @@ -416,8 +416,8 @@ static void sahara_dump_descriptors(struct sahara_dev *dev) return; for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { - dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n", - i, dev->hw_phys_desc[i]); + dev_dbg(dev->device, "Descriptor (%d) (%pad):\n", + i, &dev->hw_phys_desc[i]); dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); @@ -437,8 +437,8 @@ static void sahara_dump_links(struct sahara_dev *dev) return; for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { - dev_dbg(dev->device, "Link (%d) (0x%08x):\n", - i, dev->hw_phys_link[i]); + dev_dbg(dev->device, "Link (%d) (%pad):\n", + i, &dev->hw_phys_link[i]); dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); dev_dbg(dev->device, "\tnext = 0x%08x\n", @@ -477,7 +477,15 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) } dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total); + if (dev->nb_in_sg < 0) { + dev_err(dev->device, "Invalid numbers of src SG.\n"); + return dev->nb_in_sg; + } dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total); + if (dev->nb_out_sg < 0) { + dev_err(dev->device, "Invalid numbers of dst SG.\n"); + return dev->nb_out_sg; + } if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { dev_err(dev->device, "not enough hw links (%d)\n", dev->nb_in_sg + dev->nb_out_sg); @@ -793,6 +801,10 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev, dev->in_sg = rctx->in_sg; dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total); + if (dev->nb_in_sg < 0) { + dev_err(dev->device, "Invalid numbers of src SG.\n"); + return dev->nb_in_sg; + } if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) { dev_err(dev->device, "not enough hw links (%d)\n", dev->nb_in_sg + dev->nb_out_sg); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index eab6fe227fa0..107cd2a41cae 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -39,6 
+39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { .import = sun4i_hash_import_md5, .halg = { .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "md5-sun4i-ss", @@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { .import = sun4i_hash_import_sha1, .halg = { .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-sun4i-ss", diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index b6f9f42e2985..a0d4a08313ae 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1216,6 +1216,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; + void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); @@ -1228,14 +1229,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, if (!dst || dst == src) { src_nents = sg_nents_for_len(src, assoclen + cryptlen + authsize); + if (src_nents < 0) { + dev_err(dev, "Invalid number of src SG.\n"); + err = ERR_PTR(-EINVAL); + goto error_sg; + } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; } else { /* dst && dst != src*/ src_nents = sg_nents_for_len(src, assoclen + cryptlen + (encrypt ? 0 : authsize)); + if (src_nents < 0) { + dev_err(dev, "Invalid number of src SG.\n"); + err = ERR_PTR(-EINVAL); + goto error_sg; + } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = sg_nents_for_len(dst, assoclen + cryptlen + (encrypt ? authsize : 0)); + if (dst_nents < 0) { + dev_err(dev, "Invalid number of dst SG.\n"); + err = ERR_PTR(-EINVAL); + goto error_sg; + } dst_nents = (dst_nents == 1) ? 
0 : dst_nents; } @@ -1260,11 +1276,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, edesc = kmalloc(alloc_len, GFP_DMA | flags); if (!edesc) { - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - dev_err(dev, "could not allocate edescriptor\n"); - return ERR_PTR(-ENOMEM); + err = ERR_PTR(-ENOMEM); + goto error_sg; } edesc->src_nents = src_nents; @@ -1277,6 +1291,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, DMA_BIDIRECTIONAL); return edesc; +error_sg: + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, @@ -1830,11 +1848,16 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) unsigned int nbytes_to_hash; unsigned int to_hash_later; unsigned int nsg; + int nents; if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { /* Buffer up to one whole block */ - sg_copy_to_buffer(areq->src, - sg_nents_for_len(areq->src, nbytes), + nents = sg_nents_for_len(areq->src, nbytes); + if (nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return nents; + } + sg_copy_to_buffer(areq->src, nents, req_ctx->buf + req_ctx->nbuf, nbytes); req_ctx->nbuf += nbytes; return 0; @@ -1867,7 +1890,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) req_ctx->psrc = areq->src; if (to_hash_later) { - int nents = sg_nents_for_len(areq->src, nbytes); + nents = sg_nents_for_len(areq->src, nbytes); + if (nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return nents; + } sg_pcopy_to_buffer(areq->src, nents, req_ctx->bufnext, to_hash_later, @@ -2297,6 +2324,22 @@ static struct talitos_alg_template driver_algs[] = { /* ABLKCIPHER algorithms. 
*/ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, @@ -2314,6 +2357,73 @@ static struct talitos_alg_template driver_algs[] = { }, { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CTR, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-talitos", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-talitos", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_3DES, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 0090f3211d68..8dd8f40e2771 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -345,6 +345,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* primary execution unit mode (MODE0) and derivatives */ #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000) #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) #define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index 30796441b0a6..0e338bf6dfb7 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -18,6 +18,8 @@ config 
CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" depends on CRYPTO_DEV_UX500 select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 help This selects the hash driver for the UX500_HASH hardware. Depends on UX500/STM DMA if running in DMA mode. diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index f47d112041b2..d6fdc583ce5d 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -41,22 +41,6 @@ static int hash_mode; module_param(hash_mode, int, 0); MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); -/** - * Pre-calculated empty message digests. - */ -static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, - 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, - 0xaf, 0xd8, 0x07, 0x09 -}; - -static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; - /* HMAC-SHA1, no key */ static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, @@ -242,13 +226,13 @@ static int get_empty_message_digest( if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { if (HASH_ALGO_SHA1 == ctx->config.algorithm) { - memcpy(zero_hash, &zero_message_hash_sha1[0], + memcpy(zero_hash, &sha1_zero_message_hash[0], SHA1_DIGEST_SIZE); *zero_hash_size = SHA1_DIGEST_SIZE; *zero_digest = true; } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { - memcpy(zero_hash, &zero_message_hash_sha256[0], + memcpy(zero_hash, &sha256_zero_message_hash[0], SHA256_DIGEST_SIZE); *zero_hash_size = SHA256_DIGEST_SIZE; *zero_digest = true; diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 0b8fe2ec5315..78a978613ca8 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = { .cra_init = p8_aes_cbc_init, .cra_exit = p8_aes_cbc_exit, .cra_blkcipher = { - .ivsize = 0, + .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = p8_aes_cbc_setkey, diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index ee1306cd8f59..1febc4f1d9af 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = { .cra_init = p8_aes_ctr_init, .cra_exit = p8_aes_ctr_exit, .cra_blkcipher = { - .ivsize = 0, + .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = p8_aes_ctr_setkey, diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7f039de143f0..370c661c7d7b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -156,7 +156,7 @@ #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) -#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ +#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ @@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan 
*chan, NULL, src_addr, dst_addr, xt, xt->sgl); - for (i = 0; i < xt->numf; i++) + + /* Length of the block is (BLEN+1) microblocks. */ + for (i = 0; i < xt->numf - 1; i++) at_xdmac_increment_block_count(chan, first); dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", @@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, /* Check remaining length and change data width if needed. */ dwidth = at_xdmac_align_width(chan, src_addr | dst_addr | xfer_size); + chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK; chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); ublen = xfer_size >> dwidth; @@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, * since we don't care about the stride anymore. */ if ((i == (sg_len - 1)) && - sg_dma_len(ppsg) == sg_dma_len(psg)) { + sg_dma_len(psg) == sg_dma_len(sg)) { dev_dbg(chan2dev(chan), "%s: desc 0x%p can be merged with desc 0x%p\n", __func__, desc, pdesc); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index c92d6a70ccf3..996c4b00d323 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -31,6 +31,7 @@ */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> +#include <linux/dmapool.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -62,6 +63,11 @@ struct bcm2835_dma_cb { uint32_t pad[2]; }; +struct bcm2835_cb_entry { + struct bcm2835_dma_cb *cb; + dma_addr_t paddr; +}; + struct bcm2835_chan { struct virt_dma_chan vc; struct list_head node; @@ -72,18 +78,18 @@ struct bcm2835_chan { int ch; struct bcm2835_desc *desc; + struct dma_pool *cb_pool; void __iomem *chan_base; int irq_number; }; struct bcm2835_desc { + struct bcm2835_chan *c; struct virt_dma_desc vd; enum dma_transfer_direction dir; - unsigned int control_block_size; - struct bcm2835_dma_cb *control_block_base; - dma_addr_t control_block_base_phys; + struct bcm2835_cb_entry *cb_list; unsigned int frames; size_t size; @@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc( static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) { struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); - dma_free_coherent(desc->vd.tx.chan->device->dev, - desc->control_block_size, - desc->control_block_base, - desc->control_block_base_phys); + int i; + + for (i = 0; i < desc->frames; i++) + dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, + desc->cb_list[i].paddr); + + kfree(desc->cb_list); kfree(desc); } @@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c) c->desc = d = to_bcm2835_dma_desc(&vd->tx); - writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); + writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); } @@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + struct device *dev = c->vc.chan.device->dev; + + dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); - dev_dbg(c->vc.chan.device->dev, - "Allocating DMA channel %d\n", c->ch); + c->cb_pool = dma_pool_create(dev_name(dev), dev, + sizeof(struct bcm2835_dma_cb), 0, 0); + if (!c->cb_pool) { + dev_err(dev, "unable to allocate descriptor pool\n"); + return -ENOMEM; + } return request_irq(c->irq_number, bcm2835_dma_callback, 0, "DMA IRQ", c); @@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct 
dma_chan *chan) vchan_free_chan_resources(&c->vc); free_irq(c->irq_number, c); + dma_pool_destroy(c->cb_pool); dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); } @@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) size_t size; for (size = i = 0; i < d->frames; i++) { - struct bcm2835_dma_cb *control_block = - &d->control_block_base[i]; + struct bcm2835_dma_cb *control_block = d->cb_list[i].cb; size_t this_size = control_block->length; dma_addr_t dma; @@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( dma_addr_t dev_addr; unsigned int es, sync_type; unsigned int frame; + int i; /* Grab configuration */ if (!is_slave_direction(direction)) { @@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( if (!d) return NULL; + d->c = c; d->dir = direction; d->frames = buf_len / period_len; - /* Allocate memory for control blocks */ - d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); - d->control_block_base = dma_zalloc_coherent(chan->device->dev, - d->control_block_size, &d->control_block_base_phys, - GFP_NOWAIT); - - if (!d->control_block_base) { + d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); + if (!d->cb_list) { kfree(d); return NULL; } + /* Allocate memory for control blocks */ + for (i = 0; i < d->frames; i++) { + struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; + + cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, + &cb_entry->paddr); + if (!cb_entry->cb) + goto error_cb; + } /* * Iterate over all frames, create a control block * for each frame and link them together. */ for (frame = 0; frame < d->frames; frame++) { - struct bcm2835_dma_cb *control_block = - &d->control_block_base[frame]; + struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; /* Setup adresses */ if (d->dir == DMA_DEV_TO_MEM) { @@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( * This DMA engine driver currently only supports cyclic DMA. * Therefore, wrap around at number of frames. 
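 * (Illustrative sketch, not part of the original patch: with the per-frame
 * dma_pool allocations introduced above, a three-frame cyclic transfer ends
 * up linked as
 *   cb_list[0].cb->next = cb_list[1].paddr;
 *   cb_list[1].cb->next = cb_list[2].paddr;
 *   cb_list[2].cb->next = cb_list[0].paddr;
 * because ((frame + 1) % d->frames) wraps the last frame back to frame 0.)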
*/ - control_block->next = d->control_block_base_phys + - sizeof(struct bcm2835_dma_cb) - * ((frame + 1) % d->frames); + control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr; } return vchan_tx_prep(&c->vc, &d->vd, flags); +error_cb: + i--; + for (; i >= 0; i--) { + struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; + + dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); + } + + kfree(d->cb_list); + kfree(d); + return NULL; } static int bcm2835_dma_slave_config(struct dma_chan *chan, diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 7067b6ddc1db..8b20930ade98 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -622,12 +622,17 @@ static void dw_dma_tasklet(unsigned long data) static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) { struct dw_dma *dw = dev_id; - u32 status = dma_readl(dw, STATUS_INT); + u32 status; + /* Check if we have any interrupt from the DMAC which is not in use */ + if (!dw->in_use) + return IRQ_NONE; + + status = dma_readl(dw, STATUS_INT); dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); /* Check if we have any interrupt from the DMAC */ - if (!status || !dw->in_use) + if (!status) return IRQ_NONE; /* diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 68a4815750b5..127093a0c0e8 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -155,7 +155,6 @@ static int dw_probe(struct platform_device *pdev) struct dw_dma_chip *chip; struct device *dev = &pdev->dev; struct resource *mem; - const struct acpi_device_id *id; struct dw_dma_platform_data *pdata; int err; @@ -179,11 +178,6 @@ static int dw_probe(struct platform_device *pdev) pdata = dev_get_platdata(dev); if (!pdata) pdata = dw_dma_parse_dt(pdev); - if (!pdata && has_acpi_companion(dev)) { - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (id) - pdata = (struct dw_dma_platform_data *)id->driver_data; - } chip->dev = dev; @@ -239,7 +233,19 @@ static void dw_shutdown(struct platform_device *pdev) { struct dw_dma_chip *chip = platform_get_drvdata(pdev); + /* + * We have to call dw_dma_disable() to stop any ongoing transfer. On + * some platforms we can't do that since DMA device is powered off. + * Moreover we have no possibility to check if the platform is affected + * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() + * unconditionally. On the other hand we can't use + * pm_runtime_suspended() because runtime PM framework is not fully + * used by the driver. 
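+ * (Sketch of the resulting sequence, not part of the original patch:
+ * pm_runtime_get_sync() resumes the device if it happens to be runtime
+ * suspended, or merely takes a reference if it is already active, so the
+ * register write inside dw_dma_disable() reaches powered hardware;
+ * pm_runtime_put_sync_suspend() then drops that reference and lets the
+ * device suspend again before clk_disable_unprepare() gates the clock.)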
+ */ + pm_runtime_get_sync(chip->dev); dw_dma_disable(chip); + pm_runtime_put_sync_suspend(chip->dev); + clk_disable_unprepare(chip->clk); } @@ -252,17 +258,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); #endif #ifdef CONFIG_ACPI -static struct dw_dma_platform_data dw_dma_acpi_pdata = { - .nr_channels = 8, - .is_private = true, - .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, - .chan_priority = CHAN_PRIORITY_ASCENDING, - .block_size = 4095, - .nr_masters = 2, -}; - static const struct acpi_device_id dw_dma_acpi_id_table[] = { - { "INTL9C60", (kernel_ulong_t)&dw_dma_acpi_pdata }, + { "INTL9C60", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 0675e268d577..16fe773fb846 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, return ret; } -static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) +static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) { - s16 *memcpy_ch = memcpy_channels; - if (!memcpy_channels) return false; - while (*memcpy_ch != -1) { - if (*memcpy_ch == ch_num) + while (*memcpy_channels != -1) { + if (*memcpy_channels == ch_num) return true; - memcpy_ch++; + memcpy_channels++; } return false; } @@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) { struct dma_device *s_ddev = &ecc->dma_slave; struct dma_device *m_ddev = NULL; - s16 *memcpy_channels = ecc->info->memcpy_channels; + s32 *memcpy_channels = ecc->info->memcpy_channels; int i, j; dma_cap_zero(s_ddev->cap_mask); @@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); if (prop) { const char pname[] = "ti,edma-memcpy-channels"; - size_t nelm = sz / sizeof(s16); - s16 *memcpy_ch; + size_t nelm = sz / sizeof(s32); + s32 *memcpy_ch; - memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), + memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), GFP_KERNEL); if (!memcpy_ch) return ERR_PTR(-ENOMEM); - ret = of_property_read_u16_array(dev->of_node, pname, - (u16 *)memcpy_ch, nelm); + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)memcpy_ch, nelm); if (ret) return ERR_PTR(ret); @@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, &sz); if (prop) { const char pname[] = "ti,edma-reserved-slot-ranges"; + u32 (*tmp)[2]; s16 (*rsv_slots)[2]; - size_t nelm = sz / sizeof(*rsv_slots); + size_t nelm = sz / sizeof(*tmp); struct edma_rsv_info *rsv_info; + int i; if (!nelm) return info; + tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ERR_PTR(-ENOMEM); + rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); - if (!rsv_info) + if (!rsv_info) { + kfree(tmp); return ERR_PTR(-ENOMEM); + } rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), GFP_KERNEL); - if (!rsv_slots) + if (!rsv_slots) { + kfree(tmp); return ERR_PTR(-ENOMEM); + } - ret = of_property_read_u16_array(dev->of_node, pname, - (u16 *)rsv_slots, nelm * 2); - if (ret) + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)tmp, nelm * 2); + if (ret) { + kfree(tmp); return ERR_PTR(ret); + } + for (i = 0; i < nelm; i++) { + rsv_slots[i][0] = tmp[i][0]; + rsv_slots[i][1] = tmp[i][1]; + } rsv_slots[nelm][0] = -1; rsv_slots[nelm][1] = -1; + info->rsv = rsv_info; info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; + + kfree(tmp); } return info; diff 
--git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 9dfa2b0fa5da..9cb93c5b655d 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -29,6 +29,7 @@ #include <linux/dmapool.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/irq.h> #include <linux/module.h> #include <linux/of_device.h> @@ -1610,6 +1611,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma) /* Register DMA channel rx irq */ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { chan = &pdma->chan[i]; + irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(chan->dev, chan->rx_irq, xgene_dma_chan_ring_isr, 0, chan->name, chan); @@ -1620,6 +1622,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma) for (j = 0; j < i; j++) { chan = &pdma->chan[i]; + irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); devm_free_irq(chan->dev, chan->rx_irq, chan); } @@ -1640,6 +1643,7 @@ static void xgene_dma_free_irqs(struct xgene_dma *pdma) for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { chan = &pdma->chan[i]; + irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); devm_free_irq(chan->dev, chan->rx_irq, chan); } } diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index dbf53e08bdd1..be163e20fe56 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_EDAC) := edac_stub.o obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o -edac_core-y += edac_module.o edac_device_sysfs.o +edac_core-y += edac_module.o edac_device_sysfs.o wq.o edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 592af5f0cf39..a97900333e2d 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -390,11 +390,9 @@ static void edac_device_workq_function(struct work_struct *work_req) * between integral seconds */ if (edac_dev->poll_msec == 1000) - queue_delayed_work(edac_workqueue, &edac_dev->work, - round_jiffies_relative(edac_dev->delay)); + edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else - queue_delayed_work(edac_workqueue, &edac_dev->work, - edac_dev->delay); + edac_queue_work(&edac_dev->work, edac_dev->delay); } /* @@ -402,8 +400,8 @@ static void edac_device_workq_function(struct work_struct *work_req) * initialize a workq item for this edac_device instance * passing in the new delay period in msec */ -void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, - unsigned msec) +static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, + unsigned msec) { edac_dbg(0, "\n"); @@ -422,29 +420,23 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, * to fire together on the 1 second exactly */ if (edac_dev->poll_msec == 1000) - queue_delayed_work(edac_workqueue, &edac_dev->work, - round_jiffies_relative(edac_dev->delay)); + edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else - queue_delayed_work(edac_workqueue, &edac_dev->work, - edac_dev->delay); + edac_queue_work(&edac_dev->work, edac_dev->delay); } /* * edac_device_workq_teardown * stop the workq processing on this edac_dev */ -void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) +static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) { - int status; - if (!edac_dev->edac_check) return; - status = cancel_delayed_work(&edac_dev->work); - if (status == 0) { - /* workq instance might be running, wait for it */ - 
flush_workqueue(edac_workqueue); - } + edac_dev->op_state = OP_OFFLINE; + + edac_stop_work(&edac_dev->work); } /* @@ -457,16 +449,15 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, unsigned long value) { - /* cancel the current workq request, without the mutex lock */ - edac_device_workq_teardown(edac_dev); + unsigned long jiffs = msecs_to_jiffies(value); - /* acquire the mutex before doing the workq setup */ - mutex_lock(&device_ctls_mutex); + if (value == 1000) + jiffs = round_jiffies_relative(value); - /* restart the workq request, with new delay value */ - edac_device_workq_setup(edac_dev, value); + edac_dev->poll_msec = value; + edac_dev->delay = jiffs; - mutex_unlock(&device_ctls_mutex); + edac_mod_work(&edac_dev->work, jiffs); } /* diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index fb68a06ad683..93da1a45c716 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -237,11 +237,6 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) /* get the /sys/devices/system/edac reference */ edac_subsys = edac_get_sysfs_subsys(); - if (edac_subsys == NULL) { - edac_dbg(1, "no edac_subsys error\n"); - err = -ENODEV; - goto err_out; - } /* Point to the 'edac_subsys' this instance 'reports' to */ edac_dev->edac_subsys = edac_subsys; @@ -256,7 +251,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) if (!try_module_get(edac_dev->owner)) { err = -ENODEV; - goto err_mod_get; + goto err_out; } /* register */ @@ -282,9 +277,6 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) err_kobj_reg: module_put(edac_dev->owner); -err_mod_get: - edac_put_sysfs_subsys(); - err_out: return err; } @@ -306,7 +298,6 @@ void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) * b) 'kfree' the memory */ kobject_put(&dev->kobj); - edac_put_sysfs_subsys(); } /* edac_dev -> instance information */ diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 77ecd6a4179a..8adfc167c2e3 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -548,8 +548,7 @@ static void edac_mc_workq_function(struct work_struct *work_req) mutex_unlock(&mem_ctls_mutex); /* Reschedule */ - queue_delayed_work(edac_workqueue, &mci->work, - msecs_to_jiffies(edac_mc_get_poll_msec())); + edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); } /* @@ -561,8 +560,7 @@ static void edac_mc_workq_function(struct work_struct *work_req) * * called with the mem_ctls_mutex held */ -static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, - bool init) +static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) { edac_dbg(0, "\n"); @@ -570,10 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, if (mci->op_state != OP_RUNNING_POLL) return; - if (init) - INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); + INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); - mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); + edac_queue_work(&mci->work, msecs_to_jiffies(msec)); } /* @@ -586,18 +583,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, */ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) { - int status; - - if (mci->op_state != OP_RUNNING_POLL) - return; - - status = cancel_delayed_work(&mci->work); - if (status == 0) { - 
edac_dbg(0, "not canceled, flush the queue\n"); + mci->op_state = OP_OFFLINE; - /* workq instance might be running, wait for it */ - flush_workqueue(edac_workqueue); - } + edac_stop_work(&mci->work); } /* @@ -616,9 +604,8 @@ void edac_mc_reset_delay_period(unsigned long value) list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); - edac_mc_workq_setup(mci, value, false); + edac_mod_work(&mci->work, value); } - mutex_unlock(&mem_ctls_mutex); } @@ -789,7 +776,7 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, /* This instance is NOW RUNNING */ mci->op_state = OP_RUNNING_POLL; - edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true); + edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); } else { mci->op_state = OP_RUNNING_INTERRUPT; } diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index a75acea0f674..26e65ab5932a 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -880,21 +880,26 @@ static struct device_type mci_attr_type = { int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, const struct attribute_group **groups) { + char *name; int i, err; /* * The memory controller needs its own bus, in order to avoid * namespace conflicts at /sys/bus/edac. */ - mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); - if (!mci->bus->name) + name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); + if (!name) return -ENOMEM; + mci->bus->name = name; + edac_dbg(0, "creating bus %s\n", mci->bus->name); err = bus_register(mci->bus); - if (err < 0) - goto fail_free_name; + if (err < 0) { + kfree(name); + return err; + } /* get the /sys/devices/system/edac subsys reference */ mci->dev.type = &mci_attr_type; @@ -961,8 +966,8 @@ fail_unregister_dimm: device_unregister(&mci->dev); fail_unregister_bus: bus_unregister(mci->bus); -fail_free_name: - kfree(mci->bus->name); + kfree(name); + return err; } @@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) void edac_unregister_sysfs(struct mem_ctl_info *mci) { + const char *name = mci->bus->name; + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); device_unregister(&mci->dev); bus_unregister(mci->bus); - kfree(mci->bus->name); + kfree(name); } static void mc_attr_release(struct device *dev) @@ -1018,24 +1025,15 @@ static struct device_type mc_attr_type = { */ int __init edac_mc_sysfs_init(void) { - struct bus_type *edac_subsys; int err; - /* get the /sys/devices/system/edac subsys reference */ - edac_subsys = edac_get_sysfs_subsys(); - if (edac_subsys == NULL) { - edac_dbg(1, "no edac_subsys\n"); - err = -EINVAL; - goto out; - } - mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL); if (!mci_pdev) { err = -ENOMEM; - goto out_put_sysfs; + goto out; } - mci_pdev->bus = edac_subsys; + mci_pdev->bus = edac_get_sysfs_subsys(); mci_pdev->type = &mc_attr_type; device_initialize(mci_pdev); dev_set_name(mci_pdev, "mc"); @@ -1050,8 +1048,6 @@ int __init edac_mc_sysfs_init(void) out_dev_free: kfree(mci_pdev); - out_put_sysfs: - edac_put_sysfs_subsys(); out: return err; } @@ -1059,5 +1055,4 @@ int __init edac_mc_sysfs_init(void) void edac_mc_sysfs_exit(void) { device_unregister(mci_pdev); - edac_put_sysfs_subsys(); } diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 9cb082a19d8a..5f8543be995a 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c @@ -43,9 +43,6 @@ module_param_call(edac_debug_level, edac_set_debug_level, param_get_int, MODULE_PARM_DESC(edac_debug_level, "EDAC debug level: [0-4], 
default: 2"); #endif -/* scope is to module level only */ -struct workqueue_struct *edac_workqueue; - /* * edac_op_state_to_string() */ @@ -66,31 +63,37 @@ char *edac_op_state_to_string(int opstate) } /* - * edac_workqueue_setup - * initialize the edac work queue for polling operations + * sysfs object: /sys/devices/system/edac + * need to export to other files */ -static int edac_workqueue_setup(void) +static struct bus_type edac_subsys = { + .name = "edac", + .dev_name = "edac", +}; + +static int edac_subsys_init(void) { - edac_workqueue = create_singlethread_workqueue("edac-poller"); - if (edac_workqueue == NULL) - return -ENODEV; - else - return 0; + int err; + + /* create the /sys/devices/system/edac directory */ + err = subsys_system_register(&edac_subsys, NULL); + if (err) + printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n"); + + return err; } -/* - * edac_workqueue_teardown - * teardown the edac workqueue - */ -static void edac_workqueue_teardown(void) +static void edac_subsys_exit(void) { - if (edac_workqueue) { - flush_workqueue(edac_workqueue); - destroy_workqueue(edac_workqueue); - edac_workqueue = NULL; - } + bus_unregister(&edac_subsys); } +/* return pointer to the 'edac' node in sysfs */ +struct bus_type *edac_get_sysfs_subsys(void) +{ + return &edac_subsys; +} +EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys); /* * edac_init * module initialization entry point @@ -101,6 +104,10 @@ static int __init edac_init(void) edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n"); + err = edac_subsys_init(); + if (err) + return err; + /* * Harvest and clear any boot/initialization PCI parity errors * @@ -129,6 +136,8 @@ err_wq: edac_mc_sysfs_exit(); err_sysfs: + edac_subsys_exit(); + return err; } @@ -144,6 +153,7 @@ static void __exit edac_exit(void) edac_workqueue_teardown(); edac_mc_sysfs_exit(); edac_debugfs_exit(); + edac_subsys_exit(); } /* diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index b95a48fc723d..cfaacb99c973 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -47,10 +47,12 @@ extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev); extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev); /* edac core workqueue: single CPU mode */ -extern struct workqueue_struct *edac_workqueue; -extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, - unsigned msec); -extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); +int edac_workqueue_setup(void); +void edac_workqueue_teardown(void); +bool edac_queue_work(struct delayed_work *work, unsigned long delay); +bool edac_stop_work(struct delayed_work *work); +bool edac_mod_work(struct delayed_work *work, unsigned long delay); + extern void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, unsigned long value); extern void edac_mc_reset_delay_period(unsigned long value); diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 2cf44b4db80c..99685388d3fb 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -178,41 +178,6 @@ static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) INIT_LIST_HEAD(&pci->link); } -#if 0 -/* Older code, but might use in the future */ - -/* - * edac_pci_find() - * Search for an edac_pci_ctl_info structure whose index is 'idx' - * - * If found, return a pointer to the structure - * Else return NULL. - * - * Caller must hold pci_ctls_mutex. 
- */ -struct edac_pci_ctl_info *edac_pci_find(int idx) -{ - struct list_head *item; - struct edac_pci_ctl_info *pci; - - /* Iterage over list, looking for exact match of ID */ - list_for_each(item, &edac_pci_list) { - pci = list_entry(item, struct edac_pci_ctl_info, link); - - if (pci->pci_idx >= idx) { - if (pci->pci_idx == idx) - return pci; - - /* not on list, so terminate early */ - break; - } - } - - return NULL; -} -EXPORT_SYMBOL_GPL(edac_pci_find); -#endif - /* * edac_pci_workq_function() * @@ -244,7 +209,7 @@ static void edac_pci_workq_function(struct work_struct *work_req) delay = msecs_to_jiffies(msec); /* Reschedule only if we are in POLL mode */ - queue_delayed_work(edac_workqueue, &pci->work, delay); + edac_queue_work(&pci->work, delay); } mutex_unlock(&edac_pci_ctls_mutex); @@ -264,8 +229,8 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, edac_dbg(0, "\n"); INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); - queue_delayed_work(edac_workqueue, &pci->work, - msecs_to_jiffies(edac_pci_get_poll_msec())); + + edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec())); } /* @@ -274,37 +239,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, */ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) { - int status; - - edac_dbg(0, "\n"); - - status = cancel_delayed_work(&pci->work); - if (status == 0) - flush_workqueue(edac_workqueue); -} - -/* - * edac_pci_reset_delay_period - * - * called with a new period value for the workq period - * a) stop current workq timer - * b) restart workq timer with new value - */ -void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, - unsigned long value) -{ edac_dbg(0, "\n"); - edac_pci_workq_teardown(pci); - - /* need to lock for the setup */ - mutex_lock(&edac_pci_ctls_mutex); - - edac_pci_workq_setup(pci, value); + pci->op_state = OP_OFFLINE; - mutex_unlock(&edac_pci_ctls_mutex); + edac_stop_work(&pci->work); } -EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period); /* * edac_pci_alloc_index: Allocate a unique PCI index number diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 24d877f6e577..6e3428ba400f 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -331,10 +331,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = { }; /** - * edac_pci_main_kobj_setup() - * - * setup the sysfs for EDAC PCI attributes - * assumes edac_subsys has already been initialized + * edac_pci_main_kobj_setup: Setup the sysfs for EDAC PCI attributes. 
*/ static int edac_pci_main_kobj_setup(void) { @@ -351,11 +348,6 @@ static int edac_pci_main_kobj_setup(void) * controls and attributes */ edac_subsys = edac_get_sysfs_subsys(); - if (edac_subsys == NULL) { - edac_dbg(1, "no edac_subsys\n"); - err = -ENODEV; - goto decrement_count_fail; - } /* Bump the reference count on this module to ensure the * modules isn't unloaded until we deconstruct the top @@ -364,7 +356,7 @@ static int edac_pci_main_kobj_setup(void) if (!try_module_get(THIS_MODULE)) { edac_dbg(1, "try_module_get() failed\n"); err = -ENODEV; - goto mod_get_fail; + goto decrement_count_fail; } edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); @@ -399,9 +391,6 @@ kobject_init_and_add_fail: kzalloc_fail: module_put(THIS_MODULE); -mod_get_fail: - edac_put_sysfs_subsys(); - decrement_count_fail: /* if are on this error exit, nothing to tear down */ atomic_dec(&edac_pci_sysfs_refcount); @@ -426,7 +415,6 @@ static void edac_pci_main_kobj_teardown(void) if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { edac_dbg(0, "called kobject_put on main kobj\n"); kobject_put(edac_pci_top_main_kobj); - edac_put_sysfs_subsys(); } } diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index ff07aae5b7fb..952e411f01f2 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c @@ -26,8 +26,6 @@ EXPORT_SYMBOL_GPL(edac_handlers); int edac_err_assert = 0; EXPORT_SYMBOL_GPL(edac_err_assert); -static atomic_t edac_subsys_valid = ATOMIC_INIT(0); - int edac_report_status = EDAC_REPORTING_ENABLED; EXPORT_SYMBOL_GPL(edac_report_status); @@ -68,42 +66,3 @@ void edac_atomic_assert_error(void) edac_err_assert++; } EXPORT_SYMBOL_GPL(edac_atomic_assert_error); - -/* - * sysfs object: /sys/devices/system/edac - * need to export to other files - */ -struct bus_type edac_subsys = { - .name = "edac", - .dev_name = "edac", -}; -EXPORT_SYMBOL_GPL(edac_subsys); - -/* return pointer to the 'edac' node in sysfs */ -struct bus_type *edac_get_sysfs_subsys(void) -{ - int err = 0; - - if (atomic_read(&edac_subsys_valid)) - goto out; - - /* create the /sys/devices/system/edac directory */ - err = subsys_system_register(&edac_subsys, NULL); - if (err) { - printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n"); - return NULL; - } - -out: - atomic_inc(&edac_subsys_valid); - return &edac_subsys; -} -EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys); - -void edac_put_sysfs_subsys(void) -{ - /* last user unregisters it */ - if (atomic_dec_and_test(&edac_subsys_valid)) - bus_unregister(&edac_subsys); -} -EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys); diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 40917775dca1..c655162caf08 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -575,9 +575,7 @@ static void i5100_check_error(struct mem_ctl_info *mci) static void i5100_refresh_scrubbing(struct work_struct *work) { - struct delayed_work *i5100_scrubbing = container_of(work, - struct delayed_work, - work); + struct delayed_work *i5100_scrubbing = to_delayed_work(work); struct i5100_priv *priv = container_of(i5100_scrubbing, struct i5100_priv, i5100_scrubbing); diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 23ef8e9f2c9a..b7139c160baf 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -20,6 +20,7 @@ #include <linux/edac.h> #include <linux/smp.h> #include <linux/gfp.h> +#include <linux/fsl/edac.h> #include <linux/of_platform.h> #include <linux/of_device.h> @@ -238,10 +239,12 @@ static irqreturn_t 
mpc85xx_pci_isr(int irq, void *dev_id) return IRQ_HANDLED; } -int mpc85xx_pci_err_probe(struct platform_device *op) +static int mpc85xx_pci_err_probe(struct platform_device *op) { struct edac_pci_ctl_info *pci; struct mpc85xx_pci_pdata *pdata; + struct mpc85xx_edac_pci_plat_data *plat_data; + struct device_node *of_node; struct resource r; int res = 0; @@ -266,7 +269,15 @@ int mpc85xx_pci_err_probe(struct platform_device *op) pdata->name = "mpc85xx_pci_err"; pdata->irq = NO_IRQ; - if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0) + plat_data = op->dev.platform_data; + if (!plat_data) { + dev_err(&op->dev, "no platform data"); + res = -ENXIO; + goto err; + } + of_node = plat_data->of_node; + + if (mpc85xx_pcie_find_capability(of_node) > 0) pdata->is_pcie = true; dev_set_drvdata(&op->dev, pci); @@ -284,7 +295,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op) pdata->edac_idx = edac_pci_idx++; - res = of_address_to_resource(op->dev.of_node, 0, &r); + res = of_address_to_resource(of_node, 0, &r); if (res) { printk(KERN_ERR "%s: Unable to get resource for " "PCI err regs\n", __func__); @@ -339,7 +350,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op) } if (edac_op_state == EDAC_OPSTATE_INT) { - pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); + pdata->irq = irq_of_parse_and_map(of_node, 0); res = devm_request_irq(&op->dev, pdata->irq, mpc85xx_pci_isr, IRQF_SHARED, @@ -386,8 +397,22 @@ err: devres_release_group(&op->dev, mpc85xx_pci_err_probe); return res; } -EXPORT_SYMBOL(mpc85xx_pci_err_probe); +static const struct platform_device_id mpc85xx_pci_err_match[] = { + { + .name = "mpc85xx-pci-edac" + }, + {} +}; + +static struct platform_driver mpc85xx_pci_err_driver = { + .probe = mpc85xx_pci_err_probe, + .id_table = mpc85xx_pci_err_match, + .driver = { + .name = "mpc85xx_pci_err", + .suppress_bind_attrs = true, + }, +}; #endif /* CONFIG_PCI */ /**************************** L2 Err device ***************************/ @@ -1208,6 +1233,14 @@ static void __init mpc85xx_mc_clear_rfxe(void *data) } #endif +static struct platform_driver * const drivers[] = { + &mpc85xx_mc_err_driver, + &mpc85xx_l2_err_driver, +#ifdef CONFIG_PCI + &mpc85xx_pci_err_driver, +#endif +}; + static int __init mpc85xx_mc_init(void) { int res = 0; @@ -1226,13 +1259,9 @@ static int __init mpc85xx_mc_init(void) break; } - res = platform_driver_register(&mpc85xx_mc_err_driver); - if (res) - printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n"); - - res = platform_driver_register(&mpc85xx_l2_err_driver); + res = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); if (res) - printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); + printk(KERN_WARNING EDAC_MOD_STR "drivers fail to register\n"); #ifdef CONFIG_FSL_SOC_BOOKE pvr = mfspr(SPRN_PVR); @@ -1270,8 +1299,7 @@ static void __exit mpc85xx_mc_exit(void) on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); } #endif - platform_driver_unregister(&mpc85xx_l2_err_driver); - platform_driver_unregister(&mpc85xx_mc_err_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(mpc85xx_mc_exit); diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index 0574e1bbe45c..6c54127e6eae 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c @@ -847,6 +847,15 @@ static struct platform_driver mv64x60_mc_err_driver = { } }; +static struct platform_driver * const drivers[] = { + &mv64x60_mc_err_driver, + &mv64x60_cpu_err_driver, + &mv64x60_sram_err_driver, +#ifdef CONFIG_PCI + &mv64x60_pci_err_driver, 
+#endif +}; + static int __init mv64x60_edac_init(void) { int ret = 0; @@ -863,39 +872,13 @@ static int __init mv64x60_edac_init(void) break; } - ret = platform_driver_register(&mv64x60_mc_err_driver); - if (ret) - printk(KERN_WARNING EDAC_MOD_STR "MC err failed to register\n"); - - ret = platform_driver_register(&mv64x60_cpu_err_driver); - if (ret) - printk(KERN_WARNING EDAC_MOD_STR - "CPU err failed to register\n"); - - ret = platform_driver_register(&mv64x60_sram_err_driver); - if (ret) - printk(KERN_WARNING EDAC_MOD_STR - "SRAM err failed to register\n"); - -#ifdef CONFIG_PCI - ret = platform_driver_register(&mv64x60_pci_err_driver); - if (ret) - printk(KERN_WARNING EDAC_MOD_STR - "PCI err failed to register\n"); -#endif - - return ret; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(mv64x60_edac_init); static void __exit mv64x60_edac_exit(void) { -#ifdef CONFIG_PCI - platform_driver_unregister(&mv64x60_pci_err_driver); -#endif - platform_driver_unregister(&mv64x60_sram_err_driver); - platform_driver_unregister(&mv64x60_cpu_err_driver); - platform_driver_unregister(&mv64x60_mc_err_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(mv64x60_edac_exit); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 429309c62699..e438ee5b433f 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -65,15 +65,20 @@ static const u32 ibridge_dram_rule[] = { 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, }; -#define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) -#define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) -#define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1) +static const u32 knl_dram_rule[] = { + 0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */ + 0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */ + 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */ + 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */ + 0x100, 0x108, 0x110, 0x118, /* 20-23 */ +}; + #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) #define A7MODE(reg) GET_BITFIELD(reg, 26, 26) -static char *get_dram_attr(u32 reg) +static char *show_dram_attr(u32 attr) { - switch(DRAM_ATTR(reg)) { + switch (attr) { case 0: return "DRAM"; case 1: @@ -97,6 +102,14 @@ static const u32 ibridge_interleave_list[] = { 0xdc, 0xe4, 0xec, 0xf4, 0xfc, }; +static const u32 knl_interleave_list[] = { + 0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */ + 0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */ + 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */ + 0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */ + 0x104, 0x10c, 0x114, 0x11c, /* 20-23 */ +}; + struct interleave_pkg { unsigned char start; unsigned char end; @@ -134,10 +147,13 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, /* Devices 12 Function 7 */ #define TOLM 0x80 -#define TOHM 0x84 +#define TOHM 0x84 #define HASWELL_TOLM 0xd0 #define HASWELL_TOHM_0 0xd4 #define HASWELL_TOHM_1 0xd8 +#define KNL_TOLM 0xd0 +#define KNL_TOHM_0 0xd4 +#define KNL_TOHM_1 0xd8 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) @@ -148,6 +164,8 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11) +#define SOURCE_ID_KNL(reg) GET_BITFIELD(reg, 12, 14) + #define SAD_CONTROL 0xf4 /* Device 14 function 0 */ @@ -170,6 +188,7 @@ static const u32 tad_dram_rule[] = { /* Device 15, function 0 */ #define MCMTR 0x7c +#define KNL_MCMTR 0x624 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2) #define IS_LOCKSTEP_ENABLED(mcmtr) 
GET_BITFIELD(mcmtr, 1, 1) @@ -186,6 +205,8 @@ static const int mtr_regs[] = { 0x80, 0x84, 0x88, }; +static const int knl_mtr_reg = 0xb60; + #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19) #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14) #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13) @@ -256,6 +277,9 @@ static const u32 correrrthrsld[] = { #define NUM_CHANNELS 8 /* 2MC per socket, four chan per MC */ #define MAX_DIMMS 3 /* Max DIMMS per channel */ +#define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ +#define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ +#define KNL_MAX_EDCS 8 /* Embedded DRAM controllers */ #define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */ enum type { @@ -263,6 +287,7 @@ enum type { IVY_BRIDGE, HASWELL, BROADWELL, + KNIGHTS_LANDING, }; struct sbridge_pvt; @@ -273,6 +298,10 @@ struct sbridge_info { u64 (*get_tolm)(struct sbridge_pvt *pvt); u64 (*get_tohm)(struct sbridge_pvt *pvt); u64 (*rir_limit)(u32 reg); + u64 (*sad_limit)(u32 reg); + u32 (*interleave_mode)(u32 reg); + char* (*show_interleave_mode)(u32 reg); + u32 (*dram_attr)(u32 reg); const u32 *dram_rule; const u32 *interleave_list; const struct interleave_pkg *interleave_pkg; @@ -308,6 +337,16 @@ struct sbridge_dev { struct mem_ctl_info *mci; }; +struct knl_pvt { + struct pci_dev *pci_cha[KNL_MAX_CHAS]; + struct pci_dev *pci_channel[KNL_MAX_CHANNELS]; + struct pci_dev *pci_mc0; + struct pci_dev *pci_mc1; + struct pci_dev *pci_mc0_misc; + struct pci_dev *pci_mc1_misc; + struct pci_dev *pci_mc_info; /* tolm, tohm */ +}; + struct sbridge_pvt { struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; struct pci_dev *pci_sad0, *pci_sad1; @@ -336,6 +375,7 @@ struct sbridge_pvt { /* Memory description */ u64 tolm, tohm; + struct knl_pvt knl; }; #define PCI_DESCR(device_id, opt) \ @@ -509,6 +549,50 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = { {0,} /* 0 terminated list. */ }; +/* Knight's Landing Support */ +/* + * KNL's memory channels are swizzled between memory controllers. + * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2 + */ +#define knl_channel_remap(channel) ((channel + 3) % 6) + +/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 +/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL 0x7843 +/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844 +/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0 0x782a +/* SAD target - 1-29-1 (1 of these) */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1 0x782b +/* Caching / Home Agent */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA 0x782c +/* Device with TOLM and TOHM, 0-5-0 (1 of these) */ +#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM 0x7810 + +/* + * KNL differs from SB, IB, and Haswell in that it has multiple + * instances of the same device with the same device ID, so we handle that + * by creating as many copies in the table as we expect to find. + * (Like device ID must be grouped together.) + */ + +static const struct pci_id_descr pci_dev_descr_knl[] = { + [0] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) }, + [1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) }, + [2 ... 3] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)}, + [4 ... 41] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) }, + [42 ... 
47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) }, + [48] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) }, + [49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) }, +}; + +static const struct pci_id_table pci_dev_descr_knl_table[] = { + PCI_ID_TABLE_ENTRY(pci_dev_descr_knl), + {0,} +}; + /* * Broadwell support * @@ -585,6 +669,7 @@ static const struct pci_device_id sbridge_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0)}, {0,} /* 0 terminated list. */ }; @@ -598,7 +683,7 @@ static inline int numrank(enum type type, u32 mtr) int ranks = (1 << RANK_CNT_BITS(mtr)); int max = 4; - if (type == HASWELL || type == BROADWELL) + if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING) max = 8; if (ranks > max) { @@ -636,10 +721,19 @@ static inline int numcol(u32 mtr) return 1 << cols; } -static struct sbridge_dev *get_sbridge_dev(u8 bus) +static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus) { struct sbridge_dev *sbridge_dev; + /* + * If we have devices scattered across several busses that pertain + * to the same memory controller, we'll lump them all together. + */ + if (multi_bus) { + return list_first_entry_or_null(&sbridge_edac_list, + struct sbridge_dev, list); + } + list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { if (sbridge_dev->bus == bus) return sbridge_dev; @@ -718,6 +812,67 @@ static u64 rir_limit(u32 reg) return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff; } +static u64 sad_limit(u32 reg) +{ + return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff; +} + +static u32 interleave_mode(u32 reg) +{ + return GET_BITFIELD(reg, 1, 1); +} + +char *show_interleave_mode(u32 reg) +{ + return interleave_mode(reg) ? 
"8:6" : "[8:6]XOR[18:16]"; +} + +static u32 dram_attr(u32 reg) +{ + return GET_BITFIELD(reg, 2, 3); +} + +static u64 knl_sad_limit(u32 reg) +{ + return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff; +} + +static u32 knl_interleave_mode(u32 reg) +{ + return GET_BITFIELD(reg, 1, 2); +} + +static char *knl_show_interleave_mode(u32 reg) +{ + char *s; + + switch (knl_interleave_mode(reg)) { + case 0: + s = "use address bits [8:6]"; + break; + case 1: + s = "use address bits [10:8]"; + break; + case 2: + s = "use address bits [14:12]"; + break; + case 3: + s = "use address bits [32:30]"; + break; + default: + WARN_ON(1); + break; + } + + return s; +} + +static u32 dram_attr_knl(u32 reg) +{ + return GET_BITFIELD(reg, 3, 4); +} + + static enum mem_type get_memory_type(struct sbridge_pvt *pvt) { u32 reg; @@ -769,6 +924,12 @@ out: return mtype; } +static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr) +{ + /* for KNL value is fixed */ + return DEV_X16; +} + static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr) { /* there's no way to figure out */ @@ -812,6 +973,12 @@ static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr) return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9)); } +static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt) +{ + /* DDR4 RDIMMS and LRDIMMS are supported */ + return MEM_RDDR4; +} + static u8 get_node_id(struct sbridge_pvt *pvt) { u32 reg; @@ -827,6 +994,15 @@ static u8 haswell_get_node_id(struct sbridge_pvt *pvt) return GET_BITFIELD(reg, 0, 3); } +static u8 knl_get_node_id(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®); + return GET_BITFIELD(reg, 0, 2); +} + + static u64 haswell_get_tolm(struct sbridge_pvt *pvt) { u32 reg; @@ -848,6 +1024,26 @@ static u64 haswell_get_tohm(struct sbridge_pvt *pvt) return rc | 0x1ffffff; } +static u64 knl_get_tolm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, ®); + return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff; +} + +static u64 knl_get_tohm(struct sbridge_pvt *pvt) +{ + u64 rc; + u32 reg_lo, reg_hi; + + pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, ®_lo); + pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, ®_hi); + rc = ((u64)reg_hi << 32) | reg_lo; + return rc | 0x3ffffff; +} + + static u64 haswell_rir_limit(u32 reg) { return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1; @@ -905,11 +1101,22 @@ static int check_if_ecc_is_active(const u8 bus, enum type type) case BROADWELL: id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA; break; + case KNIGHTS_LANDING: + /* + * KNL doesn't group things by bus the same way + * SB/IB/Haswell does. + */ + id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA; + break; default: return -ENODEV; } - pdev = get_pdev_same_bus(bus, id); + if (type != KNIGHTS_LANDING) + pdev = get_pdev_same_bus(bus, id); + else + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, 0); + if (!pdev) { sbridge_printk(KERN_ERR, "Couldn't find PCI device " "%04x:%04x! on bus %02d\n", @@ -917,7 +1124,8 @@ static int check_if_ecc_is_active(const u8 bus, enum type type) return -ENODEV; } - pci_read_config_dword(pdev, MCMTR, &mcmtr); + pci_read_config_dword(pdev, + type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr); if (!IS_ECC_ENABLED(mcmtr)) { sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); return -ENODEV; @@ -925,6 +1133,476 @@ static int check_if_ecc_is_active(const u8 bus, enum type type) return 0; } +/* Low bits of TAD limit, and some metadata. 
*/ +static const u32 knl_tad_dram_limit_lo[] = { + 0x400, 0x500, 0x600, 0x700, + 0x800, 0x900, 0xa00, 0xb00, +}; + +/* Low bits of TAD offset. */ +static const u32 knl_tad_dram_offset_lo[] = { + 0x404, 0x504, 0x604, 0x704, + 0x804, 0x904, 0xa04, 0xb04, +}; + +/* High 16 bits of TAD limit and offset. */ +static const u32 knl_tad_dram_hi[] = { + 0x408, 0x508, 0x608, 0x708, + 0x808, 0x908, 0xa08, 0xb08, +}; + +/* Number of ways a tad entry is interleaved. */ +static const u32 knl_tad_ways[] = { + 8, 6, 4, 3, 2, 1, +}; + +/* + * Retrieve the n'th Target Address Decode table entry + * from the memory controller's TAD table. + * + * @pvt: driver private data + * @entry: which entry you want to retrieve + * @mc: which memory controller (0 or 1) + * @offset: output tad range offset + * @limit: output address of first byte above tad range + * @ways: output number of interleave ways + * + * The offset value has curious semantics. It's a sort of running total + * of the sizes of all the memory regions that aren't mapped in this + * tad table. + */ +static int knl_get_tad(const struct sbridge_pvt *pvt, + const int entry, + const int mc, + u64 *offset, + u64 *limit, + int *ways) +{ + u32 reg_limit_lo, reg_offset_lo, reg_hi; + struct pci_dev *pci_mc; + int way_id; + + switch (mc) { + case 0: + pci_mc = pvt->knl.pci_mc0; + break; + case 1: + pci_mc = pvt->knl.pci_mc1; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + pci_read_config_dword(pci_mc, + knl_tad_dram_limit_lo[entry], &reg_limit_lo); + pci_read_config_dword(pci_mc, + knl_tad_dram_offset_lo[entry], &reg_offset_lo); + pci_read_config_dword(pci_mc, + knl_tad_dram_hi[entry], &reg_hi); + + /* Is this TAD entry enabled? */ + if (!GET_BITFIELD(reg_limit_lo, 0, 0)) + return -ENODEV; + + way_id = GET_BITFIELD(reg_limit_lo, 3, 5); + + if (way_id < ARRAY_SIZE(knl_tad_ways)) { + *ways = knl_tad_ways[way_id]; + } else { + *ways = 0; + sbridge_printk(KERN_ERR, + "Unexpected value %d in mc_tad_limit_lo wayness field\n", + way_id); + return -ENODEV; + } + + /* + * The least significant 6 bits of base and limit are truncated. + * For limit, we fill the missing bits with 1s. + */ + *offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) | + ((u64) GET_BITFIELD(reg_hi, 0, 15) << 32); + *limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 | + ((u64) GET_BITFIELD(reg_hi, 16, 31) << 32); + + return 0; +} + +/* Determine which memory controller is responsible for a given channel. */ +static int knl_channel_mc(int channel) +{ + WARN_ON(channel < 0 || channel >= 6); + + return channel < 3 ? 1 : 0; +} + +/* + * Get the Nth entry from EDC_ROUTE_TABLE register. + * (This is the per-tile mapping of logical interleave targets to + * physical EDC modules.) + * + * entry 0: 0:2 + * 1: 3:5 + * 2: 6:8 + * 3: 9:11 + * 4: 12:14 + * 5: 15:17 + * 6: 18:20 + * 7: 21:23 + * reserved: 24:31 + */ +static u32 knl_get_edc_route(int entry, u32 reg) +{ + WARN_ON(entry >= KNL_MAX_EDCS); + return GET_BITFIELD(reg, entry*3, (entry*3)+2); +} + +/* + * Get the Nth entry from MC_ROUTE_TABLE register. + * (This is the per-tile mapping of logical interleave targets to + * physical DRAM channels modules.) + * + * entry 0: mc 0:2 channel 18:19 + * 1: mc 3:5 channel 20:21 + * 2: mc 6:8 channel 22:23 + * 3: mc 9:11 channel 24:25 + * 4: mc 12:14 channel 26:27 + * 5: mc 15:17 channel 28:29 + * reserved: 30:31 + * + * Though we have 3 bits to identify the MC, we should only see + * the values 0 or 1.
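+ * (Worked example, not part of the original patch: for entry 2 the mc field
+ * is reg[8:6] and the channel field is reg[23:22]; with mc == 1 and
+ * chan == 2 the logical target decodes to
+ *   knl_channel_remap(1 * 3 + 2) == (5 + 3) % 6 == 2.)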
+ */ + +static u32 knl_get_mc_route(int entry, u32 reg) +{ + int mc, chan; + + WARN_ON(entry >= KNL_MAX_CHANNELS); + + mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); + chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); + + return knl_channel_remap(mc*3 + chan); +} + +/* + * Render the EDC_ROUTE register in human-readable form. + * Output string s should be at least KNL_MAX_EDCS*2 bytes. + */ +static void knl_show_edc_route(u32 reg, char *s) +{ + int i; + + for (i = 0; i < KNL_MAX_EDCS; i++) { + s[i*2] = knl_get_edc_route(i, reg) + '0'; + s[i*2+1] = '-'; + } + + s[KNL_MAX_EDCS*2 - 1] = '\0'; +} + +/* + * Render the MC_ROUTE register in human-readable form. + * Output string s should be at least KNL_MAX_CHANNELS*2 bytes. + */ +static void knl_show_mc_route(u32 reg, char *s) +{ + int i; + + for (i = 0; i < KNL_MAX_CHANNELS; i++) { + s[i*2] = knl_get_mc_route(i, reg) + '0'; + s[i*2+1] = '-'; + } + + s[KNL_MAX_CHANNELS*2 - 1] = '\0'; +} + +#define KNL_EDC_ROUTE 0xb8 +#define KNL_MC_ROUTE 0xb4 + +/* Is this dram rule backed by regular DRAM in flat mode? */ +#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29) + +/* Is this dram rule cached? */ +#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28) + +/* Is this rule backed by edc ? */ +#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29) + +/* Is this rule backed by DRAM, cacheable in EDRAM? */ +#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28) + +/* Is this rule mod3? */ +#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27) + +/* + * Figure out how big our RAM modules are. + * + * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we + * have to figure this out from the SAD rules, interleave lists, route tables, + * and TAD rules. + * + * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to + * inspect the TAD rules to figure out how large the SAD regions really are. + * + * When we know the real size of a SAD region and how many ways it's + * interleaved, we know the individual contribution of each channel to + * TAD is size/ways. + * + * Finally, we have to check whether each channel participates in each SAD + * region. + * + * Fortunately, KNL only supports one DIMM per channel, so once we know how + * much memory the channel uses, we know the DIMM is at least that large. + * (The BIOS might possibly choose not to map all available memory, in which + * case we will underreport the size of the DIMM.) + * + * In theory, we could try to determine the EDC sizes as well, but that would + * only work in flat mode, not in cache mode. + * + * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS + * elements) + */ +static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) +{ + u64 sad_base, sad_size, sad_limit = 0; + u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace; + int sad_rule = 0; + int tad_rule = 0; + int intrlv_ways, tad_ways; + u32 first_pkg, pkg; + int i; + u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */ + u32 dram_rule, interleave_reg; + u32 mc_route_reg[KNL_MAX_CHAS]; + u32 edc_route_reg[KNL_MAX_CHAS]; + int edram_only; + char edc_route_string[KNL_MAX_EDCS*2]; + char mc_route_string[KNL_MAX_CHANNELS*2]; + int cur_reg_start; + int mc; + int channel; + int way; + int participants[KNL_MAX_CHANNELS]; + int participant_count = 0; + + for (i = 0; i < KNL_MAX_CHANNELS; i++) + mc_sizes[i] = 0; + + /* Read the EDC route table in each CHA. 
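+	 * (Note, not part of the original patch: the loop below also coalesces
+	 * runs of consecutive CHAs whose route registers are identical, so the
+	 * debug output prints one "edc route table for CHA i-j" line per run
+	 * of identical values rather than one line per CHA.)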
*/ + cur_reg_start = 0; + for (i = 0; i < KNL_MAX_CHAS; i++) { + pci_read_config_dword(pvt->knl.pci_cha[i], + KNL_EDC_ROUTE, &edc_route_reg[i]); + + if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) { + knl_show_edc_route(edc_route_reg[i-1], + edc_route_string); + if (cur_reg_start == i-1) + edac_dbg(0, "edc route table for CHA %d: %s\n", + cur_reg_start, edc_route_string); + else + edac_dbg(0, "edc route table for CHA %d-%d: %s\n", + cur_reg_start, i-1, edc_route_string); + cur_reg_start = i; + } + } + knl_show_edc_route(edc_route_reg[i-1], edc_route_string); + if (cur_reg_start == i-1) + edac_dbg(0, "edc route table for CHA %d: %s\n", + cur_reg_start, edc_route_string); + else + edac_dbg(0, "edc route table for CHA %d-%d: %s\n", + cur_reg_start, i-1, edc_route_string); + + /* Read the MC route table in each CHA. */ + cur_reg_start = 0; + for (i = 0; i < KNL_MAX_CHAS; i++) { + pci_read_config_dword(pvt->knl.pci_cha[i], + KNL_MC_ROUTE, &mc_route_reg[i]); + + if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) { + knl_show_mc_route(mc_route_reg[i-1], mc_route_string); + if (cur_reg_start == i-1) + edac_dbg(0, "mc route table for CHA %d: %s\n", + cur_reg_start, mc_route_string); + else + edac_dbg(0, "mc route table for CHA %d-%d: %s\n", + cur_reg_start, i-1, mc_route_string); + cur_reg_start = i; + } + } + knl_show_mc_route(mc_route_reg[i-1], mc_route_string); + if (cur_reg_start == i-1) + edac_dbg(0, "mc route table for CHA %d: %s\n", + cur_reg_start, mc_route_string); + else + edac_dbg(0, "mc route table for CHA %d-%d: %s\n", + cur_reg_start, i-1, mc_route_string); + + /* Process DRAM rules */ + for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) { + /* previous limit becomes the new base */ + sad_base = sad_limit; + + pci_read_config_dword(pvt->pci_sad0, + pvt->info.dram_rule[sad_rule], &dram_rule); + + if (!DRAM_RULE_ENABLE(dram_rule)) + break; + + edram_only = KNL_EDRAM_ONLY(dram_rule); + + sad_limit = pvt->info.sad_limit(dram_rule)+1; + sad_size = sad_limit - sad_base; + + pci_read_config_dword(pvt->pci_sad0, + pvt->info.interleave_list[sad_rule], &interleave_reg); + + /* + * Find out how many ways this dram rule is interleaved. + * We stop when we see the first channel again. + */ + first_pkg = sad_pkg(pvt->info.interleave_pkg, + interleave_reg, 0); + for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) { + pkg = sad_pkg(pvt->info.interleave_pkg, + interleave_reg, intrlv_ways); + + if ((pkg & 0x8) == 0) { + /* + * 0 bit means memory is non-local, + * which KNL doesn't support + */ + edac_dbg(0, "Unexpected interleave target %d\n", + pkg); + return -1; + } + + if (pkg == first_pkg) + break; + } + if (KNL_MOD3(dram_rule)) + intrlv_ways *= 3; + + edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n", + sad_rule, + sad_base, + sad_limit, + intrlv_ways, + edram_only ? ", EDRAM" : ""); + + /* + * Find out how big the SAD region really is by iterating + * over TAD tables (SAD regions may contain holes). + * Each memory controller might have a different TAD table, so + * we have to look at both. + * + * Livespace is the memory that's mapped in this TAD table, + * deadspace is the holes (this could be the MMIO hole, or it + * could be memory that's mapped by the other TAD table but + * not this one). 
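+		 * (Worked example of the arithmetic below, not part of the
+		 * original patch: if a TAD rule reports tad_limit + 1 == 8 GB
+		 * while tad_livespace is still 0 and tad_deadspace is 2 GB,
+		 * then
+		 *   tad_size = 8 GB - (0 + 2 GB) = 6 GB
+		 *   tad_base = 8 GB - 6 GB = 2 GB
+		 * i.e. 2 GB..8 GB is mapped by this rule and 0..2 GB is a
+		 * hole.)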
+ */ + for (mc = 0; mc < 2; mc++) { + sad_actual_size[mc] = 0; + tad_livespace = 0; + for (tad_rule = 0; + tad_rule < ARRAY_SIZE( + knl_tad_dram_limit_lo); + tad_rule++) { + if (knl_get_tad(pvt, + tad_rule, + mc, + &tad_deadspace, + &tad_limit, + &tad_ways)) + break; + + tad_size = (tad_limit+1) - + (tad_livespace + tad_deadspace); + tad_livespace += tad_size; + tad_base = (tad_limit+1) - tad_size; + + if (tad_base < sad_base) { + if (tad_limit > sad_base) + edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n"); + } else if (tad_base < sad_limit) { + if (tad_limit+1 > sad_limit) { + edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n"); + } else { + /* TAD region is completely inside SAD region */ + edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n", + tad_rule, tad_base, + tad_limit, tad_size, + mc); + sad_actual_size[mc] += tad_size; + } + } + tad_base = tad_limit+1; + } + } + + for (mc = 0; mc < 2; mc++) { + edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n", + mc, sad_actual_size[mc], sad_actual_size[mc]); + } + + /* Ignore EDRAM rule */ + if (edram_only) + continue; + + /* Figure out which channels participate in interleave. */ + for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) + participants[channel] = 0; + + /* For each channel, does at least one CHA have + * this channel mapped to the given target? + */ + for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { + for (way = 0; way < intrlv_ways; way++) { + int target; + int cha; + + if (KNL_MOD3(dram_rule)) + target = way; + else + target = 0x7 & sad_pkg( + pvt->info.interleave_pkg, interleave_reg, way); + + for (cha = 0; cha < KNL_MAX_CHAS; cha++) { + if (knl_get_mc_route(target, + mc_route_reg[cha]) == channel + && participants[channel]) { + participant_count++; + participants[channel] = 1; + break; + } + } + } + } + + if (participant_count != intrlv_ways) + edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n", + participant_count, intrlv_ways); + + for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { + mc = knl_channel_mc(channel); + if (participants[channel]) { + edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n", + channel, + sad_actual_size[mc]/intrlv_ways, + sad_rule); + mc_sizes[channel] += + sad_actual_size[mc]/intrlv_ways; + } + } + } + + return 0; +} + static int get_dimm_config(struct mem_ctl_info *mci) { struct sbridge_pvt *pvt = mci->pvt_info; @@ -934,13 +1612,20 @@ static int get_dimm_config(struct mem_ctl_info *mci) u32 reg; enum edac_type mode; enum mem_type mtype; + int channels = pvt->info.type == KNIGHTS_LANDING ? 
+ KNL_MAX_CHANNELS : NUM_CHANNELS; + u64 knl_mc_sizes[KNL_MAX_CHANNELS]; - if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) + if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || + pvt->info.type == KNIGHTS_LANDING) pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®); else pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); - pvt->sbridge_dev->source_id = SOURCE_ID(reg); + if (pvt->info.type == KNIGHTS_LANDING) + pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg); + else + pvt->sbridge_dev->source_id = SOURCE_ID(reg); pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", @@ -948,31 +1633,42 @@ static int get_dimm_config(struct mem_ctl_info *mci) pvt->sbridge_dev->node_id, pvt->sbridge_dev->source_id); - pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); - if (IS_MIRROR_ENABLED(reg)) { - edac_dbg(0, "Memory mirror is enabled\n"); - pvt->is_mirrored = true; - } else { - edac_dbg(0, "Memory mirror is disabled\n"); + /* KNL doesn't support mirroring or lockstep, + * and is always closed page + */ + if (pvt->info.type == KNIGHTS_LANDING) { + mode = EDAC_S4ECD4ED; pvt->is_mirrored = false; - } - pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); - if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { - edac_dbg(0, "Lockstep is enabled\n"); - mode = EDAC_S8ECD8ED; - pvt->is_lockstep = true; + if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0) + return -1; } else { - edac_dbg(0, "Lockstep is disabled\n"); - mode = EDAC_S4ECD4ED; - pvt->is_lockstep = false; - } - if (IS_CLOSE_PG(pvt->info.mcmtr)) { - edac_dbg(0, "address map is on closed page mode\n"); - pvt->is_close_pg = true; - } else { - edac_dbg(0, "address map is on open page mode\n"); - pvt->is_close_pg = false; + pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); + if (IS_MIRROR_ENABLED(reg)) { + edac_dbg(0, "Memory mirror is enabled\n"); + pvt->is_mirrored = true; + } else { + edac_dbg(0, "Memory mirror is disabled\n"); + pvt->is_mirrored = false; + } + + pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); + if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { + edac_dbg(0, "Lockstep is enabled\n"); + mode = EDAC_S8ECD8ED; + pvt->is_lockstep = true; + } else { + edac_dbg(0, "Lockstep is disabled\n"); + mode = EDAC_S4ECD4ED; + pvt->is_lockstep = false; + } + if (IS_CLOSE_PG(pvt->info.mcmtr)) { + edac_dbg(0, "address map is on closed page mode\n"); + pvt->is_close_pg = true; + } else { + edac_dbg(0, "address map is on open page mode\n"); + pvt->is_close_pg = false; + } } mtype = pvt->info.get_memory_type(pvt); @@ -988,23 +1684,46 @@ static int get_dimm_config(struct mem_ctl_info *mci) else banks = 8; - for (i = 0; i < NUM_CHANNELS; i++) { + for (i = 0; i < channels; i++) { u32 mtr; - if (!pvt->pci_tad[i]) - continue; - for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { + int max_dimms_per_channel; + + if (pvt->info.type == KNIGHTS_LANDING) { + max_dimms_per_channel = 1; + if (!pvt->knl.pci_channel[i]) + continue; + } else { + max_dimms_per_channel = ARRAY_SIZE(mtr_regs); + if (!pvt->pci_tad[i]) + continue; + } + + for (j = 0; j < max_dimms_per_channel; j++) { dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); - pci_read_config_dword(pvt->pci_tad[i], - mtr_regs[j], &mtr); + if (pvt->info.type == KNIGHTS_LANDING) { + pci_read_config_dword(pvt->knl.pci_channel[i], + knl_mtr_reg, &mtr); + } else { + pci_read_config_dword(pvt->pci_tad[i], + mtr_regs[j], &mtr); + } edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); if (IS_DIMM_PRESENT(mtr)) { 
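+ /* Slot is populated: derive the DIMM geometry from the MTR,
+  * except on Knights Landing where the column count is fixed
+  * and the row count is computed back from the per-channel
+  * size returned by knl_get_dimm_capacity().
+  */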
pvt->channel[i].dimms++; ranks = numrank(pvt->info.type, mtr); - rows = numrow(mtr); - cols = numcol(mtr); + + if (pvt->info.type == KNIGHTS_LANDING) { + /* For DDR4, this is fixed. */ + cols = 1 << 10; + rows = knl_mc_sizes[i] / + ((u64) cols * ranks * banks * 8); + } else { + rows = numrow(mtr); + cols = numcol(mtr); + } size = ((u64)rows * cols * banks * ranks) >> (20 - 3); npages = MiB_TO_PAGES(size); @@ -1069,7 +1788,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) /* SAD_LIMIT Address range is 45:26 */ pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], ®); - limit = SAD_LIMIT(reg); + limit = pvt->info.sad_limit(reg); if (!DRAM_RULE_ENABLE(reg)) continue; @@ -1081,10 +1800,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci) gb = div_u64_rem(tmp_mb, 1024, &mb); edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", n_sads, - get_dram_attr(reg), + show_dram_attr(pvt->info.dram_attr(reg)), gb, (mb*1000)/1024, ((u64)tmp_mb) << 20L, - INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]", + pvt->info.show_interleave_mode(reg), reg); prv = limit; @@ -1101,6 +1820,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci) } } + if (pvt->info.type == KNIGHTS_LANDING) + return; + /* * Step 3) Get TAD range */ @@ -1248,7 +1970,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, if (!DRAM_RULE_ENABLE(reg)) continue; - limit = SAD_LIMIT(reg); + limit = pvt->info.sad_limit(reg); if (limit <= prv) { sprintf(msg, "Can't discover the memory socket"); return -EINVAL; @@ -1262,8 +1984,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci, return -EINVAL; } dram_rule = reg; - *area_type = get_dram_attr(dram_rule); - interleave_mode = INTERLEAVE_MODE(dram_rule); + *area_type = show_dram_attr(pvt->info.dram_attr(dram_rule)); + interleave_mode = pvt->info.interleave_mode(dram_rule); pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], ®); @@ -1567,7 +2289,8 @@ static void sbridge_put_all_devices(void) static int sbridge_get_onedevice(struct pci_dev **prev, u8 *num_mc, const struct pci_id_table *table, - const unsigned devno) + const unsigned devno, + const int multi_bus) { struct sbridge_dev *sbridge_dev; const struct pci_id_descr *dev_descr = &table->descr[devno]; @@ -1603,7 +2326,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev, } bus = pdev->bus->number; - sbridge_dev = get_sbridge_dev(bus); + sbridge_dev = get_sbridge_dev(bus, multi_bus); if (!sbridge_dev) { sbridge_dev = alloc_sbridge_dev(bus, table); if (!sbridge_dev) { @@ -1652,21 +2375,32 @@ static int sbridge_get_onedevice(struct pci_dev **prev, * @num_mc: pointer to the memory controllers count, to be incremented in case * of success. * @table: model specific table + * @allow_dups: allow for multiple devices to exist with the same device id + * (as implemented, this isn't expected to work correctly in the + * multi-socket case). + * @multi_bus: don't assume devices on different buses belong to different + * memory controllers. 
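+ *		Knights Landing needs both flags: its CHA and DRAM channel
+ *		devices reuse a single PCI device ID per type and are not
+ *		confined to one bus per memory controller, so duplicates must
+ *		be accepted and the bus number alone cannot identify the
+ *		controller (see sbridge_get_all_devices_knl() below).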
* * returns 0 in case of success or error code */ -static int sbridge_get_all_devices(u8 *num_mc, - const struct pci_id_table *table) +static int sbridge_get_all_devices_full(u8 *num_mc, + const struct pci_id_table *table, + int allow_dups, + int multi_bus) { int i, rc; struct pci_dev *pdev = NULL; while (table && table->descr) { for (i = 0; i < table->n_devs; i++) { - pdev = NULL; + if (!allow_dups || i == 0 || + table->descr[i].dev_id != + table->descr[i-1].dev_id) { + pdev = NULL; + } do { rc = sbridge_get_onedevice(&pdev, num_mc, - table, i); + table, i, multi_bus); if (rc < 0) { if (i == 0) { i = table->n_devs; @@ -1675,7 +2409,7 @@ static int sbridge_get_all_devices(u8 *num_mc, sbridge_put_all_devices(); return -ENODEV; } - } while (pdev); + } while (pdev && !allow_dups); } table++; } @@ -1683,6 +2417,11 @@ static int sbridge_get_all_devices(u8 *num_mc, return 0; } +#define sbridge_get_all_devices(num_mc, table) \ + sbridge_get_all_devices_full(num_mc, table, 0, 0) +#define sbridge_get_all_devices_knl(num_mc, table) \ + sbridge_get_all_devices_full(num_mc, table, 1, 1) + static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, struct sbridge_dev *sbridge_dev) { @@ -2038,6 +2777,131 @@ enodev: return -ENODEV; } +static int knl_mci_bind_devs(struct mem_ctl_info *mci, + struct sbridge_dev *sbridge_dev) +{ + struct sbridge_pvt *pvt = mci->pvt_info; + struct pci_dev *pdev; + int dev, func; + + int i; + int devidx; + + for (i = 0; i < sbridge_dev->n_devs; i++) { + pdev = sbridge_dev->pdev[i]; + if (!pdev) + continue; + + /* Extract PCI device and function. */ + dev = (pdev->devfn >> 3) & 0x1f; + func = pdev->devfn & 0x7; + + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_KNL_IMC_MC: + if (dev == 8) + pvt->knl.pci_mc0 = pdev; + else if (dev == 9) + pvt->knl.pci_mc1 = pdev; + else { + sbridge_printk(KERN_ERR, + "Memory controller in unexpected place! (dev %d, fn %d)\n", + dev, func); + continue; + } + break; + + case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0: + pvt->pci_sad0 = pdev; + break; + + case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1: + pvt->pci_sad1 = pdev; + break; + + case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA: + /* There are one of these per tile, and range from + * 1.14.0 to 1.18.5. + */ + devidx = ((dev-14)*8)+func; + + if (devidx < 0 || devidx >= KNL_MAX_CHAS) { + sbridge_printk(KERN_ERR, + "Caching and Home Agent in unexpected place! (dev %d, fn %d)\n", + dev, func); + continue; + } + + WARN_ON(pvt->knl.pci_cha[devidx] != NULL); + + pvt->knl.pci_cha[devidx] = pdev; + break; + + case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL: + devidx = -1; + + /* + * MC0 channels 0-2 are device 9 function 2-4, + * MC1 channels 3-5 are device 8 function 2-4. + */ + + if (dev == 9) + devidx = func-2; + else if (dev == 8) + devidx = 3 + (func-2); + + if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) { + sbridge_printk(KERN_ERR, + "DRAM Channel Registers in unexpected place! 
(dev %d, fn %d)\n", + dev, func); + continue; + } + + WARN_ON(pvt->knl.pci_channel[devidx] != NULL); + pvt->knl.pci_channel[devidx] = pdev; + break; + + case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM: + pvt->knl.pci_mc_info = pdev; + break; + + case PCI_DEVICE_ID_INTEL_KNL_IMC_TA: + pvt->pci_ta = pdev; + break; + + default: + sbridge_printk(KERN_ERR, "Unexpected device %d\n", + pdev->device); + break; + } + } + + if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 || + !pvt->pci_sad0 || !pvt->pci_sad1 || + !pvt->pci_ta) { + goto enodev; + } + + for (i = 0; i < KNL_MAX_CHANNELS; i++) { + if (!pvt->knl.pci_channel[i]) { + sbridge_printk(KERN_ERR, "Missing channel %d\n", i); + goto enodev; + } + } + + for (i = 0; i < KNL_MAX_CHAS; i++) { + if (!pvt->knl.pci_cha[i]) { + sbridge_printk(KERN_ERR, "Missing CHA %d\n", i); + goto enodev; + } + } + + return 0; + +enodev: + sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); + return -ENODEV; +} + /**************************************************************************** Error check routines ****************************************************************************/ @@ -2127,8 +2991,36 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, if (!GET_BITFIELD(m->status, 58, 58)) return; - rc = get_memory_error_data(mci, m->addr, &socket, &ha, - &channel_mask, &rank, &area_type, msg); + if (pvt->info.type == KNIGHTS_LANDING) { + if (channel == 14) { + edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) + ? " recoverable" : "", + mscod, errcode, + m->bank); + } else { + char A = *("A"); + + channel = knl_channel_remap(channel); + channel_mask = 1 << channel; + snprintf(msg, sizeof(msg), + "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) + ? " recoverable" : " ", + mscod, errcode, channel, A + channel); + edac_mc_handle_error(tp_event, mci, core_err_cnt, + m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, + channel, 0, -1, + optype, msg); + } + return; + } else { + rc = get_memory_error_data(mci, m->addr, &socket, &ha, + &channel_mask, &rank, &area_type, msg); + } + if (rc < 0) goto err_parsing; new_mci = get_mci_for_node_id(socket); @@ -2359,10 +3251,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) /* allocate a new MC control structure */ layers[0].type = EDAC_MC_LAYER_CHANNEL; - layers[0].size = NUM_CHANNELS; + layers[0].size = type == KNIGHTS_LANDING ? + KNL_MAX_CHANNELS : NUM_CHANNELS; layers[0].is_virt_csrow = false; layers[1].type = EDAC_MC_LAYER_SLOT; - layers[1].size = MAX_DIMMS; + layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS; layers[1].is_virt_csrow = true; mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers, sizeof(*pvt)); @@ -2380,7 +3273,8 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->sbridge_dev = sbridge_dev; sbridge_dev->mci = mci; - mci->mtype_cap = MEM_FLAG_DDR3; + mci->mtype_cap = type == KNIGHTS_LANDING ? 
+ MEM_FLAG_DDR4 : MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "sbridge_edac.c"; @@ -2401,6 +3295,10 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.get_memory_type = get_memory_type; pvt->info.get_node_id = get_node_id; pvt->info.rir_limit = rir_limit; + pvt->info.sad_limit = sad_limit; + pvt->info.interleave_mode = interleave_mode; + pvt->info.show_interleave_mode = show_interleave_mode; + pvt->info.dram_attr = dram_attr; pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); @@ -2421,6 +3319,10 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.get_memory_type = get_memory_type; pvt->info.get_node_id = get_node_id; pvt->info.rir_limit = rir_limit; + pvt->info.sad_limit = sad_limit; + pvt->info.interleave_mode = interleave_mode; + pvt->info.show_interleave_mode = show_interleave_mode; + pvt->info.dram_attr = dram_attr; pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); pvt->info.interleave_list = sbridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); @@ -2441,6 +3343,10 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.get_memory_type = haswell_get_memory_type; pvt->info.get_node_id = haswell_get_node_id; pvt->info.rir_limit = haswell_rir_limit; + pvt->info.sad_limit = sad_limit; + pvt->info.interleave_mode = interleave_mode; + pvt->info.show_interleave_mode = show_interleave_mode; + pvt->info.dram_attr = dram_attr; pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); @@ -2461,6 +3367,10 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.get_memory_type = haswell_get_memory_type; pvt->info.get_node_id = haswell_get_node_id; pvt->info.rir_limit = haswell_rir_limit; + pvt->info.sad_limit = sad_limit; + pvt->info.interleave_mode = interleave_mode; + pvt->info.show_interleave_mode = show_interleave_mode; + pvt->info.dram_attr = dram_attr; pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); @@ -2473,6 +3383,30 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) if (unlikely(rc < 0)) goto fail0; break; + case KNIGHTS_LANDING: + /* pvt->info.rankcfgr == ??? 
*/ + pvt->info.get_tolm = knl_get_tolm; + pvt->info.get_tohm = knl_get_tohm; + pvt->info.dram_rule = knl_dram_rule; + pvt->info.get_memory_type = knl_get_memory_type; + pvt->info.get_node_id = knl_get_node_id; + pvt->info.rir_limit = NULL; + pvt->info.sad_limit = knl_sad_limit; + pvt->info.interleave_mode = knl_interleave_mode; + pvt->info.show_interleave_mode = knl_show_interleave_mode; + pvt->info.dram_attr = dram_attr_knl; + pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule); + pvt->info.interleave_list = knl_interleave_list; + pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list); + pvt->info.interleave_pkg = ibridge_interleave_pkg; + pvt->info.get_width = knl_get_width; + mci->ctl_name = kasprintf(GFP_KERNEL, + "Knights Landing Socket#%d", mci->mc_idx); + + rc = knl_mci_bind_devs(mci, sbridge_dev); + if (unlikely(rc < 0)) + goto fail0; + break; } /* Get dimm basic config and the memory layout */ @@ -2527,20 +3461,29 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) switch (pdev->device) { case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: - rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); + rc = sbridge_get_all_devices(&num_mc, + pci_dev_descr_ibridge_table); type = IVY_BRIDGE; break; case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: - rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); + rc = sbridge_get_all_devices(&num_mc, + pci_dev_descr_sbridge_table); type = SANDY_BRIDGE; break; case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: - rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table); + rc = sbridge_get_all_devices(&num_mc, + pci_dev_descr_haswell_table); type = HASWELL; break; case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: - rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_broadwell_table); + rc = sbridge_get_all_devices(&num_mc, + pci_dev_descr_broadwell_table); type = BROADWELL; + break; + case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0: + rc = sbridge_get_all_devices_knl(&num_mc, + pci_dev_descr_knl_table); + type = KNIGHTS_LANDING; break; } if (unlikely(rc < 0)) { diff --git a/drivers/edac/wq.c b/drivers/edac/wq.c new file mode 100644 index 000000000000..1b8c07e44fd8 --- /dev/null +++ b/drivers/edac/wq.c @@ -0,0 +1,42 @@ +#include "edac_module.h" + +static struct workqueue_struct *wq; + +bool edac_queue_work(struct delayed_work *work, unsigned long delay) +{ + return queue_delayed_work(wq, work, delay); +} +EXPORT_SYMBOL_GPL(edac_queue_work); + +bool edac_mod_work(struct delayed_work *work, unsigned long delay) +{ + return mod_delayed_work(wq, work, delay); +} +EXPORT_SYMBOL_GPL(edac_mod_work); + +bool edac_stop_work(struct delayed_work *work) +{ + bool ret; + + ret = cancel_delayed_work_sync(work); + flush_workqueue(wq); + + return ret; +} +EXPORT_SYMBOL_GPL(edac_stop_work); + +int edac_workqueue_setup(void) +{ + wq = create_singlethread_workqueue("edac-poller"); + if (!wq) + return -ENODEV; + else + return 0; +} + +void edac_workqueue_teardown(void) +{ + flush_workqueue(wq); + destroy_workqueue(wq); + wq = NULL; +} diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index cf478fe6b335..49a3a1185bb6 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -173,6 +173,9 @@ config QCOM_SCM_64 def_bool y depends on QCOM_SCM && ARM64 +config HAVE_ARM_SMCCC + bool + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index ac1ce4a73edf..0e08e665f715 100644 --- 
a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -521,6 +521,7 @@ static int __init dmi_present(const u8 *buf) dmi_ver = smbios_ver; else dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F); + dmi_ver <<= 8; dmi_num = get_unaligned_le16(buf + 12); dmi_len = get_unaligned_le16(buf + 6); dmi_base = get_unaligned_le32(buf + 8); @@ -528,15 +529,14 @@ static int __init dmi_present(const u8 *buf) if (dmi_walk_early(dmi_decode) == 0) { if (smbios_ver) { pr_info("SMBIOS %d.%d present.\n", - dmi_ver >> 8, dmi_ver & 0xFF); + dmi_ver >> 16, (dmi_ver >> 8) & 0xFF); } else { smbios_entry_point_size = 15; memcpy(smbios_entry_point, buf, smbios_entry_point_size); pr_info("Legacy DMI %d.%d present.\n", - dmi_ver >> 8, dmi_ver & 0xFF); + dmi_ver >> 16, (dmi_ver >> 8) & 0xFF); } - dmi_ver <<= 8; dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string)); printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string); return 0; diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index ec379a4164cc..62e654f255f4 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -18,3 +18,7 @@ obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o obj-$(CONFIG_EFI_STUB) += libstub/ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o + +arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o +obj-$(CONFIG_ARM) += $(arm-obj-y) +obj-$(CONFIG_ARM64) += $(arm-obj-y) diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c new file mode 100644 index 000000000000..9e15d571b53c --- /dev/null +++ b/drivers/firmware/efi/arm-init.c @@ -0,0 +1,209 @@ +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013 - 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/memblock.h> +#include <linux/mm_types.h> +#include <linux/of.h> +#include <linux/of_fdt.h> + +#include <asm/efi.h> + +struct efi_memory_map memmap; + +u64 efi_system_table; + +static int __init is_normal_ram(efi_memory_desc_t *md) +{ + if (md->attribute & EFI_MEMORY_WB) + return 1; + return 0; +} + +/* + * Translate a EFI virtual address into a physical address: this is necessary, + * as some data members of the EFI system table are virtually remapped after + * SetVirtualAddressMap() has been called. 
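+ *
+ * The lookup walks the memory map for an EFI_MEMORY_RUNTIME descriptor
+ * whose virtual window covers @addr and returns
+ * md->phys_addr + (addr - md->virt_addr); if no such mapping is found,
+ * the address is returned unchanged.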
+ */ +static phys_addr_t efi_to_phys(unsigned long addr) +{ + efi_memory_desc_t *md; + + for_each_efi_memory_desc(&memmap, md) { + if (!(md->attribute & EFI_MEMORY_RUNTIME)) + continue; + if (md->virt_addr == 0) + /* no virtual mapping has been installed by the stub */ + break; + if (md->virt_addr <= addr && + (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) + return md->phys_addr + addr - md->virt_addr; + } + return addr; +} + +static int __init uefi_init(void) +{ + efi_char16_t *c16; + void *config_tables; + size_t table_size; + char vendor[100] = "unknown"; + int i, retval; + + efi.systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (efi.systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + + /* + * Verify the EFI Table + */ + if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + retval = -EINVAL; + goto out; + } + if ((efi.systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff); + + /* Show what we know for posterity */ + c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), + sizeof(vendor) * sizeof(efi_char16_t)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } + + pr_info("EFI v%u.%.02u by %s\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff, vendor); + + table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; + config_tables = early_memremap(efi_to_phys(efi.systab->tables), + table_size); + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } + retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, + sizeof(efi_config_table_t), NULL); + + early_memunmap(config_tables, table_size); +out: + early_memunmap(efi.systab, sizeof(efi_system_table_t)); + return retval; +} + +/* + * Return true for RAM regions we want to permanently reserve. 
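+ *
+ * Loader code/data, boot services code/data, conventional memory and
+ * persistent memory are handed back to the kernel; any other region
+ * that is normal (EFI_MEMORY_WB) RAM stays reserved and is marked
+ * nomap in reserve_regions().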
+ */ +static __init int is_reserve_region(efi_memory_desc_t *md) +{ + switch (md->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + case EFI_PERSISTENT_MEMORY: + return 0; + default: + break; + } + return is_normal_ram(md); +} + +static __init void reserve_regions(void) +{ + efi_memory_desc_t *md; + u64 paddr, npages, size; + + if (efi_enabled(EFI_DBG)) + pr_info("Processing EFI memory map:\n"); + + for_each_efi_memory_desc(&memmap, md) { + paddr = md->phys_addr; + npages = md->num_pages; + + if (efi_enabled(EFI_DBG)) { + char buf[64]; + + pr_info(" 0x%012llx-0x%012llx %s", + paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, + efi_md_typeattr_format(buf, sizeof(buf), md)); + } + + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_normal_ram(md)) + early_init_dt_add_memory_arch(paddr, size); + + if (is_reserve_region(md)) { + memblock_mark_nomap(paddr, size); + if (efi_enabled(EFI_DBG)) + pr_cont("*"); + } + + if (efi_enabled(EFI_DBG)) + pr_cont("\n"); + } + + set_bit(EFI_MEMMAP, &efi.flags); +} + +void __init efi_init(void) +{ + struct efi_fdt_params params; + + /* Grab UEFI information placed in FDT by stub */ + if (!efi_get_fdt_params(¶ms)) + return; + + efi_system_table = params.system_table; + + memmap.phys_map = params.mmap; + memmap.map = early_memremap(params.mmap, params.mmap_size); + if (memmap.map == NULL) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. + */ + panic("Unable to map EFI memory map.\n"); + } + memmap.map_end = memmap.map + params.mmap_size; + memmap.desc_size = params.desc_size; + memmap.desc_version = params.desc_ver; + + if (uefi_init() < 0) + return; + + reserve_regions(); + early_memunmap(memmap.map, params.mmap_size); + memblock_mark_nomap(params.mmap & PAGE_MASK, + PAGE_ALIGN(params.mmap_size + + (params.mmap & ~PAGE_MASK))); +} diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c new file mode 100644 index 000000000000..6ae21e41a429 --- /dev/null +++ b/drivers/firmware/efi/arm-runtime.c @@ -0,0 +1,135 @@ +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/efi.h> +#include <linux/io.h> +#include <linux/memblock.h> +#include <linux/mm_types.h> +#include <linux/preempt.h> +#include <linux/rbtree.h> +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <asm/cacheflush.h> +#include <asm/efi.h> +#include <asm/mmu.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> + +extern u64 efi_system_table; + +static struct mm_struct efi_mm = { + .mm_rb = RB_ROOT, + .mm_users = ATOMIC_INIT(2), + .mm_count = ATOMIC_INIT(1), + .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), + .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), + .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), +}; + +static bool __init efi_virtmap_init(void) +{ + efi_memory_desc_t *md; + + efi_mm.pgd = pgd_alloc(&efi_mm); + init_new_context(NULL, &efi_mm); + + for_each_efi_memory_desc(&memmap, md) { + phys_addr_t phys = md->phys_addr; + int ret; + + if (!(md->attribute & EFI_MEMORY_RUNTIME)) + continue; + if (md->virt_addr == 0) + return false; + + ret = efi_create_mapping(&efi_mm, md); + if (!ret) { + pr_info(" EFI remap %pa => %p\n", + &phys, (void *)(unsigned long)md->virt_addr); + } else { + pr_warn(" EFI remap %pa: failed to create mapping (%d)\n", + &phys, ret); + return false; + } + } + return true; +} + +/* + * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., + * non-early mapping of the UEFI system table and virtual mappings for all + * EFI_MEMORY_RUNTIME regions. + */ +static int __init arm_enable_runtime_services(void) +{ + u64 mapsize; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return 0; + } + + pr_info("Remapping and enabling EFI services.\n"); + + mapsize = memmap.map_end - memmap.map; + memmap.map = (__force void *)ioremap_cache(memmap.phys_map, + mapsize); + if (!memmap.map) { + pr_err("Failed to remap EFI memory map\n"); + return -ENOMEM; + } + memmap.map_end = memmap.map + mapsize; + efi.memmap = &memmap; + + efi.systab = (__force void *)ioremap_cache(efi_system_table, + sizeof(efi_system_table_t)); + if (!efi.systab) { + pr_err("Failed to remap EFI System Table\n"); + return -ENOMEM; + } + set_bit(EFI_SYSTEM_TABLES, &efi.flags); + + if (!efi_virtmap_init()) { + pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); + return -ENOMEM; + } + + /* Set up runtime services function pointers */ + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + efi.runtime_version = efi.systab->hdr.revision; + + return 0; +} +early_initcall(arm_enable_runtime_services); + +void efi_virtmap_load(void) +{ + preempt_disable(); + efi_set_pgd(&efi_mm); +} + +void efi_virtmap_unload(void) +{ + efi_set_pgd(current->active_mm); + preempt_enable(); +} diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 027ca212179f..cffa89b3317b 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -25,6 +25,8 @@ #include <linux/io.h> #include <linux/platform_device.h> +#include <asm/efi.h> + struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 3c0467d3688c..9c12e18031d5 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32) := 
-march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \ -fPIC -fno-strict-aliasing -mno-red-zone \ - -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING + -mno-mmx -mno-sse cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ @@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt -KBUILD_CFLAGS := $(cflags-y) \ +KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) @@ -34,6 +34,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \ $(patsubst %.c,lib-%.o,$(arm-deps)) +lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += arm64-stub.o CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) @@ -67,3 +68,11 @@ quiet_cmd_stubcopy = STUBCPY $@ $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \ && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \ rm -f $@; /bin/false); else /bin/false; fi + +# +# ARM discards the .data section because it disallows r/w data in the +# decompressor. So move our .data to .data.efistub, which is preserved +# explicitly by the decompressor linker script. +# +STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub +STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 950c87f5d279..3397902e4040 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -303,8 +303,10 @@ fail: * The value chosen is the largest non-zero power of 2 suitable for this purpose * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can * be mapped efficiently. + * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split, + * map everything below 1 GB. */ -#define EFI_RT_VIRTUAL_BASE 0x40000000 +#define EFI_RT_VIRTUAL_BASE SZ_512M static int cmp_mem_desc(const void *l, const void *r) { diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c new file mode 100644 index 000000000000..495ebd657e38 --- /dev/null +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include <linux/efi.h> +#include <asm/efi.h> + +efi_status_t handle_kernel_image(efi_system_table_t *sys_table, + unsigned long *image_addr, + unsigned long *image_size, + unsigned long *reserve_addr, + unsigned long *reserve_size, + unsigned long dram_base, + efi_loaded_image_t *image) +{ + unsigned long nr_pages; + efi_status_t status; + /* Use alloc_addr to tranlsate between types */ + efi_physical_addr_t alloc_addr; + + /* + * Verify that the DRAM base address is compatible with the ARM + * boot protocol, which determines the base of DRAM by masking + * off the low 27 bits of the address at which the zImage is + * loaded. These assumptions are made by the decompressor, + * before any memory map is available. + */ + dram_base = round_up(dram_base, SZ_128M); + + /* + * Reserve memory for the uncompressed kernel image. 
This is + * all that prevents any future allocations from conflicting + * with the kernel. Since we can't tell from the compressed + * image how much DRAM the kernel actually uses (due to BSS + * size uncertainty) we allocate the maximum possible size. + * Do this very early, as prints can cause memory allocations + * that may conflict with this. + */ + alloc_addr = dram_base; + *reserve_size = MAX_UNCOMP_KERNEL_SIZE; + nr_pages = round_up(*reserve_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; + status = sys_table->boottime->allocate_pages(EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + nr_pages, &alloc_addr); + if (status != EFI_SUCCESS) { + *reserve_size = 0; + pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n"); + return status; + } + *reserve_addr = alloc_addr; + + /* + * Relocate the zImage, so that it appears in the lowest 128 MB + * memory window. + */ + *image_size = image->image_size; + status = efi_relocate_kernel(sys_table, image_addr, *image_size, + *image_size, + dram_base + MAX_UNCOMP_KERNEL_SIZE, 0); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Failed to relocate kernel.\n"); + efi_free(sys_table, *reserve_size, *reserve_addr); + *reserve_size = 0; + return status; + } + + /* + * Check to see if we were able to allocate memory low enough + * in memory. The kernel determines the base of DRAM from the + * address at which the zImage is loaded. + */ + if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) { + pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n"); + efi_free(sys_table, *reserve_size, *reserve_addr); + *reserve_size = 0; + efi_free(sys_table, *image_size, *image_addr); + *image_size = 0; + return EFI_LOAD_ERROR; + } + return EFI_SUCCESS; +} diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d24f35d74b27..f25cd79c8a79 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "psci: " fmt +#include <linux/arm-smccc.h> #include <linux/errno.h> #include <linux/linkage.h> #include <linux/of.h> @@ -58,8 +59,6 @@ struct psci_operations psci_ops; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); -asmlinkage psci_fn __invoke_psci_fn_hvc; -asmlinkage psci_fn __invoke_psci_fn_smc; static psci_fn *invoke_psci_fn; enum psci_function { @@ -107,6 +106,26 @@ bool psci_power_state_is_valid(u32 state) return !(state & ~valid_mask); } +static unsigned long __invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static unsigned long __invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + static int psci_to_linux_errno(int errno) { switch (errno) { diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index e5827a56ff3b..5eaea8b812cf 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset, __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); __raw_writel( - __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset), + __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset), ctrl->base + AR71XX_GPIO_REG_OE); 
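+ /* Clear only this line's OE bit: masking the whole register down to
+  * BIT(offset), as the removed line did, wiped the direction setting of
+  * every other GPIO on the chip.
+  */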
spin_unlock_irqrestore(&ctrl->lock, flags); diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index bd5193c67a9c..88ae70ddb127 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio) unsigned long pinmask = bgc->pin2mask(bgc, gpio); if (bgc->dir & pinmask) - return bgc->read_reg(bgc->reg_set) & pinmask; + return !!(bgc->read_reg(bgc->reg_set) & pinmask); else - return bgc->read_reg(bgc->reg_dat) & pinmask; + return !!(bgc->read_reg(bgc->reg_dat) & pinmask); } static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 16a7b6816744..cbbb67a6f1d6 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -417,10 +417,15 @@ static int acpi_find_gpio(struct acpi_resource *ares, void *data) * ActiveLow is only specified for GpioInt resource. If * GpioIo is used then the only way to set the flag is * to use _DSD "gpios" property. + * Note: we expect here: + * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW + * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH */ - if (lookup->info.gpioint) - lookup->info.active_low = - agpio->polarity == ACPI_ACTIVE_LOW; + if (lookup->info.gpioint) { + lookup->info.polarity = agpio->polarity; + lookup->info.triggering = agpio->triggering; + } + } return 1; @@ -447,7 +452,7 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup, if (info) { *info = lookup->info; if (lookup->active_low) - info->active_low = lookup->active_low; + info->polarity = lookup->active_low; } return 0; } @@ -595,6 +600,7 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode, int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) { int idx, i; + unsigned int irq_flags; for (i = 0, idx = 0; idx <= index; i++) { struct acpi_gpio_info info; @@ -603,8 +609,23 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) desc = acpi_get_gpiod_by_index(adev, NULL, i, &info); if (IS_ERR(desc)) break; - if (info.gpioint && idx++ == index) - return gpiod_to_irq(desc); + if (info.gpioint && idx++ == index) { + int irq = gpiod_to_irq(desc); + + if (irq < 0) + return irq; + + irq_flags = acpi_dev_get_irq_type(info.triggering, + info.polarity); + + /* Set type if specified and different than the current one */ + if (irq_flags != IRQ_TYPE_NONE && + irq_flags != irq_get_trigger_type(irq)) + irq_set_irq_type(irq, irq_flags); + + return irq; + } + } return -ENOENT; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 2a91f3287e3b..5d8d7ab96916 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1279,7 +1279,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc) chip = desc->chip; offset = gpio_chip_hwgpio(desc); value = chip->get ? chip->get(chip, offset) : -EIO; - value = value < 0 ? value : !!value; + /* + * FIXME: fix all drivers to clamp to [0,1] or return negative, + * then change this to: + * value = value < 0 ? value : !!value; + * so we can properly propagate error codes. 
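+ * Until then, a negative return (e.g. -EIO) from a driver's ->get()
+ * is folded to a logical 1 by the !! below.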
+ */ + value = !!value; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } @@ -1873,7 +1879,7 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, return desc; } - if (info.active_low) + if (info.polarity == GPIO_ACTIVE_LOW) *flags |= GPIO_ACTIVE_LOW; return desc; @@ -2211,7 +2217,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, desc = acpi_node_get_gpiod(fwnode, propname, 0, &info); if (!IS_ERR(desc)) - active_low = info.active_low; + active_low = info.polarity == GPIO_ACTIVE_LOW; } if (IS_ERR(desc)) diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 98ab08c0aa2d..5ac3b88a2e0a 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -26,7 +26,8 @@ struct acpi_device; */ struct acpi_gpio_info { bool gpioint; - bool active_low; + int polarity; + int triggering; }; /* gpio suffixes used for ACPI and device tree lookup */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5a5f04d0902d..048cfe073dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser { struct ww_acquire_ctx ticket; /* user fence */ - struct amdgpu_user_fence uf; + struct amdgpu_user_fence uf; + struct amdgpu_bo_list_entry uf_entry; }; struct amdgpu_job { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4f352ec9dec4..25a3e2485cc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } +static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, + struct drm_amdgpu_cs_chunk_fence *fence_data) +{ + struct drm_gem_object *gobj; + uint32_t handle; + + handle = fence_data->handle; + gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, + fence_data->handle); + if (gobj == NULL) + return -EINVAL; + + p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + p->uf.offset = fence_data->offset; + + if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { + drm_gem_object_unreference_unlocked(gobj); + return -EINVAL; + } + + p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); + p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.priority = 0; + p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; + p->uf_entry.tv.shared = true; + + drm_gem_object_unreference_unlocked(gobj); + return 0; +} + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; @@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) case AMDGPU_CHUNK_ID_FENCE: size = sizeof(struct drm_amdgpu_cs_chunk_fence); - if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { - uint32_t handle; - struct drm_gem_object *gobj; - struct drm_amdgpu_cs_chunk_fence *fence_data; - - fence_data = (void *)p->chunks[i].kdata; - handle = fence_data->handle; - gobj = drm_gem_object_lookup(p->adev->ddev, - p->filp, handle); - if (gobj == NULL) { - ret = -EINVAL; - goto free_partial_kdata; - } - - p->uf.bo = gem_to_amdgpu_bo(gobj); - amdgpu_bo_ref(p->uf.bo); - drm_gem_object_unreference_unlocked(gobj); - p->uf.offset = fence_data->offset; - } else { + if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { ret = -EINVAL; goto free_partial_kdata; } + + ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); + if (ret) + goto free_partial_kdata; + break; case 
AMDGPU_CHUNK_ID_DEPENDENCIES: @@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, &p->validated); + if (p->uf.bo) + list_add(&p->uf_entry.tv.head, &p->validated); + if (need_mmap_lock) down_read(¤t->mm->mmap_sem); @@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->num_ibs; i++) amdgpu_ib_free(parser->adev, &parser->ibs[i]); kfree(parser->ibs); - if (parser->uf.bo) - amdgpu_bo_unref(&parser->uf.bo); + amdgpu_bo_unref(&parser->uf.bo); + amdgpu_bo_unref(&parser->uf_entry.robj); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index c6a1b4cc6458..d321222fd92e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -559,19 +559,10 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep, /* this is the actual buffer to work with */ - args_buff = kmalloc(args->buf_size_in_bytes - - sizeof(*args), GFP_KERNEL); - if (args_buff == NULL) - return -ENOMEM; - - status = copy_from_user(args_buff, cmd_from_user, + args_buff = memdup_user(args_buff, args->buf_size_in_bytes - sizeof(*args)); - - if (status != 0) { - pr_debug("Failed to copy address watch user data\n"); - kfree(args_buff); - return -EINVAL; - } + if (IS_ERR(args_buff)) + return PTR_ERR(args_buff); aw_info.process = p; @@ -677,22 +668,12 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep, if (cmd_from_user == NULL) return -EINVAL; - /* this is the actual buffer to work with */ + /* copy the entire buffer from user */ - args_buff = kmalloc(args->buf_size_in_bytes - sizeof(*args), - GFP_KERNEL); - - if (args_buff == NULL) - return -ENOMEM; - - /* Now copy the entire buffer from user */ - status = copy_from_user(args_buff, cmd_from_user, + args_buff = memdup_user(cmd_from_user, args->buf_size_in_bytes - sizeof(*args)); - if (status != 0) { - pr_debug("Failed to copy wave control user data\n"); - kfree(args_buff); - return -EINVAL; - } + if (IS_ERR(args_buff)) + return PTR_ERR(args_buff); /* move ptr to the start of the "pay-load" area */ wac_info.process = p; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a18164f2f6d2..f8b5fcfa91a2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect mode_flags |= DRM_MODE_FLAG_3D_MASK; list_for_each_entry(mode, &connector->modes, head) { - mode->status = drm_mode_validate_basic(mode); + if (mode->status == MODE_OK) + mode->status = drm_mode_validate_basic(mode); if (mode->status == MODE_OK) mode->status = drm_mode_validate_size(mode, maxX, maxY); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index b3ba27fd9a6b..e69357172ffb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc, { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + if (!state->enable) + return 0; + if (exynos_crtc->ops->atomic_check) return exynos_crtc->ops->atomic_check(exynos_crtc, state); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a01e51581c4c..f4af19a0d569 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ 
b/drivers/gpu/drm/i915/i915_drv.h @@ -2193,8 +2193,17 @@ struct drm_i915_gem_request { struct drm_i915_private *i915; struct intel_engine_cs *ring; - /** GEM sequence number associated with this request. */ - uint32_t seqno; + /** GEM sequence number associated with the previous request, + * when the HWS breadcrumb is equal to this the GPU is processing + * this request. + */ + u32 previous_seqno; + + /** GEM sequence number associated with this request, + * when the HWS breadcrumb is equal or greater than this the GPU + * has finished processing this request. + */ + u32 seqno; /** Position in the ringbuffer of the start of the request */ u32 head; @@ -2839,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); int __must_check i915_vma_unbind(struct i915_vma *vma); /* * BEWARE: Do not use the function below unless you can _absolutely_ @@ -2910,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } +static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, + bool lazy_coherency) +{ + u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); + return i915_seqno_passed(seqno, req->previous_seqno); +} + static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, bool lazy_coherency) { - u32 seqno; - - BUG_ON(req == NULL); - - seqno = req->ring->get_seqno(req->ring, lazy_coherency); - + u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); return i915_seqno_passed(seqno, req->seqno); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 32e6aade6223..f56af0aaafde 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv, return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); } -static int __i915_spin_request(struct drm_i915_gem_request *req) +static unsigned long local_clock_us(unsigned *cpu) +{ + unsigned long t; + + /* Cheaply and approximately convert from nanoseconds to microseconds. + * The result and subsequent calculations are also defined in the same + * approximate microseconds units. The principal source of timing + * error here is from the simple truncation. + * + * Note that local_clock() is only defined wrt to the current CPU; + * the comparisons are no longer valid if we switch CPUs. Instead of + * blocking preemption for the entire busywait, we can detect the CPU + * switch and use that as indicator of system load and a reason to + * stop busywaiting, see busywait_stop(). + */ + *cpu = get_cpu(); + t = local_clock() >> 10; + put_cpu(); + + return t; +} + +static bool busywait_stop(unsigned long timeout, unsigned cpu) +{ + unsigned this_cpu; + + if (time_after(local_clock_us(&this_cpu), timeout)) + return true; + + return this_cpu != cpu; +} + +static int __i915_spin_request(struct drm_i915_gem_request *req, int state) { unsigned long timeout; + unsigned cpu; + + /* When waiting for high frequency requests, e.g. during synchronous + * rendering split between the CPU and GPU, the finite amount of time + * required to set up the irq and wait upon it limits the response + * rate. By busywaiting on the request completion for a short while we + * can service the high frequency waits as quick as possible. However, + * if it is a slow request, we want to sleep as quickly as possible. 
+ * The tradeoff between waiting and sleeping is roughly the time it + * takes to sleep on a request, on the order of a microsecond. + */ - if (i915_gem_request_get_ring(req)->irq_refcount) + if (req->ring->irq_refcount) return -EBUSY; - timeout = jiffies + 1; + /* Only spin if we know the GPU is processing this request */ + if (!i915_gem_request_started(req, true)) + return -EAGAIN; + + timeout = local_clock_us(&cpu) + 5; while (!need_resched()) { if (i915_gem_request_completed(req, true)) return 0; - if (time_after_eq(jiffies, timeout)) + if (signal_pending_state(state, current)) + break; + + if (busywait_stop(timeout, cpu)) break; cpu_relax_lowlatency(); } + if (i915_gem_request_completed(req, false)) return 0; @@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, struct drm_i915_private *dev_priv = dev->dev_private; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); + int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; DEFINE_WAIT(wait); unsigned long timeout_expire; s64 before, now; @@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, before = ktime_get_raw_ns(); /* Optimistic spin for the next jiffie before touching IRQs */ - ret = __i915_spin_request(req); + ret = __i915_spin_request(req, state); if (ret == 0) goto out; @@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, for (;;) { struct timer_list timer; - prepare_to_wait(&ring->irq_queue, &wait, - interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + prepare_to_wait(&ring->irq_queue, &wait, state); /* We need to check whether any gpu reset happened in between * the caller grabbing the seqno and now ... */ @@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, break; } - if (interruptible && signal_pending(current)) { + if (signal_pending_state(state, current)) { ret = -ERESTARTSYS; break; } @@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, request->batch_obj = obj; request->emitted_jiffies = jiffies; + request->previous_seqno = ring->last_submitted_seqno; ring->last_submitted_seqno = request->seqno; list_add_tail(&request->list, &ring->request_list); @@ -4080,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) return false; } +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + bool mappable, fenceable; + u32 fence_size, fence_alignment; + + fence_size = i915_gem_get_gtt_size(obj->base.dev, + obj->base.size, + obj->tiling_mode); + fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, + obj->base.size, + obj->tiling_mode, + true); + + fenceable = (vma->node.size == fence_size && + (vma->node.start & (fence_alignment - 1)) == 0); + + mappable = (vma->node.start + fence_size <= + to_i915(obj->base.dev)->gtt.mappable_end); + + obj->map_and_fenceable = mappable && fenceable; +} + static int i915_gem_object_do_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -4147,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && (bound ^ vma->bound) & GLOBAL_BIND) { - bool mappable, fenceable; - u32 fence_size, fence_alignment; - - fence_size = i915_gem_get_gtt_size(obj->base.dev, - obj->base.size, - obj->tiling_mode); - fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, - obj->base.size, - obj->tiling_mode, - true); - - 
fenceable = (vma->node.size == fence_size && - (vma->node.start & (fence_alignment - 1)) == 0); - - mappable = (vma->node.start + fence_size <= - dev_priv->gtt.mappable_end); - - obj->map_and_fenceable = mappable && fenceable; - + __i915_vma_set_map_and_fenceable(vma); WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8c688a5f1589..02ceb7a4b481 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx) if (!ppgtt) return; - WARN_ON(!list_empty(&ppgtt->base.active_list)); - list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, mm_list) { if (WARN_ON(__i915_vma_unbind_no_wait(vma))) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 43f35d12b677..86c7500454b4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, return ret; } vma->bound |= GLOBAL_BIND; + __i915_vma_set_map_and_fenceable(vma); list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index cdacf3f5b77a..87e919a06b27 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, } vma->bound |= GLOBAL_BIND; + __i915_vma_set_map_and_fenceable(vma); list_add_tail(&vma->mm_list, &ggtt->inactive_list); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 22e86d2e408d..32cf97346978 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); +static void intel_pre_disable_primary(struct drm_crtc *crtc); typedef struct { int min, max; @@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_i915_gem_object *obj; struct drm_plane *primary = intel_crtc->base.primary; struct drm_plane_state *plane_state = primary->state; + struct drm_crtc_state *crtc_state = intel_crtc->base.state; + struct intel_plane *intel_plane = to_intel_plane(primary); struct drm_framebuffer *fb; if (!plane_config->fb) @@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, } } + /* + * We've failed to reconstruct the BIOS FB. Current display state + * indicates that the primary plane is visible, but has a NULL FB, + * which will lead to problems later if we don't fix it up. The + * simplest solution is to just disable the primary plane now and + * pretend the BIOS never had it enabled. 
+ */ + to_intel_plane_state(plane_state)->visible = false; + crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); + intel_pre_disable_primary(&intel_crtc->base); + intel_plane->disable_plane(primary, &intel_crtc->base); + return; valid_fb: @@ -6309,9 +6324,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) if (to_intel_plane_state(crtc->primary->state)->visible) { intel_crtc_wait_for_pending_flips(crtc); intel_pre_disable_primary(crtc); + + intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); + to_intel_plane_state(crtc->primary->state)->visible = false; } - intel_crtc_disable_planes(crtc, crtc->state->plane_mask); dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; intel_update_watermarks(crtc); @@ -9908,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, return true; } -static void i845_update_cursor(struct drm_crtc *crtc, u32 base) +static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; - if (base) { + if (on) { unsigned int width = intel_crtc->base.cursor->state->crtc_w; unsigned int height = intel_crtc->base.cursor->state->crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; @@ -9970,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) } } -static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - uint32_t cntl; + uint32_t cntl = 0; - cntl = 0; - if (base) { + if (on) { cntl = MCURSOR_GAMMA_ENABLE; switch (intel_crtc->base.cursor->state->crtc_w) { case 64: @@ -10030,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, int y = cursor_state->crtc_y; u32 base = 0, pos = 0; - if (on) - base = intel_crtc->cursor_addr; + base = intel_crtc->cursor_addr; if (x >= intel_crtc->config->pipe_src_w) - base = 0; + on = false; if (y >= intel_crtc->config->pipe_src_h) - base = 0; + on = false; if (x < 0) { if (x + cursor_state->crtc_w <= 0) - base = 0; + on = false; pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; x = -x; @@ -10050,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, if (y < 0) { if (y + cursor_state->crtc_h <= 0) - base = 0; + on = false; pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; y = -y; } pos |= y << CURSOR_Y_SHIFT; - if (base == 0 && intel_crtc->cursor_base == 0) - return; - I915_WRITE(CURPOS(pipe), pos); /* ILK+ do this automagically */ @@ -10070,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, } if (IS_845G(dev) || IS_I865G(dev)) - i845_update_cursor(crtc, base); + i845_update_cursor(crtc, base, on); else - i9xx_update_cursor(crtc, base); + i9xx_update_cursor(crtc, base, on); } static bool cursor_size_ok(struct drm_device *dev, @@ -12111,18 +12123,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, static bool check_digital_port_conflicts(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct intel_encoder *encoder; struct drm_connector *connector; - struct drm_connector_state *connector_state; unsigned int used_ports = 0; - int i; /* * Walk the connector list instead of the encoder * list to detect the problem on ddi 
platforms * where there's just one encoder per digital port. */ - for_each_connector_in_state(state, connector, connector_state, i) { + drm_for_each_connector(connector, dev) { + struct drm_connector_state *connector_state; + struct intel_encoder *encoder; + + connector_state = drm_atomic_get_existing_connector_state(state, connector); + if (!connector_state) + connector_state = connector->state; + if (!connector_state->best_encoder) continue; @@ -13716,6 +13732,7 @@ intel_check_cursor_plane(struct drm_plane *plane, struct drm_crtc *crtc = crtc_state->base.crtc; struct drm_framebuffer *fb = state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); + enum pipe pipe = to_intel_plane(plane)->pipe; unsigned stride; int ret; @@ -13749,6 +13766,22 @@ intel_check_cursor_plane(struct drm_plane *plane, return -EINVAL; } + /* + * There's something wrong with the cursor on CHV pipe C. + * If it straddles the left edge of the screen then + * moving it away from the edge or disabling it often + * results in a pipe underrun, and often that can lead to + * dead pipe (constant underrun reported, and it scans + * out just a solid color). To recover from that, the + * display power well must be turned off and on again. + * Refuse the put the cursor into that compromised position. + */ + if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && + state->visible && state->base.crtc_x < 0) { + DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); + return -EINVAL; + } + return 0; } @@ -13772,9 +13805,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, crtc = crtc ? crtc : plane->crtc; intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->cursor_bo == obj) - goto update; - if (!obj) addr = 0; else if (!INTEL_INFO(dev)->cursor_needs_physical) @@ -13783,9 +13813,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, addr = obj->phys_handle->busaddr; intel_crtc->cursor_addr = addr; - intel_crtc->cursor_bo = obj; -update: if (crtc->state->active) intel_crtc_update_cursor(crtc, state->visible); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f2a1142bff34..0d00f07b7163 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -550,7 +550,6 @@ struct intel_crtc { int adjusted_x; int adjusted_y; - struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; uint32_t cursor_cntl; uint32_t cursor_size; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 81cdd9ff3892..e6c035b0fc1c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = to_i915(connector->dev); bool live_status = false; - unsigned int retry = 3; + unsigned int try; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - while (!live_status && --retry) { + for (try = 0; !live_status && try < 9; try++) { + if (try) + msleep(10); live_status = intel_digital_port_connected(dev_priv, hdmi_to_dig_port(intel_hdmi)); - mdelay(10); } if (!live_status) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 071a76b9ac52..f091ad12d694 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 2b: Program RC6 
thresholds.*/ /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ - if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && - (INTEL_REVID(dev) <= SKL_REVID_E0))) + if (IS_SKYLAKE(dev)) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); else I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); @@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev) * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) + ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index ffa902ece872..05a895496fc6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c @@ -156,6 +156,7 @@ nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, return -ENOMEM; nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object); chan->gr = gr; + chan->fifo = fifoch; *pobject = &chan->object; spin_lock_irqsave(&chan->gr->base.engine.lock, flags); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c index 43006db6fd58..80fed7e78dcb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c @@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan) fan->type = NVBIOS_THERM_FAN_UNK; } + fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; fan->min_duty = nvbios_rd08(bios, data + 0x02); fan->max_duty = nvbios_rd08(bios, data + 0x03); diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index b8e4cdec28c3..24f92bea39c7 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, dma_addr_t paddr; int ret; - /* only doing ARGB32 since this is what is needed to alpha-blend - * with video overlays: - */ sizes->surface_bpp = 32; - sizes->surface_depth = 32; + sizes->surface_depth = 24; DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, sizes->surface_height, sizes->surface_bpp, diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index 63eb16bf2cf0..883a314cd83a 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c @@ -161,7 +161,7 @@ static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs) * The DRM pixel formats and IPU internal representation are ordered the other * way around, with the first named component ordered at the most significant * bits. Further, V4L2 formats are not well defined: - * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html + * https://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html * We choose the interpretation which matches GStreamer behavior. 
*/ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat) diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 9abcaa53bd25..f17cb0431833 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -1163,12 +1163,8 @@ done: static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait) { - struct vga_arb_private *priv = file->private_data; - pr_debug("%s\n", __func__); - if (priv == NULL) - return -ENODEV; poll_wait(file, &vga_wait_queue, wait); return POLLIN; } @@ -1209,9 +1205,6 @@ static int vga_arb_release(struct inode *inode, struct file *file) pr_debug("%s\n", __func__); - if (priv == NULL) - return -ENODEV; - spin_lock_irqsave(&vga_user_lock, flags); list_del(&priv->list); for (i = 0; i < MAX_USER_CARDS; i++) { diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 3782636562a1..678663e2085f 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -63,10 +63,6 @@ enum hv_cpuid_function { /* Define version of the synthetic interrupt controller. */ #define HV_SYNIC_VERSION (1) -/* Define synthetic interrupt controller message constants. */ -#define HV_MESSAGE_SIZE (256) -#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) -#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) #define HV_ANY_VP (0xFFFFFFFF) /* Define synthetic interrupt controller flag constants. */ @@ -74,48 +70,9 @@ enum hv_cpuid_function { #define HV_EVENT_FLAGS_BYTE_COUNT (256) #define HV_EVENT_FLAGS_DWORD_COUNT (256 / sizeof(u32)) -/* Define hypervisor message types. */ -enum hv_message_type { - HVMSG_NONE = 0x00000000, - - /* Memory access messages. */ - HVMSG_UNMAPPED_GPA = 0x80000000, - HVMSG_GPA_INTERCEPT = 0x80000001, - - /* Timer notification messages. */ - HVMSG_TIMER_EXPIRED = 0x80000010, - - /* Error messages. */ - HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, - HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, - HVMSG_UNSUPPORTED_FEATURE = 0x80000022, - - /* Trace buffer complete messages. */ - HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, - - /* Platform-specific processor intercept messages. */ - HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, - HVMSG_X64_MSR_INTERCEPT = 0x80010001, - HVMSG_X64_CPUID_INTERCEPT = 0x80010002, - HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, - HVMSG_X64_APIC_EOI = 0x80010004, - HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 -}; - -#define HV_SYNIC_STIMER_COUNT (4) - /* Define invalid partition identifier. */ #define HV_PARTITION_ID_INVALID ((u64)0x0) -/* Define port identifier type. */ -union hv_port_id { - u32 asu32; - struct { - u32 id:24; - u32 reserved:8; - } u ; -}; - /* Define port type. */ enum hv_port_type { HVPORT_MSG = 1, @@ -163,27 +120,6 @@ struct hv_connection_info { }; }; -/* Define synthetic interrupt controller message flags. */ -union hv_message_flags { - u8 asu8; - struct { - u8 msg_pending:1; - u8 reserved:7; - }; -}; - -/* Define synthetic interrupt controller message header. */ -struct hv_message_header { - enum hv_message_type message_type; - u8 payload_size; - union hv_message_flags message_flags; - u8 reserved[2]; - union { - u64 sender; - union hv_port_id port; - }; -}; - /* * Timer configuration register. */ @@ -200,31 +136,9 @@ union hv_timer_config { }; }; - -/* Define timer message payload structure. */ -struct hv_timer_message_payload { - u32 timer_index; - u32 reserved; - u64 expiration_time; /* When the timer expired */ - u64 delivery_time; /* When the message was delivered */ -}; - -/* Define synthetic interrupt controller message format. 
*/ -struct hv_message { - struct hv_message_header header; - union { - u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; - } u ; -}; - /* Define the number of message buffers associated with each port. */ #define HV_PORT_MESSAGE_BUFFER_COUNT (16) -/* Define the synthetic interrupt message page layout. */ -struct hv_message_page { - struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; -}; - /* Define the synthetic interrupt controller event flags format. */ union hv_synic_event_flags { u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT]; @@ -347,7 +261,7 @@ enum hv_call_code { struct hv_input_post_message { union hv_connection_id connectionid; u32 reserved; - enum hv_message_type message_type; + u32 message_type; u32 payload_size; u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; }; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 8f59f057cdf4..60fb80bd353d 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -859,16 +859,6 @@ config SENSORS_MAX31790 This driver can also be built as a module. If so, the module will be called max31790. -config SENSORS_HTU21 - tristate "Measurement Specialties HTU21D humidity/temperature sensors" - depends on I2C - help - If you say yes here you get support for the Measurement Specialties - HTU21D humidity and temperature sensors. - - This driver can also be built as a module. If so, the module - will be called htu21. - config SENSORS_MCP3021 tristate "Microchip MCP3021 and compatibles" depends on I2C @@ -1217,6 +1207,7 @@ config SENSORS_PWM_FAN config SENSORS_SHT15 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." depends on GPIOLIB || COMPILE_TEST + select BITREVERSE help If you say yes here you get support for the Sensiron SHT10, SHT11, SHT15, SHT71, SHT75 humidity and temperature sensors. 
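The hwmon Kconfig help texts above all follow the kernel's usual convention: a tristate option can be built in or built as a loadable module named after its object file, and the option only takes effect because a matching obj-$(CONFIG_...) rule in the directory Makefile ties it to a source file. Adding or removing a sensor driver therefore always touches the Kconfig entry, the Makefile rule and the driver source together, which is exactly the set of hunks that follows for htu21. A minimal sketch of that pairing, using a deliberately made-up CONFIG_SENSORS_FOO42 / foo42.c driver that is not part of this patch set:

# drivers/hwmon/Kconfig (illustrative entry, not from this patch)
config SENSORS_FOO42
	tristate "Hypothetical FOO42 humidity sensor"
	depends on I2C
	help
	  If you say yes here you get support for the hypothetical FOO42
	  humidity sensor.

	  This driver can also be built as a module. If so, the module
	  will be called foo42.

# drivers/hwmon/Makefile (illustrative rule, not from this patch)
obj-$(CONFIG_SENSORS_FOO42)	+= foo42.o

With that pairing in mind, the next hunks read naturally: dropping the obj-$(CONFIG_SENSORS_HTU21) rule and deleting htu21.c completes the removal begun by the Kconfig change above, while the SENSORS_LTC3815 addition later in this series introduces the same three pieces in the opposite direction.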
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 12a32398fdcc..30c94df31465 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -68,7 +68,6 @@ obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o -obj-$(CONFIG_SENSORS_HTU21) += htu21.o obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 5f7067d7b625..f77eb971ce95 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -47,6 +47,8 @@ MODULE_LICENSE("GPL"); #define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b +#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F4 0x15b4 + struct fam15h_power_data { struct pci_dev *pdev; unsigned int tdp_to_watts; @@ -124,7 +126,7 @@ static int fam15h_power_init_attrs(struct pci_dev *pdev, if (c->x86 == 0x15 && (c->x86_model <= 0xf || - (c->x86_model >= 0x60 && c->x86_model <= 0x6f))) + (c->x86_model >= 0x60 && c->x86_model <= 0x7f))) n += 1; fam15h_power_attrs = devm_kcalloc(&pdev->dev, n, @@ -138,7 +140,7 @@ static int fam15h_power_init_attrs(struct pci_dev *pdev, fam15h_power_attrs[n++] = &dev_attr_power1_crit.attr; if (c->x86 == 0x15 && (c->x86_model <= 0xf || - (c->x86_model >= 0x60 && c->x86_model <= 0x6f))) + (c->x86_model >= 0x60 && c->x86_model <= 0x7f))) fam15h_power_attrs[n++] = &dev_attr_power1_input.attr; data->group.attrs = fam15h_power_attrs; @@ -296,6 +298,7 @@ static const struct pci_device_id fam15h_power_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, {} diff --git a/drivers/hwmon/htu21.c b/drivers/hwmon/htu21.c deleted file mode 100644 index 4c3bbb72f82a..000000000000 --- a/drivers/hwmon/htu21.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Measurement Specialties HTU21D humidity and temperature sensor driver - * - * Copyright (C) 2013 William Markezana <william.markezana@meas-spec.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/i2c.h> -#include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> -#include <linux/err.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/jiffies.h> - -/* HTU21 Commands */ -#define HTU21_T_MEASUREMENT_HM 0xE3 -#define HTU21_RH_MEASUREMENT_HM 0xE5 - -struct htu21 { - struct i2c_client *client; - struct mutex lock; - bool valid; - unsigned long last_update; - int temperature; - int humidity; -}; - -static inline int htu21_temp_ticks_to_millicelsius(int ticks) -{ - ticks &= ~0x0003; /* clear status bits */ - /* - * Formula T = -46.85 + 175.72 * ST / 2^16 from datasheet p14, - * optimized for integer fixed point (3 digits) arithmetic - */ - return ((21965 * ticks) >> 13) - 46850; -} - -static inline int htu21_rh_ticks_to_per_cent_mille(int ticks) -{ - ticks &= ~0x0003; /* clear status bits */ - /* - * Formula RH = -6 + 125 * SRH / 2^16 from datasheet p14, - * optimized for integer fixed point (3 digits) arithmetic - */ - return ((15625 * ticks) >> 13) - 6000; -} - -static int htu21_update_measurements(struct device *dev) -{ - struct htu21 *htu21 = dev_get_drvdata(dev); - struct i2c_client *client = htu21->client; - int ret = 0; - - mutex_lock(&htu21->lock); - - if (time_after(jiffies, htu21->last_update + HZ / 2) || - !htu21->valid) { - ret = i2c_smbus_read_word_swapped(client, - HTU21_T_MEASUREMENT_HM); - if (ret < 0) - goto out; - htu21->temperature = htu21_temp_ticks_to_millicelsius(ret); - ret = i2c_smbus_read_word_swapped(client, - HTU21_RH_MEASUREMENT_HM); - if (ret < 0) - goto out; - htu21->humidity = htu21_rh_ticks_to_per_cent_mille(ret); - htu21->last_update = jiffies; - htu21->valid = true; - } -out: - mutex_unlock(&htu21->lock); - - return ret >= 0 ? 
0 : ret; -} - -static ssize_t htu21_show_temperature(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct htu21 *htu21 = dev_get_drvdata(dev); - int ret; - - ret = htu21_update_measurements(dev); - if (ret < 0) - return ret; - return sprintf(buf, "%d\n", htu21->temperature); -} - -static ssize_t htu21_show_humidity(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct htu21 *htu21 = dev_get_drvdata(dev); - int ret; - - ret = htu21_update_measurements(dev); - if (ret < 0) - return ret; - return sprintf(buf, "%d\n", htu21->humidity); -} - -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, - htu21_show_temperature, NULL, 0); -static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, - htu21_show_humidity, NULL, 0); - -static struct attribute *htu21_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_humidity1_input.dev_attr.attr, - NULL -}; - -ATTRIBUTE_GROUPS(htu21); - -static int htu21_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - struct device *dev = &client->dev; - struct htu21 *htu21; - struct device *hwmon_dev; - - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_READ_WORD_DATA)) { - dev_err(&client->dev, - "adapter does not support SMBus word transactions\n"); - return -ENODEV; - } - - htu21 = devm_kzalloc(dev, sizeof(*htu21), GFP_KERNEL); - if (!htu21) - return -ENOMEM; - - htu21->client = client; - mutex_init(&htu21->lock); - - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - htu21, - htu21_groups); - return PTR_ERR_OR_ZERO(hwmon_dev); -} - -static const struct i2c_device_id htu21_id[] = { - { "htu21", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, htu21_id); - -static struct i2c_driver htu21_driver = { - .class = I2C_CLASS_HWMON, - .driver = { - .name = "htu21", - }, - .probe = htu21_probe, - .id_table = htu21_id, -}; - -module_i2c_driver(htu21_driver); - -MODULE_AUTHOR("William Markezana <william.markezana@meas-spec.com>"); -MODULE_DESCRIPTION("MEAS HTU21D humidity and temperature sensor driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index 7a8a6fbf11ff..1f643782ce04 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -920,8 +920,8 @@ static ssize_t aem_set_power_period(struct device *dev, /* Discover sensors on an AEM device */ static int aem_register_sensors(struct aem_data *data, - struct aem_ro_sensor_template *ro, - struct aem_rw_sensor_template *rw) + const struct aem_ro_sensor_template *ro, + const struct aem_rw_sensor_template *rw) { struct device *dev = &data->pdev->dev; struct sensor_device_attribute *sensors = data->sensors; @@ -1020,19 +1020,19 @@ static void aem_remove_sensors(struct aem_data *data) /* Sensor probe functions */ /* Description of AEM1 sensors */ -static struct aem_ro_sensor_template aem1_ro_sensors[] = { +static const struct aem_ro_sensor_template aem1_ro_sensors[] = { {"energy1_input", aem_show_energy, 0}, {"power1_average", aem_show_power, 0}, {NULL, NULL, 0}, }; -static struct aem_rw_sensor_template aem1_rw_sensors[] = { +static const struct aem_rw_sensor_template aem1_rw_sensors[] = { {"power1_average_interval", aem_show_power_period, aem_set_power_period, 0}, {NULL, NULL, NULL, 0}, }; /* Description of AEM2 sensors */ -static struct aem_ro_sensor_template aem2_ro_sensors[] = { +static const struct aem_ro_sensor_template aem2_ro_sensors[] = { {"energy1_input", aem_show_energy, 0}, {"energy2_input", aem_show_energy, 1}, {"power1_average", aem_show_power, 0}, @@ -1050,7 
+1050,7 @@ static struct aem_ro_sensor_template aem2_ro_sensors[] = { {NULL, NULL, 0}, }; -static struct aem_rw_sensor_template aem2_rw_sensors[] = { +static const struct aem_rw_sensor_template aem2_rw_sensors[] = { {"power1_average_interval", aem_show_power_period, aem_set_power_period, 0}, {"power2_average_interval", aem_show_power_period, aem_set_power_period, 1}, {NULL, NULL, NULL, 0}, diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index 37f01702d081..559c596b24f9 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c @@ -29,7 +29,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/acpi.h> -#include <linux/dmi.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -45,7 +45,7 @@ enum kinds { nct6683 }; static bool force; module_param(force, bool, 0); -MODULE_PARM_DESC(force, "Set to one to enable detection on non-Intel boards"); +MODULE_PARM_DESC(force, "Set to one to enable support for unknown vendors"); static const char * const nct6683_device_names[] = { "nct6683", @@ -141,6 +141,7 @@ superio_exit(int ioreg) #define NCT6683_REG_MON(x) (0x100 + (x) * 2) #define NCT6683_REG_FAN_RPM(x) (0x140 + (x) * 2) #define NCT6683_REG_PWM(x) (0x160 + (x)) +#define NCT6683_REG_PWM_WRITE(x) (0xa28 + (x)) #define NCT6683_REG_MON_STS(x) (0x174 + (x)) #define NCT6683_REG_IDLE(x) (0x178 + (x)) @@ -165,8 +166,13 @@ superio_exit(int ioreg) #define NCT6683_REG_FAN_MIN(x) (0x3b8 + (x) * 2) /* 16 bit */ +#define NCT6683_REG_FAN_CFG_CTRL 0xa01 +#define NCT6683_FAN_CFG_REQ 0x80 +#define NCT6683_FAN_CFG_DONE 0x40 + #define NCT6683_REG_CUSTOMER_ID 0x602 #define NCT6683_CUSTOMER_ID_INTEL 0x805 +#define NCT6683_CUSTOMER_ID_MITAC 0xa0e #define NCT6683_REG_BUILD_YEAR 0x604 #define NCT6683_REG_BUILD_MONTH 0x605 @@ -394,7 +400,8 @@ struct sensor_template_group { }; static struct attribute_group * -nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, +nct6683_create_attr_group(struct device *dev, + const struct sensor_template_group *tg, int repeat) { struct sensor_device_attribute_2 *a2; @@ -559,6 +566,7 @@ static int get_temp_reg(struct nct6683_data *data, int nr, int index) break; } break; + case NCT6683_CUSTOMER_ID_MITAC: default: switch (nr) { default: @@ -703,7 +711,7 @@ static struct sensor_device_template *nct6683_attributes_in_template[] = { NULL }; -static struct sensor_template_group nct6683_in_template_group = { +static const struct sensor_template_group nct6683_in_template_group = { .templates = nct6683_attributes_in_template, .is_visible = nct6683_in_is_visible, }; @@ -774,7 +782,7 @@ static struct sensor_device_template *nct6683_attributes_fan_template[] = { NULL }; -static struct sensor_template_group nct6683_fan_template_group = { +static const struct sensor_template_group nct6683_fan_template_group = { .templates = nct6683_attributes_fan_template, .is_visible = nct6683_fan_is_visible, .base = 1, @@ -902,7 +910,7 @@ static struct sensor_device_template *nct6683_attributes_temp_template[] = { NULL }; -static struct sensor_template_group nct6683_temp_template_group = { +static const struct sensor_template_group nct6683_temp_template_group = { .templates = nct6683_attributes_temp_template, .is_visible = nct6683_temp_is_visible, .base = 1, @@ -918,7 +926,29 @@ show_pwm(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%d\n", data->pwm[index]); } -SENSOR_TEMPLATE(pwm, "pwm%d", S_IRUGO, show_pwm, NULL, 0); +static ssize_t +store_pwm(struct device *dev, struct 
device_attribute *attr, const char *buf, + size_t count) +{ + struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + struct nct6683_data *data = dev_get_drvdata(dev); + int index = sattr->index; + unsigned long val; + + if (kstrtoul(buf, 10, &val) || val > 255) + return -EINVAL; + + mutex_lock(&data->update_lock); + nct6683_write(data, NCT6683_REG_FAN_CFG_CTRL, NCT6683_FAN_CFG_REQ); + usleep_range(1000, 2000); + nct6683_write(data, NCT6683_REG_PWM_WRITE(index), val); + nct6683_write(data, NCT6683_REG_FAN_CFG_CTRL, NCT6683_FAN_CFG_DONE); + mutex_unlock(&data->update_lock); + + return count; +} + +SENSOR_TEMPLATE(pwm, "pwm%d", S_IRUGO, show_pwm, store_pwm, 0); static umode_t nct6683_pwm_is_visible(struct kobject *kobj, struct attribute *attr, int index) @@ -930,6 +960,10 @@ static umode_t nct6683_pwm_is_visible(struct kobject *kobj, if (!(data->have_pwm & (1 << pwm))) return 0; + /* Only update pwm values for Mitac boards */ + if (data->customer_id == NCT6683_CUSTOMER_ID_MITAC) + return attr->mode | S_IWUSR; + return attr->mode; } @@ -938,7 +972,7 @@ static struct sensor_device_template *nct6683_attributes_pwm_template[] = { NULL }; -static struct sensor_template_group nct6683_pwm_template_group = { +static const struct sensor_template_group nct6683_pwm_template_group = { .templates = nct6683_attributes_pwm_template, .is_visible = nct6683_pwm_is_visible, .base = 1, @@ -1170,6 +1204,7 @@ static int nct6683_probe(struct platform_device *pdev) struct device *hwmon_dev; struct resource *res; int groups = 0; + char build[16]; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!devm_request_region(dev, res->start, IOREGION_LENGTH, DRVNAME)) @@ -1187,6 +1222,17 @@ static int nct6683_probe(struct platform_device *pdev) data->customer_id = nct6683_read16(data, NCT6683_REG_CUSTOMER_ID); + /* By default only instantiate driver if the customer ID is known */ + switch (data->customer_id) { + case NCT6683_CUSTOMER_ID_INTEL: + break; + case NCT6683_CUSTOMER_ID_MITAC: + break; + default: + if (!force) + return -ENODEV; + } + nct6683_init_device(data); nct6683_setup_fans(data); nct6683_setup_sensors(data); @@ -1230,13 +1276,22 @@ static int nct6683_probe(struct platform_device *pdev) } data->groups[groups++] = &nct6683_group_other; - dev_info(dev, "%s EC firmware version %d.%d build %02x/%02x/%02x\n", + if (data->customer_id == NCT6683_CUSTOMER_ID_INTEL) + scnprintf(build, sizeof(build), "%02x/%02x/%02x", + nct6683_read(data, NCT6683_REG_BUILD_MONTH), + nct6683_read(data, NCT6683_REG_BUILD_DAY), + nct6683_read(data, NCT6683_REG_BUILD_YEAR)); + else + scnprintf(build, sizeof(build), "%02d/%02d/%02d", + nct6683_read(data, NCT6683_REG_BUILD_MONTH), + nct6683_read(data, NCT6683_REG_BUILD_DAY), + nct6683_read(data, NCT6683_REG_BUILD_YEAR)); + + dev_info(dev, "%s EC firmware version %d.%d build %s\n", nct6683_chip_names[data->kind], nct6683_read(data, NCT6683_REG_VERSION_HI), nct6683_read(data, NCT6683_REG_VERSION_LO), - nct6683_read(data, NCT6683_REG_BUILD_MONTH), - nct6683_read(data, NCT6683_REG_BUILD_DAY), - nct6683_read(data, NCT6683_REG_BUILD_YEAR)); + build); hwmon_dev = devm_hwmon_device_register_with_groups(dev, nct6683_device_names[data->kind], data, data->groups); @@ -1292,20 +1347,10 @@ static struct platform_driver nct6683_driver = { static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data) { - const char *board_vendor; int addr; u16 val; int err; - /* - * Only run on Intel boards unless the 'force' module parameter is set - */ - if (!force) { - board_vendor 
= dmi_get_system_info(DMI_BOARD_VENDOR); - if (!board_vendor || strcmp(board_vendor, "Intel Corporation")) - return -ENODEV; - } - err = superio_enter(sioaddr); if (err) return err; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index d7ebdf8651f5..d087a8e00cf5 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1045,7 +1045,8 @@ struct sensor_template_group { }; static struct attribute_group * -nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, +nct6775_create_attr_group(struct device *dev, + const struct sensor_template_group *tg, int repeat) { struct attribute_group *group; @@ -1827,7 +1828,7 @@ static struct sensor_device_template *nct6775_attributes_in_template[] = { NULL }; -static struct sensor_template_group nct6775_in_template_group = { +static const struct sensor_template_group nct6775_in_template_group = { .templates = nct6775_attributes_in_template, .is_visible = nct6775_in_is_visible, }; @@ -2046,7 +2047,7 @@ static struct sensor_device_template *nct6775_attributes_fan_template[] = { NULL }; -static struct sensor_template_group nct6775_fan_template_group = { +static const struct sensor_template_group nct6775_fan_template_group = { .templates = nct6775_attributes_fan_template, .is_visible = nct6775_fan_is_visible, .base = 1, @@ -2255,7 +2256,7 @@ static struct sensor_device_template *nct6775_attributes_temp_template[] = { NULL }; -static struct sensor_template_group nct6775_temp_template_group = { +static const struct sensor_template_group nct6775_temp_template_group = { .templates = nct6775_attributes_temp_template, .is_visible = nct6775_temp_is_visible, .base = 1, @@ -3117,7 +3118,7 @@ static struct sensor_device_template *nct6775_attributes_pwm_template[] = { NULL }; -static struct sensor_template_group nct6775_pwm_template_group = { +static const struct sensor_template_group nct6775_pwm_template_group = { .templates = nct6775_attributes_pwm_template, .is_visible = nct6775_pwm_is_visible, .base = 1, diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index df6ebb2b8f0f..7e5cc3d025ef 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -65,6 +65,16 @@ config SENSORS_LTC2978_REGULATOR If you say yes here you get regulator support for Linear Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, and LTM4676. +config SENSORS_LTC3815 + tristate "Linear Technologies LTC3815" + default n + help + If you say yes here you get hardware monitoring support for Linear + Technology LTC3815. + + This driver can also be built as a module. If so, the module will + be called ltc3815. 
+ config SENSORS_MAX16064 tristate "Maxim MAX16064" default n diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index bce046d37f02..562132054aaf 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o +obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c new file mode 100644 index 000000000000..bb32e6276622 --- /dev/null +++ b/drivers/hwmon/pmbus/ltc3815.c @@ -0,0 +1,215 @@ +/* + * Hardware monitoring driver for LTC3815 + * + * Copyright (c) 2015 Linear Technology + * Copyright (c) 2015 Guenter Roeck + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include "pmbus.h" + +#define LTC3815_MFR_IOUT_PEAK 0xd7 +#define LTC3815_MFR_VOUT_PEAK 0xdd +#define LTC3815_MFR_VIN_PEAK 0xde +#define LTC3815_MFR_TEMP_PEAK 0xdf +#define LTC3815_MFR_IIN_PEAK 0xe1 +#define LTC3815_MFR_SPECIAL_ID 0xe7 + +#define LTC3815_ID 0x8000 +#define LTC3815_ID_MASK 0xff00 + +static int ltc3815_read_byte_data(struct i2c_client *client, int page, int reg) +{ + int ret; + + switch (reg) { + case PMBUS_VOUT_MODE: + /* + * The chip returns 0x3e, suggesting VID mode with manufacturer + * specific VID codes. Since the output voltage is reported + * with a LSB of 0.5mV, override and report direct mode with + * appropriate coefficients. + */ + ret = 0x40; + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg) +{ + int ret; + + switch (reg) { + case PMBUS_CLEAR_FAULTS: + /* + * LTC3815 does not support the CLEAR_FAULTS command. + * Emulate it by clearing the status register. 
+ */ + ret = pmbus_read_word_data(client, 0, PMBUS_STATUS_WORD); + if (ret > 0) { + pmbus_write_word_data(client, 0, PMBUS_STATUS_WORD, + ret); + ret = 0; + } + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int ltc3815_read_word_data(struct i2c_client *client, int page, int reg) +{ + int ret; + + switch (reg) { + case PMBUS_VIRT_READ_VIN_MAX: + ret = pmbus_read_word_data(client, page, LTC3815_MFR_VIN_PEAK); + break; + case PMBUS_VIRT_READ_VOUT_MAX: + ret = pmbus_read_word_data(client, page, LTC3815_MFR_VOUT_PEAK); + break; + case PMBUS_VIRT_READ_TEMP_MAX: + ret = pmbus_read_word_data(client, page, LTC3815_MFR_TEMP_PEAK); + break; + case PMBUS_VIRT_READ_IOUT_MAX: + ret = pmbus_read_word_data(client, page, LTC3815_MFR_IOUT_PEAK); + break; + case PMBUS_VIRT_READ_IIN_MAX: + ret = pmbus_read_word_data(client, page, LTC3815_MFR_IIN_PEAK); + break; + case PMBUS_VIRT_RESET_VOUT_HISTORY: + case PMBUS_VIRT_RESET_VIN_HISTORY: + case PMBUS_VIRT_RESET_TEMP_HISTORY: + case PMBUS_VIRT_RESET_IOUT_HISTORY: + case PMBUS_VIRT_RESET_IIN_HISTORY: + ret = 0; + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int ltc3815_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + int ret; + + switch (reg) { + case PMBUS_VIRT_RESET_IIN_HISTORY: + ret = pmbus_write_word_data(client, page, + LTC3815_MFR_IIN_PEAK, 0); + break; + case PMBUS_VIRT_RESET_IOUT_HISTORY: + ret = pmbus_write_word_data(client, page, + LTC3815_MFR_IOUT_PEAK, 0); + break; + case PMBUS_VIRT_RESET_VOUT_HISTORY: + ret = pmbus_write_word_data(client, page, + LTC3815_MFR_VOUT_PEAK, 0); + break; + case PMBUS_VIRT_RESET_VIN_HISTORY: + ret = pmbus_write_word_data(client, page, + LTC3815_MFR_VIN_PEAK, 0); + break; + case PMBUS_VIRT_RESET_TEMP_HISTORY: + ret = pmbus_write_word_data(client, page, + LTC3815_MFR_TEMP_PEAK, 0); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static const struct i2c_device_id ltc3815_id[] = { + {"ltc3815", 0}, + { } +}; +MODULE_DEVICE_TABLE(i2c, ltc3815_id); + +static struct pmbus_driver_info ltc3815_info = { + .pages = 1, + .format[PSC_VOLTAGE_IN] = direct, + .format[PSC_VOLTAGE_OUT] = direct, + .format[PSC_CURRENT_IN] = direct, + .format[PSC_CURRENT_OUT] = direct, + .format[PSC_TEMPERATURE] = direct, + .m[PSC_VOLTAGE_IN] = 250, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = 0, + .m[PSC_VOLTAGE_OUT] = 2, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = 3, + .m[PSC_CURRENT_IN] = 1, + .b[PSC_CURRENT_IN] = 0, + .R[PSC_CURRENT_IN] = 2, + .m[PSC_CURRENT_OUT] = 1, + .b[PSC_CURRENT_OUT] = 0, + .R[PSC_CURRENT_OUT] = 2, + .m[PSC_TEMPERATURE] = 1, + .b[PSC_TEMPERATURE] = 0, + .R[PSC_TEMPERATURE] = 0, + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP, + .read_byte_data = ltc3815_read_byte_data, + .read_word_data = ltc3815_read_word_data, + .write_byte = ltc3815_write_byte, + .write_word_data = ltc3815_write_word_data, +}; + +static int ltc3815_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int chip_id; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_WORD_DATA)) + return -ENODEV; + + chip_id = i2c_smbus_read_word_data(client, LTC3815_MFR_SPECIAL_ID); + if (chip_id < 0) + return chip_id; + if ((chip_id & LTC3815_ID_MASK) != LTC3815_ID) + return -ENODEV; + + return pmbus_do_probe(client, id, &ltc3815_info); +} + +static struct i2c_driver ltc3815_driver = { + .driver = { + .name = "ltc3815", + }, + .probe = ltc3815_probe, + .remove = 
pmbus_do_remove, + .id_table = ltc3815_id, +}; + +module_i2c_driver(ltc3815_driver); + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("PMBus driver for LTC3815"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index 65482624ea2c..5289aa0980a8 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c @@ -58,6 +58,7 @@ struct tmp102 { u16 config_orig; unsigned long last_update; int temp[3]; + bool first_time; }; /* convert left adjusted 13-bit TMP102 register value to milliCelsius */ @@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev) tmp102->temp[i] = tmp102_reg_to_mC(status); } tmp102->last_update = jiffies; + tmp102->first_time = false; } mutex_unlock(&tmp102->lock); return tmp102; @@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp) { struct tmp102 *tmp102 = tmp102_update_device(dev); + /* Is it too early even to return a conversion? */ + if (tmp102->first_time) { + dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__); + return -EAGAIN; + } + *temp = tmp102->temp[0]; return 0; @@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev, struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct tmp102 *tmp102 = tmp102_update_device(dev); + /* Is it too early even to return a read? */ + if (tmp102->first_time) + return -EAGAIN; + return sprintf(buf, "%d\n", tmp102->temp[sda->index]); } @@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client, status = -ENODEV; goto fail_restore_config; } - tmp102->last_update = jiffies - HZ; + tmp102->last_update = jiffies; + /* Mark that we are not ready with data until conversion is complete */ + tmp102->first_time = true; mutex_init(&tmp102->lock); hwmon_dev = hwmon_device_register_with_groups(dev, client->name, diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index c5628a42170a..a8bdcb5292f5 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) * d is always 6 on Keystone I2C controller */ - /* get minimum of 7 MHz clock, but max of 12 MHz */ - psc = (input_clock / 7000000) - 1; + /* + * Both Davinci and current Keystone User Guides recommend a value + * between 7MHz and 12MHz. In reality 7MHz module clock doesn't + * always produce enough margin between SDA and SCL transitions. + * Measurements show that the higher the module clock is, the + * bigger is the margin, providing more reliable communication. + * So we better target for 12MHz. + */ + psc = (input_clock / 12000000) - 1; if ((input_clock / (psc + 1)) > 12000000) psc++; /* better to run under spec than over */ d = (psc >= 2) ? 
5 : 7 - psc; diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c index 7d7ae97476e2..e38c2bbba940 100644 --- a/drivers/i2c/busses/i2c-designware-baytrail.c +++ b/drivers/i2c/busses/i2c-designware-baytrail.c @@ -34,8 +34,7 @@ static int get_sem(struct device *dev, u32 *sem) u32 data; int ret; - ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, - &data); + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &data); if (ret) { dev_err(dev, "iosf failed to read punit semaphore\n"); return ret; @@ -50,21 +49,19 @@ static void reset_semaphore(struct device *dev) { u32 data; - if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, - PUNIT_SEMAPHORE, &data)) { + if (iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &data)) { dev_err(dev, "iosf failed to reset punit semaphore during read\n"); return; } data &= ~PUNIT_SEMAPHORE_BIT; - if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, - PUNIT_SEMAPHORE, data)) + if (iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, PUNIT_SEMAPHORE, data)) dev_err(dev, "iosf failed to reset punit semaphore during write\n"); } static int baytrail_i2c_acquire(struct dw_i2c_dev *dev) { - u32 sem; + u32 sem = PUNIT_SEMAPHORE_ACQUIRE; int ret; unsigned long start, end; @@ -77,8 +74,7 @@ static int baytrail_i2c_acquire(struct dw_i2c_dev *dev) return 0; /* host driver writes to side band semaphore register */ - ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, - PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE); + ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, PUNIT_SEMAPHORE, sem); if (ret) { dev_err(dev->dev, "iosf punit semaphore request failed\n"); return ret; @@ -102,8 +98,7 @@ static int baytrail_i2c_acquire(struct dw_i2c_dev *dev) dev_err(dev->dev, "punit semaphore timed out, resetting\n"); reset_semaphore(dev->dev); - ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, - PUNIT_SEMAPHORE, &sem); + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &sem); if (ret) dev_err(dev->dev, "iosf failed to read punit semaphore\n"); else diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 8c48b27ba059..de7fbbb374cd 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) tx_aborted: if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) complete(&dev->cmd_complete); + else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { + /* workaround to trigger pending interrupt */ + stat = dw_readl(dev, DW_IC_INTR_MASK); + i2c_dw_disable_int(dev); + dw_writel(dev, stat, DW_IC_INTR_MASK); + } return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 1d50898e7b24..9ffb63a60f95 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -111,6 +111,7 @@ struct dw_i2c_dev { #define ACCESS_SWAP 0x00000001 #define ACCESS_16BIT 0x00000002 +#define ACCESS_INTR_MASK 0x00000004 extern int i2c_dw_init(struct dw_i2c_dev *dev); extern void i2c_dw_disable(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 809579ecb5a4..bf72ae740fc1 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -36,6 +36,7 @@ #include <linux/platform_device.h> 
#include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/acpi.h> @@ -93,6 +94,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], static int dw_i2c_acpi_configure(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); + const struct acpi_device_id *id; dev->adapter.nr = -1; dev->tx_fifo_depth = 32; @@ -106,6 +108,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &dev->sda_hold_time); + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (id && id->driver_data) + dev->accessor_flags |= (u32)id->driver_data; + return 0; } @@ -116,7 +122,8 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT3433", 0 }, { "80860F41", 0 }, { "808622C1", 0 }, - { "AMD0010", 0 }, + { "AMD0010", ACCESS_INTR_MASK }, + { "APMC0D0F", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); @@ -129,10 +136,10 @@ static inline int dw_i2c_acpi_configure(struct platform_device *pdev) static int dw_i2c_plat_probe(struct platform_device *pdev) { + struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev); struct dw_i2c_dev *dev; struct i2c_adapter *adap; struct resource *mem; - struct dw_i2c_platform_data *pdata; int irq, r; u32 clk_freq, ht = 0; @@ -156,33 +163,28 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) /* fast mode by default because of legacy reasons */ clk_freq = 400000; - if (has_acpi_companion(&pdev->dev)) { - dw_i2c_acpi_configure(pdev); - } else if (pdev->dev.of_node) { - of_property_read_u32(pdev->dev.of_node, - "i2c-sda-hold-time-ns", &ht); - - of_property_read_u32(pdev->dev.of_node, - "i2c-sda-falling-time-ns", - &dev->sda_falling_time); - of_property_read_u32(pdev->dev.of_node, - "i2c-scl-falling-time-ns", - &dev->scl_falling_time); - - of_property_read_u32(pdev->dev.of_node, "clock-frequency", - &clk_freq); - - /* Only standard mode at 100kHz and fast mode at 400kHz - * are supported. - */ - if (clk_freq != 100000 && clk_freq != 400000) { - dev_err(&pdev->dev, "Only 100kHz and 400kHz supported"); - return -EINVAL; - } + if (pdata) { + clk_freq = pdata->i2c_scl_freq; } else { - pdata = dev_get_platdata(&pdev->dev); - if (pdata) - clk_freq = pdata->i2c_scl_freq; + device_property_read_u32(&pdev->dev, "i2c-sda-hold-time-ns", + &ht); + device_property_read_u32(&pdev->dev, "i2c-sda-falling-time-ns", + &dev->sda_falling_time); + device_property_read_u32(&pdev->dev, "i2c-scl-falling-time-ns", + &dev->scl_falling_time); + device_property_read_u32(&pdev->dev, "clock-frequency", + &clk_freq); + } + + if (has_acpi_companion(&pdev->dev)) + dw_i2c_acpi_configure(pdev); + + /* + * Only standard mode at 100kHz and fast mode at 400kHz are supported. 
+ */ + if (clk_freq != 100000 && clk_freq != 400000) { + dev_err(&pdev->dev, "Only 100kHz and 400kHz supported"); + return -EINVAL; } r = i2c_dw_eval_lock_support(dev); @@ -240,12 +242,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) } r = i2c_dw_probe(dev); - if (r) { + if (r && !dev->pm_runtime_disabled) pm_runtime_disable(&pdev->dev); - return r; - } - return 0; + return r; } static int dw_i2c_plat_remove(struct platform_device *pdev) @@ -260,7 +260,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); + if (!dev->pm_runtime_disabled) + pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 9bb0b056b25f..d4d853680ae4 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1119,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev) i2c_imx, IMX_I2C_I2CR); imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); + i2c_imx_init_recovery_info(i2c_imx, pdev); + /* Add I2C adapter */ ret = i2c_add_numbered_adapter(&i2c_imx->adapter); if (ret < 0) { @@ -1126,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev) goto clk_disable; } - i2c_imx_init_recovery_info(i2c_imx, pdev); - /* Set up platform driver data */ platform_set_drvdata(pdev, i2c_imx); clk_disable_unprepare(i2c_imx->clk); diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 5801227b97ab..43207f52e5a3 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -146,6 +146,8 @@ struct mv64xxx_i2c_data { bool errata_delay; struct reset_control *rstc; bool irq_clear_inverted; + /* Clk div is 2 to the power n, not 2 to the power n + 1 */ + bool clk_n_base_0; }; static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { @@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); #ifdef CONFIG_OF #ifdef CONFIG_HAVE_CLK static int -mv64xxx_calc_freq(const int tclk, const int n, const int m) +mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data, + const int tclk, const int n, const int m) { - return tclk / (10 * (m + 1) * (2 << n)); + if (drv_data->clk_n_base_0) + return tclk / (10 * (m + 1) * (1 << n)); + else + return tclk / (10 * (m + 1) * (2 << n)); } static bool -mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, - u32 *best_m) +mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data, + const u32 req_freq, const u32 tclk) { int freq, delta, best_delta = INT_MAX; int m, n; for (n = 0; n <= 7; n++) for (m = 0; m <= 15; m++) { - freq = mv64xxx_calc_freq(tclk, n, m); + freq = mv64xxx_calc_freq(drv_data, tclk, n, m); delta = req_freq - freq; if (delta >= 0 && delta < best_delta) { - *best_m = m; - *best_n = n; + drv_data->freq_m = m; + drv_data->freq_n = n; best_delta = delta; } if (best_delta == 0) @@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, if (of_property_read_u32(np, "clock-frequency", &bus_freq)) bus_freq = 100000; /* 100kHz by default */ - if (!mv64xxx_find_baud_factors(bus_freq, tclk, - &drv_data->freq_n, &drv_data->freq_m)) { + if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") || + of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) + drv_data->clk_n_base_0 = true; + + if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) { rc = -EINVAL; goto out; } diff --git a/drivers/i2c/busses/i2c-rcar.c 
b/drivers/i2c/busses/i2c-rcar.c index b0ae560b38c3..599c0d7bd906 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave) if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; - pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); + pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv)); priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); @@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave) priv->slave = NULL; - pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); + pm_runtime_put(rcar_i2c_priv_to_dev(priv)); return 0; } diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index c1935ebd6a9c..9096d17beb5b 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev) &i2c->scl_fall_ns)) i2c->scl_fall_ns = 300; if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", - &i2c->scl_fall_ns)) + &i2c->sda_fall_ns)) i2c->sda_fall_ns = i2c->scl_fall_ns; strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index ea72dca32fdf..25020ec777c9 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev) adap = &i2c_dev->adap; i2c_set_adapdata(adap, i2c_dev); - snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); + snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start); adap->owner = THIS_MODULE; adap->timeout = 2 * HZ; adap->retries = 0; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d2d5d004f16d..2d762a2ecd81 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id) return cma_protocol_roce_dev_port(device, port_num); } -static bool cma_match_net_dev(const struct rdma_id_private *id_priv, - const struct net_device *net_dev) +static bool cma_match_net_dev(const struct rdma_cm_id *id, + const struct net_device *net_dev, + u8 port_num) { - const struct rdma_addr *addr = &id_priv->id.route.addr; + const struct rdma_addr *addr = &id->route.addr; if (!net_dev) /* This request is an AF_IB request or a RoCE request */ - return addr->src_addr.ss_family == AF_IB || - cma_protocol_roce(&id_priv->id); + return (!id->port_num || id->port_num == port_num) && + (addr->src_addr.ss_family == AF_IB || + cma_protocol_roce_dev_port(id->device, port_num)); return !addr->dev_addr.bound_dev_if || (net_eq(dev_net(net_dev), addr->dev_addr.net) && @@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener( hlist_for_each_entry(id_priv, &bind_list->owners, node) { if (cma_match_private_data(id_priv, ib_event->private_data)) { if (id_priv->id.device == cm_id->device && - cma_match_net_dev(id_priv, net_dev)) + cma_match_net_dev(&id_priv->id, net_dev, req->port)) return id_priv; list_for_each_entry(id_priv_dev, &id_priv->listen_list, listen_list) { if (id_priv_dev->id.device == cm_id->device && - cma_match_net_dev(id_priv_dev, net_dev)) + cma_match_net_dev(&id_priv_dev->id, net_dev, req->port)) return id_priv_dev; } } diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 870e56b6b25f..26833bfa639b 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -40,6 +40,7 @@ #include <linux/gfp.h> 
#include <rdma/ib_pma.h> +#include <linux/mlx4/driver.h> #include "mlx4_ib.h" enum { @@ -606,8 +607,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, struct ib_mad *mad) { struct mlx4_ib_dev *dev = to_mdev(ibdev); - int err; - int slave; + int err, other_port; + int slave = -1; u8 *slave_id; int is_eth = 0; @@ -625,7 +626,17 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n"); return -EINVAL; } - if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) { + err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave); + if (err && mlx4_is_mf_bonded(dev->dev)) { + other_port = (port == 1) ? 2 : 1; + err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave); + if (!err) { + port = other_port; + pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n", + slave, grh->dgid.raw, port, other_port); + } + } + if (err) { mlx4_ib_warn(ibdev, "failed matching grh\n"); return -ENOENT; } diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 8d133c40fa0e..c394376ebe06 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq) mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); ib_umem_release(msrq->umem); } else { - kfree(msrq->wrid); + kvfree(msrq->wrid); mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, &msrq->buf); mlx4_db_free(dev->dev, &msrq->db); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 7e97cb55a6bf..b0ec175cc6ba 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -43,6 +43,9 @@ #include <linux/mlx5/vport.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> +#include <linux/in.h> +#include <linux/etherdevice.h> +#include <linux/mlx5/fs.h> #include "user.h" #include "mlx5_ib.h" @@ -835,6 +838,457 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) return 0; } +static bool outer_header_zero(u32 *match_criteria) +{ + int size = MLX5_ST_SZ_BYTES(fte_match_param); + char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers); + + return outer_headers_c[0] == 0 && !memcmp(outer_headers_c, + outer_headers_c + 1, + size - 1); +} + +static int parse_flow_attr(u32 *match_c, u32 *match_v, + union ib_flow_spec *ib_spec) +{ + void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + switch (ib_spec->type) { + case IB_FLOW_SPEC_ETH: + if (ib_spec->size != sizeof(ib_spec->eth)) + return -EINVAL; + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dmac_47_16), + ib_spec->eth.mask.dst_mac); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + dmac_47_16), + ib_spec->eth.val.dst_mac); + + if (ib_spec->eth.mask.vlan_tag) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + vlan_tag, 1); + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + first_vid, ntohs(ib_spec->eth.val.vlan_tag)); + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + first_cfi, + ntohs(ib_spec->eth.mask.vlan_tag) >> 12); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + first_cfi, + ntohs(ib_spec->eth.val.vlan_tag) >> 12); 
+ + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + first_prio, + ntohs(ib_spec->eth.mask.vlan_tag) >> 13); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + first_prio, + ntohs(ib_spec->eth.val.vlan_tag) >> 13); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + ethertype, ntohs(ib_spec->eth.mask.ether_type)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + ethertype, ntohs(ib_spec->eth.val.ether_type)); + break; + case IB_FLOW_SPEC_IPV4: + if (ib_spec->size != sizeof(ib_spec->ipv4)) + return -EINVAL; + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + ethertype, 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + ethertype, ETH_P_IP); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &ib_spec->ipv4.mask.src_ip, + sizeof(ib_spec->ipv4.mask.src_ip)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &ib_spec->ipv4.val.src_ip, + sizeof(ib_spec->ipv4.val.src_ip)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &ib_spec->ipv4.mask.dst_ip, + sizeof(ib_spec->ipv4.mask.dst_ip)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &ib_spec->ipv4.val.dst_ip, + sizeof(ib_spec->ipv4.val.dst_ip)); + break; + case IB_FLOW_SPEC_TCP: + if (ib_spec->size != sizeof(ib_spec->tcp_udp)) + return -EINVAL; + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, + 0xff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, + IPPROTO_TCP); + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, + ntohs(ib_spec->tcp_udp.mask.src_port)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, + ntohs(ib_spec->tcp_udp.val.src_port)); + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, + ntohs(ib_spec->tcp_udp.mask.dst_port)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, + ntohs(ib_spec->tcp_udp.val.dst_port)); + break; + case IB_FLOW_SPEC_UDP: + if (ib_spec->size != sizeof(ib_spec->tcp_udp)) + return -EINVAL; + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, + 0xff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, + IPPROTO_UDP); + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, + ntohs(ib_spec->tcp_udp.mask.src_port)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, + ntohs(ib_spec->tcp_udp.val.src_port)); + + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, + ntohs(ib_spec->tcp_udp.mask.dst_port)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, + ntohs(ib_spec->tcp_udp.val.dst_port)); + break; + default: + return -EINVAL; + } + + return 0; +} + +/* If a flow could catch both multicast and unicast packets, + * it won't fall into the multicast flow steering table and this rule + * could steal other multicast packets. 
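+ * Hence a flow is treated as multicast-only here only when both the dst_mac mask and value are multicast addresses.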
+ */ +static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr) +{ + struct ib_flow_spec_eth *eth_spec; + + if (ib_attr->type != IB_FLOW_ATTR_NORMAL || + ib_attr->size < sizeof(struct ib_flow_attr) + + sizeof(struct ib_flow_spec_eth) || + ib_attr->num_of_specs < 1) + return false; + + eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1); + if (eth_spec->type != IB_FLOW_SPEC_ETH || + eth_spec->size != sizeof(*eth_spec)) + return false; + + return is_multicast_ether_addr(eth_spec->mask.dst_mac) && + is_multicast_ether_addr(eth_spec->val.dst_mac); +} + +static bool is_valid_attr(struct ib_flow_attr *flow_attr) +{ + union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); + bool has_ipv4_spec = false; + bool eth_type_ipv4 = true; + unsigned int spec_index; + + /* Validate that ethertype is correct */ + for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { + if (ib_spec->type == IB_FLOW_SPEC_ETH && + ib_spec->eth.mask.ether_type) { + if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) && + ib_spec->eth.val.ether_type == htons(ETH_P_IP))) + eth_type_ipv4 = false; + } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) { + has_ipv4_spec = true; + } + ib_spec = (void *)ib_spec + ib_spec->size; + } + return !has_ipv4_spec || eth_type_ipv4; +} + +static void put_flow_table(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *prio, bool ft_added) +{ + prio->refcount -= !!ft_added; + if (!prio->refcount) { + mlx5_destroy_flow_table(prio->flow_table); + prio->flow_table = NULL; + } +} + +static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) +{ + struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device); + struct mlx5_ib_flow_handler *handler = container_of(flow_id, + struct mlx5_ib_flow_handler, + ibflow); + struct mlx5_ib_flow_handler *iter, *tmp; + + mutex_lock(&dev->flow_db.lock); + + list_for_each_entry_safe(iter, tmp, &handler->list, list) { + mlx5_del_flow_rule(iter->rule); + list_del(&iter->list); + kfree(iter); + } + + mlx5_del_flow_rule(handler->rule); + put_flow_table(dev, &dev->flow_db.prios[handler->prio], true); + mutex_unlock(&dev->flow_db.lock); + + kfree(handler); + + return 0; +} + +#define MLX5_FS_MAX_TYPES 10 +#define MLX5_FS_MAX_ENTRIES 32000UL +static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, + struct ib_flow_attr *flow_attr) +{ + struct mlx5_flow_namespace *ns = NULL; + struct mlx5_ib_flow_prio *prio; + struct mlx5_flow_table *ft; + int num_entries; + int num_groups; + int priority; + int err = 0; + + if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { + if (flow_is_multicast_only(flow_attr)) + priority = MLX5_IB_FLOW_MCAST_PRIO; + else + priority = flow_attr->priority; + ns = mlx5_get_flow_namespace(dev->mdev, + MLX5_FLOW_NAMESPACE_BYPASS); + num_entries = MLX5_FS_MAX_ENTRIES; + num_groups = MLX5_FS_MAX_TYPES; + prio = &dev->flow_db.prios[priority]; + } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || + flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { + ns = mlx5_get_flow_namespace(dev->mdev, + MLX5_FLOW_NAMESPACE_LEFTOVERS); + build_leftovers_ft_param(&priority, + &num_entries, + &num_groups); + prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; + } + + if (!ns) + return ERR_PTR(-ENOTSUPP); + + ft = prio->flow_table; + if (!ft) { + ft = mlx5_create_auto_grouped_flow_table(ns, priority, + num_entries, + num_groups); + + if (!IS_ERR(ft)) { + prio->refcount = 0; + prio->flow_table = ft; + } else { + err = PTR_ERR(ft); + } + } + + return err ? 
ERR_PTR(err) : prio; +} + +static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + struct ib_flow_attr *flow_attr, + struct mlx5_flow_destination *dst) +{ + struct mlx5_flow_table *ft = ft_prio->flow_table; + struct mlx5_ib_flow_handler *handler; + void *ib_flow = flow_attr + 1; + u8 match_criteria_enable = 0; + unsigned int spec_index; + u32 *match_c; + u32 *match_v; + int err = 0; + + if (!is_valid_attr(flow_attr)) + return ERR_PTR(-EINVAL); + + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + handler = kzalloc(sizeof(*handler), GFP_KERNEL); + if (!handler || !match_c || !match_v) { + err = -ENOMEM; + goto free; + } + + INIT_LIST_HEAD(&handler->list); + + for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { + err = parse_flow_attr(match_c, match_v, ib_flow); + if (err < 0) + goto free; + + ib_flow += ((union ib_flow_spec *)ib_flow)->size; + } + + /* Outer header support only */ + match_criteria_enable = (!outer_header_zero(match_c)) << 0; + handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable, + match_c, match_v, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + dst); + + if (IS_ERR(handler->rule)) { + err = PTR_ERR(handler->rule); + goto free; + } + + handler->prio = ft_prio - dev->flow_db.prios; + + ft_prio->flow_table = ft; +free: + if (err) + kfree(handler); + kfree(match_c); + kfree(match_v); + return err ? ERR_PTR(err) : handler; +} + +enum { + LEFTOVERS_MC, + LEFTOVERS_UC, +}; + +static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + struct ib_flow_attr *flow_attr, + struct mlx5_flow_destination *dst) +{ + struct mlx5_ib_flow_handler *handler_ucast = NULL; + struct mlx5_ib_flow_handler *handler = NULL; + + static struct { + struct ib_flow_attr flow_attr; + struct ib_flow_spec_eth eth_flow; + } leftovers_specs[] = { + [LEFTOVERS_MC] = { + .flow_attr = { + .num_of_specs = 1, + .size = sizeof(leftovers_specs[0]) + }, + .eth_flow = { + .type = IB_FLOW_SPEC_ETH, + .size = sizeof(struct ib_flow_spec_eth), + .mask = {.dst_mac = {0x1} }, + .val = {.dst_mac = {0x1} } + } + }, + [LEFTOVERS_UC] = { + .flow_attr = { + .num_of_specs = 1, + .size = sizeof(leftovers_specs[0]) + }, + .eth_flow = { + .type = IB_FLOW_SPEC_ETH, + .size = sizeof(struct ib_flow_spec_eth), + .mask = {.dst_mac = {0x1} }, + .val = {.dst_mac = {} } + } + } + }; + + handler = create_flow_rule(dev, ft_prio, + &leftovers_specs[LEFTOVERS_MC].flow_attr, + dst); + if (!IS_ERR(handler) && + flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { + handler_ucast = create_flow_rule(dev, ft_prio, + &leftovers_specs[LEFTOVERS_UC].flow_attr, + dst); + if (IS_ERR(handler_ucast)) { + kfree(handler); + handler = handler_ucast; + } else { + list_add(&handler_ucast->list, &handler->list); + } + } + + return handler; +} + +static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, + struct ib_flow_attr *flow_attr, + int domain) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->device); + struct mlx5_ib_flow_handler *handler = NULL; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_ib_flow_prio *ft_prio; + int err; + + if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) + return ERR_PTR(-ENOSPC); + + if (domain != IB_FLOW_DOMAIN_USER || + flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || + flow_attr->flags) + return ERR_PTR(-EINVAL); + + dst = kzalloc(sizeof(*dst), GFP_KERNEL); + if 
(!dst) + return ERR_PTR(-ENOMEM); + + mutex_lock(&dev->flow_db.lock); + + ft_prio = get_flow_table(dev, flow_attr); + if (IS_ERR(ft_prio)) { + err = PTR_ERR(ft_prio); + goto unlock; + } + + dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn; + + if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { + handler = create_flow_rule(dev, ft_prio, flow_attr, + dst); + } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || + flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { + handler = create_leftovers_rule(dev, ft_prio, flow_attr, + dst); + } else { + err = -EINVAL; + goto destroy_ft; + } + + if (IS_ERR(handler)) { + err = PTR_ERR(handler); + handler = NULL; + goto destroy_ft; + } + + ft_prio->refcount++; + mutex_unlock(&dev->flow_db.lock); + kfree(dst); + + return &handler->ibflow; + +destroy_ft: + put_flow_table(dev, ft_prio, false); +unlock: + mutex_unlock(&dev->flow_db.lock); + kfree(dst); + kfree(handler); + return ERR_PTR(err); +} + static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mlx5_ib_dev *dev = to_mdev(ibqp->device); @@ -1439,10 +1893,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); } + if (mlx5_ib_port_link_layer(&dev->ib_dev) == + IB_LINK_LAYER_ETHERNET) { + dev->ib_dev.create_flow = mlx5_ib_create_flow; + dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; + dev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); + } err = init_node_data(dev); if (err) goto err_dealloc; + mutex_init(&dev->flow_db.lock); mutex_init(&dev->cap_mask_mutex); err = create_dev_resources(&dev->devr); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 633347260b79..1474cccd1e0f 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -105,6 +105,36 @@ struct mlx5_ib_pd { u32 pdn; }; +#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1) +#define MLX5_IB_FLOW_LAST_PRIO (MLX5_IB_FLOW_MCAST_PRIO - 1) +#if (MLX5_IB_FLOW_LAST_PRIO <= 0) +#error "Invalid number of bypass priorities" +#endif +#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1) + +#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1) +struct mlx5_ib_flow_prio { + struct mlx5_flow_table *flow_table; + unsigned int refcount; +}; + +struct mlx5_ib_flow_handler { + struct list_head list; + struct ib_flow ibflow; + unsigned int prio; + struct mlx5_flow_rule *rule; +}; + +struct mlx5_ib_flow_db { + struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT]; + /* Protect flow steering bypass flow tables + * when add/del flow rules. + * only single add/removal of flow steering rule could be done + * simultaneously. 
+ */ + struct mutex lock; +}; + /* Use macros here so that don't have to duplicate * enum ib_send_flags and enum ib_qp_type for low-level driver */ @@ -171,9 +201,21 @@ struct mlx5_ib_pfault { struct mlx5_pagefault mpfault; }; +struct mlx5_ib_rq { + u32 tirn; +}; + +struct mlx5_ib_raw_packet_qp { + struct mlx5_ib_rq rq; +}; + struct mlx5_ib_qp { struct ib_qp ibqp; - struct mlx5_core_qp mqp; + union { + struct mlx5_core_qp mqp; + struct mlx5_ib_raw_packet_qp raw_packet_qp; + }; + struct mlx5_buf buf; struct mlx5_db db; @@ -431,6 +473,7 @@ struct mlx5_ib_dev { */ struct srcu_struct mr_srcu; #endif + struct mlx5_ib_flow_db flow_db; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index ae80590aabdf..040bb8b5cb15 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -232,6 +232,10 @@ struct phy_info { u16 interface_type; }; +enum ocrdma_flags { + OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01 +}; + struct ocrdma_dev { struct ib_device ibdev; struct ocrdma_dev_attr attr; @@ -287,6 +291,7 @@ struct ocrdma_dev { atomic_t update_sl; u16 pvid; u32 asic_id; + u32 flags; ulong last_stats_time; struct mutex stats_lock; /* provide synch for debugfs operations */ @@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state) (state & OCRDMA_STATE_FLAG_SYNC); } +static inline u8 ocrdma_get_ae_link_state(u32 ae_state) +{ + return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT); +} + #endif diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 30f67bebffa3..283ca842ff74 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE); cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE); + /* Request link events on this MQ. 
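+ * Link state changes are then delivered as OCRDMA_ASYNC_LINK_EVE_CODE async CQEs and handled by ocrdma_process_link_state().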
*/ + cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE); cmd->async_cqid_ringsize = cq->id; cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << @@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, } } +static void ocrdma_process_link_state(struct ocrdma_dev *dev, + struct ocrdma_ae_mcqe *cqe) +{ + struct ocrdma_ae_lnkst_mcqe *evt; + u8 lstate; + + evt = (struct ocrdma_ae_lnkst_mcqe *)cqe; + lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn); + + if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK)) + return; + + if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT) + ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK)); +} + static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) { /* async CQE processing */ struct ocrdma_ae_mcqe *cqe = ae_cqe; u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >> OCRDMA_AE_MCQE_EVENT_CODE_SHIFT; - - if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE) + switch (evt_code) { + case OCRDMA_ASYNC_LINK_EVE_CODE: + ocrdma_process_link_state(dev, cqe); + break; + case OCRDMA_ASYNC_RDMA_EVE_CODE: ocrdma_dispatch_ibevent(dev, cqe); - else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE) + break; + case OCRDMA_ASYNC_GRP5_EVE_CODE: ocrdma_process_grp5_aync(dev, cqe); - else + break; + default: pr_err("%s(%d) invalid evt code=0x%x\n", __func__, dev->id, evt_code); + } } static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) @@ -1363,7 +1387,8 @@ mbx_err: return status; } -int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) +int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed, + u8 *lnk_state) { int status = -ENOMEM; struct ocrdma_get_link_speed_rsp *rsp; @@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) goto mbx_err; rsp = (struct ocrdma_get_link_speed_rsp *)cmd; - *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) - >> OCRDMA_PHY_PS_SHIFT; + if (lnk_speed) + *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) + >> OCRDMA_PHY_PS_SHIFT; + if (lnk_state) + *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK); mbx_err: kfree(cmd); @@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); - if (vlan_id < 0x1000) { - if (dev->pfc_state) { - vlan_id = 0; + if (vlan_id == 0xFFFF) + vlan_id = 0; + if (vlan_id || dev->pfc_state) { + if (!vlan_id) { pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", dev->id); pr_err("ocrdma%d:Using VLAN 0 for this connection\n", diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index 7ed885c1851e..ebc1f442aec3 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed, bool solicited, u16 cqe_popped); /* verbs specific mailbox commands */ -int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); +int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed, + u8 *lnk_st); int ocrdma_query_config(struct ocrdma_dev *, struct ocrdma_mbx_query_config *config); @@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev); void ocrdma_init_service_level(struct ocrdma_dev *); void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); void ocrdma_free_pd_range(struct ocrdma_dev *dev); +void ocrdma_update_link_state(struct 
ocrdma_dev *dev, u8 lstate); #endif /* __OCRDMA_HW_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 62b7009daa6c..3afb40b85159 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) { int status = 0, i; + u8 lstate = 0; struct ocrdma_dev *dev; dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); @@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) if (status) goto alloc_err; + /* Query Link state and update */ + status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate); + if (!status) + ocrdma_update_link_state(dev, lstate); + for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) goto sysfs_err; @@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev) ocrdma_remove_free(dev); } -static int ocrdma_open(struct ocrdma_dev *dev) +static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev) { struct ib_event port_event; @@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev) return 0; } -static int ocrdma_close(struct ocrdma_dev *dev) +static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev) { - int i; - struct ocrdma_qp *qp, **cur_qp; struct ib_event err_event; - struct ib_qp_attr attrs; - int attr_mask = IB_QP_STATE; - - attrs.qp_state = IB_QPS_ERR; - mutex_lock(&dev->dev_lock); - if (dev->qp_tbl) { - cur_qp = dev->qp_tbl; - for (i = 0; i < OCRDMA_MAX_QP; i++) { - qp = cur_qp[i]; - if (qp && qp->ibqp.qp_type != IB_QPT_GSI) { - /* change the QP state to ERROR */ - _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); - - err_event.event = IB_EVENT_QP_FATAL; - err_event.element.qp = &qp->ibqp; - err_event.device = &dev->ibdev; - ib_dispatch_event(&err_event); - } - } - } - mutex_unlock(&dev->dev_lock); err_event.event = IB_EVENT_PORT_ERR; err_event.element.port_num = 1; @@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev) static void ocrdma_shutdown(struct ocrdma_dev *dev) { - ocrdma_close(dev); + ocrdma_dispatch_port_error(dev); ocrdma_remove(dev); } @@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev) static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) { switch (event) { - case BE_DEV_UP: - ocrdma_open(dev); - break; - case BE_DEV_DOWN: - ocrdma_close(dev); - break; case BE_DEV_SHUTDOWN: ocrdma_shutdown(dev); break; + default: + break; } } +void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate) +{ + if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) { + dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT; + if (!lstate) + return; + } + + if (!lstate) + ocrdma_dispatch_port_error(dev); + else + ocrdma_dispatch_port_active(dev); +} + static struct ocrdma_driver ocrdma_drv = { .name = "ocrdma_driver", .add = ocrdma_add, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 6a38268bbe9f..99dd6fdf06d7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe { u32 valid_ae_event; }; -#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 -#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 +enum ocrdma_async_event_code { + OCRDMA_ASYNC_LINK_EVE_CODE = 0x01, + OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05, + OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14 +}; enum 
ocrdma_async_grp5_events { OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, @@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE { OCRDMA_MAX_ASYNC_ERRORS }; +struct ocrdma_ae_lnkst_mcqe { + u32 speed_state_ptn; + u32 qos_reason_falut; + u32 evt_tag; + u32 valid_ae_event; +}; + +enum { + OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F, + OCRDMA_AE_LSC_PT_SHIFT = 0x06, + OCRDMA_AE_LSC_PT_MASK = (0x03 << + OCRDMA_AE_LSC_PT_SHIFT), + OCRDMA_AE_LSC_LS_SHIFT = 0x08, + OCRDMA_AE_LSC_LS_MASK = (0xFF << + OCRDMA_AE_LSC_LS_SHIFT), + OCRDMA_AE_LSC_LD_SHIFT = 0x10, + OCRDMA_AE_LSC_LD_MASK = (0xFF << + OCRDMA_AE_LSC_LD_SHIFT), + OCRDMA_AE_LSC_PPS_SHIFT = 0x18, + OCRDMA_AE_LSC_PPS_MASK = (0xFF << + OCRDMA_AE_LSC_PPS_SHIFT), + OCRDMA_AE_LSC_PPF_MASK = 0xFF, + OCRDMA_AE_LSC_ER_SHIFT = 0x08, + OCRDMA_AE_LSC_ER_MASK = (0xFF << + OCRDMA_AE_LSC_ER_SHIFT), + OCRDMA_AE_LSC_QOS_SHIFT = 0x10, + OCRDMA_AE_LSC_QOS_MASK = (0xFFFF << + OCRDMA_AE_LSC_QOS_SHIFT) +}; + +enum { + OCRDMA_AE_LSC_PLINK_DOWN = 0x00, + OCRDMA_AE_LSC_PLINK_UP = 0x01, + OCRDMA_AE_LSC_LLINK_DOWN = 0x02, + OCRDMA_AE_LSC_LLINK_MASK = 0x02, + OCRDMA_AE_LSC_LLINK_UP = 0x03 +}; + /* mailbox command request and responses */ enum { OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, @@ -676,7 +717,7 @@ enum { OCRDMA_PHY_PFLT_SHIFT = 0x18, OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, OCRDMA_QOS_LNKSP_SHIFT = 0x10, - OCRDMA_LLST_MASK = 0xFF, + OCRDMA_LINK_ST_MASK = 0x01, OCRDMA_PLFC_MASK = 0x00000400, OCRDMA_PLFC_SHIFT = 0x8, OCRDMA_PLRFC_MASK = 0x00000200, @@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp { u32 pflt_pps_ld_pnum; u32 qos_lsp; - u32 res_lls; + u32 res_lnk_st; }; enum { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 583001bcfb8f..76e96f97b3f6 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev, int status; u8 speed; - status = ocrdma_mbx_get_link_speed(dev, &speed); + status = ocrdma_mbx_get_link_speed(dev, &speed, NULL); if (status) speed = OCRDMA_PHYS_LINK_SPEED_ZERO; diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c index 932d07307454..da326090c2b0 100644 --- a/drivers/input/joystick/db9.c +++ b/drivers/input/joystick/db9.c @@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp) return; } + memset(&db9_parport_cb, 0, sizeof(db9_parport_cb)); db9_parport_cb.flags = PARPORT_FLAG_EXCL; pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx); diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c index 5a672dcac0d8..eae14d512353 100644 --- a/drivers/input/joystick/gamecon.c +++ b/drivers/input/joystick/gamecon.c @@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp) pads = gc_cfg[port_idx].args + 1; n_pads = gc_cfg[port_idx].nargs - 1; + memset(&gc_parport_cb, 0, sizeof(gc_parport_cb)); gc_parport_cb.flags = PARPORT_FLAG_EXCL; pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb, diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index 9f5bca26bd2f..77f575dd0901 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp) n_buttons = tgfx_cfg[port_idx].args + 1; n_devs = tgfx_cfg[port_idx].nargs - 1; + memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb)); tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb, diff 
--git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index 9c07fe911075..70a893a17467 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp) w->parport = pp; + memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb)); walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; walkera0701_parport_cb.irq_func = walkera0701_irq_handler; walkera0701_parport_cb.private = w; diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c index 4bf678541496..d5994a745ffa 100644 --- a/drivers/input/misc/arizona-haptics.c +++ b/drivers/input/misc/arizona-haptics.c @@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work) ret = regmap_update_bits(arizona->regmap, ARIZONA_HAPTICS_CONTROL_1, - ARIZONA_HAP_CTRL_MASK, - 1 << ARIZONA_HAP_CTRL_SHIFT); + ARIZONA_HAP_CTRL_MASK, 0); if (ret != 0) { dev_err(arizona->dev, "Failed to stop haptics: %d\n", ret); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 5e1665bbaa0b..2f589857a039 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -41,6 +41,7 @@ #define DRIVER_NAME "elan_i2c" #define ELAN_DRIVER_VERSION "1.6.1" +#define ELAN_VENDOR_ID 0x04f3 #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 #define ETP_FINGER_WIDTH 15 @@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data) input->name = "Elan Touchpad"; input->id.bustype = BUS_I2C; + input->id.vendor = ELAN_VENDOR_ID; + input->id.product = data->product_id; input_set_drvdata(input, data); error = input_mt_init_slots(input, ETP_MAX_FINGERS, diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 92c31b8f8fb4..1edfac78d4ac 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c @@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp) { struct pardev_cb parkbd_parport_cb; + memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb)); parkbd_parport_cb.irq_func = parkbd_interrupt; parkbd_parport_cb.flags = PARPORT_FLAG_EXCL; diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index e7f966da6efa..78ca44840d60 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); + /* Verify that a device really has an endpoint */ + if (intf->altsetting[0].desc.bNumEndpoints < 1) { + dev_err(&intf->dev, + "interface has %d endpoints, but must have minimum 1\n", + intf->altsetting[0].desc.bNumEndpoints); + err = -EINVAL; + goto fail3; + } endpoint = &intf->altsetting[0].endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives @@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) if (i == ARRAY_SIZE(speeds)) { dev_info(&intf->dev, "Aiptek tried all speeds, no sane response\n"); + err = -EINVAL; goto fail3; } diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index c5622058c22b..2d5794ec338b 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -2487,6 +2487,31 @@ static struct mxt_acpi_platform_data 
samus_platform_data[] = { { } }; +static unsigned int chromebook_tp_buttons[] = { + KEY_RESERVED, + KEY_RESERVED, + KEY_RESERVED, + KEY_RESERVED, + KEY_RESERVED, + BTN_LEFT +}; + +static struct mxt_acpi_platform_data chromebook_platform_data[] = { + { + /* Touchpad */ + .hid = "ATML0000", + .pdata = { + .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons), + .t19_keymap = chromebook_tp_buttons, + }, + }, + { + /* Touchscreen */ + .hid = "ATML0001", + }, + { } +}; + static const struct dmi_system_id mxt_dmi_table[] = { { /* 2015 Google Pixel */ @@ -2497,6 +2522,14 @@ static const struct dmi_system_id mxt_dmi_table[] = { }, .driver_data = samus_platform_data, }, + { + /* Other Google Chromebooks */ + .ident = "Chromebook", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), + }, + .driver_data = chromebook_platform_data, + }, { } }; @@ -2701,6 +2734,7 @@ static const struct i2c_device_id mxt_id[] = { { "qt602240_ts", 0 }, { "atmel_mxt_ts", 0 }, { "atmel_mxt_tp", 0 }, + { "maxtouch", 0 }, { "mXT224", 0 }, { } }; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 17cc20ef4923..ac09855fa435 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev) disable_irq(client->irq); - if (device_may_wakeup(dev) || ts->keep_power_in_suspend) { + if (device_may_wakeup(dev)) { + /* + * The device will automatically enter idle mode + * that has reduced power consumption. + */ + ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0); + } else if (ts->keep_power_in_suspend) { for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { error = elants_i2c_send(client, set_sleep_cmd, sizeof(set_sleep_cmd)); @@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev) dev_err(&client->dev, "suspend command failed: %d\n", error); } - - if (device_may_wakeup(dev)) - ts->wake_irq_enabled = - (enable_irq_wake(client->irq) == 0); } else { elants_i2c_power_off(ts); } @@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev) int retry_cnt; int error; - if (device_may_wakeup(dev) && ts->wake_irq_enabled) - disable_irq_wake(client->irq); - - if (ts->keep_power_in_suspend) { + if (device_may_wakeup(dev)) { + if (ts->wake_irq_enabled) + disable_irq_wake(client->irq); + elants_i2c_sw_reset(client); + } else if (ts->keep_power_in_suspend) { for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { error = elants_i2c_send(client, set_active_cmd, sizeof(set_active_cmd)); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index d214f22ed305..b6c4d03de340 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -444,7 +444,7 @@ static void sur40_process_video(struct sur40_state *sur40) goto err_poll; /* mark as finished */ - v4l2_get_timestamp(&new_buf->vb.timestamp); + new_buf->vb.vb2_buf.timestamp = ktime_get_ns(); new_buf->vb.sequence = sur40->sequence++; new_buf->vb.field = V4L2_FIELD_NONE; vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -644,22 +644,21 @@ static void sur40_disconnect(struct usb_interface *interface) * minimum number: many DMA engines need a minimum of 2 buffers in the * queue and you need to have another available for userspace processing. 
*/ -static int sur40_queue_setup(struct vb2_queue *q, const void *parg, +static int sur40_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct sur40_state *sur40 = vb2_get_drv_priv(q); if (q->num_buffers + *nbuffers < 3) *nbuffers = 3 - q->num_buffers; + alloc_ctxs[0] = sur40->alloc_ctx; - if (fmt && fmt->fmt.pix.sizeimage < sur40_video_format.sizeimage) - return -EINVAL; + if (*nplanes) + return sizes[0] < sur40_video_format.sizeimage ? -EINVAL : 0; *nplanes = 1; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : sur40_video_format.sizeimage; - alloc_ctxs[0] = sur40->alloc_ctx; + sizes[0] = sur40_video_format.sizeimage; return 0; } diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index d21d4edf7236..7caf2fa237f2 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault) } } +static bool access_error(struct vm_area_struct *vma, struct fault *fault) +{ + unsigned long requested = 0; + + if (fault->flags & PPR_FAULT_EXEC) + requested |= VM_EXEC; + + if (fault->flags & PPR_FAULT_READ) + requested |= VM_READ; + + if (fault->flags & PPR_FAULT_WRITE) + requested |= VM_WRITE; + + return (requested & ~vma->vm_flags) != 0; +} + static void do_fault(struct work_struct *work) { struct fault *fault = container_of(work, struct fault, work); @@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work) goto out; } - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { - /* handle_mm_fault would BUG_ON() */ + /* Check if we have the right permissions on the vma */ + if (access_error(vma, fault)) { up_read(&mm->mmap_sem); handle_fault_error(fault); goto out; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 3a20db4f8604..72d6182666cb 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -21,10 +21,13 @@ #include <linux/device.h> #include <linux/dma-iommu.h> +#include <linux/gfp.h> #include <linux/huge_mm.h> #include <linux/iommu.h> #include <linux/iova.h> #include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/vmalloc.h> int iommu_dma_init(void) { @@ -191,6 +194,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp) { struct page **pages; unsigned int i = 0, array_size = count * sizeof(*pages); + unsigned int order = MAX_ORDER; if (array_size <= PAGE_SIZE) pages = kzalloc(array_size, GFP_KERNEL); @@ -204,14 +208,15 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp) while (count) { struct page *page = NULL; - int j, order = __fls(count); + int j; /* * Higher-order allocations are a convenience rather * than a necessity, hence using __GFP_NORETRY until * falling back to single-page allocations. 
*/ - for (order = min(order, MAX_ORDER); order > 0; order--) { + for (order = min_t(unsigned int, order, __fls(count)); + order > 0; order--) { page = alloc_pages(gfp | __GFP_NORETRY, order); if (!page) continue; @@ -453,7 +458,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, size_t s_offset = iova_offset(iovad, s->offset); size_t s_length = s->length; - sg_dma_address(s) = s->offset; + sg_dma_address(s) = s_offset; sg_dma_len(s) = s_length; s->offset -= s_offset; s_length = iova_align(iovad, s_length + s_offset); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f1042daef9ad..ac7387686ddc 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, sg_res = aligned_nrpages(sg->offset, sg->length); sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; sg->dma_length = sg->length; - pteval = (sg_phys(sg) & PAGE_MASK) | prot; + pteval = page_to_phys(sg_page(sg)) | prot; phys_pfn = pteval >> VTD_PAGE_SHIFT; } @@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, for_each_sg(sglist, sg, nelems, i) { BUG_ON(!sg_page(sg)); - sg->dma_address = sg_phys(sg); + sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; sg->dma_length = sg->length; } return nelems; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index c69e3f9ec958..50464833d0b8 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -484,6 +484,23 @@ struct page_req_dsc { }; #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) + +static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) +{ + unsigned long requested = 0; + + if (req->exe_req) + requested |= VM_EXEC; + + if (req->rd_req) + requested |= VM_READ; + + if (req->wr_req) + requested |= VM_WRITE; + + return (requested & ~vma->vm_flags) != 0; +} + static irqreturn_t prq_event_thread(int irq, void *d) { struct intel_iommu *iommu = d; @@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (!vma || address < vma->vm_start) goto invalid; + if (access_error(vma, req)) + goto invalid; + ret = handle_mm_fault(svm->mm, vma, address, req->wr_req ? FAULT_FLAG_WRITE : 0); if (ret & VM_FAULT_ERROR) diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 1fae1881648c..c12ba4516df2 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void) * should have X86_FEATURE_CX16 support, this has been confirmed * with Intel hardware guys. 
*/ - if ( cpu_has_cx16 ) + if (boot_cpu_has(X86_FEATURE_CX16)) intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP; for_each_iommu(iommu, drhd) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index abae363c7b9b..0e3b0092ec92 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); for_each_sg(sg, s, nents, i) { - phys_addr_t phys = sg_phys(s); + phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; /* * We are mapping on IOMMU page boundaries, so offset within diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 8cf605fa9946..dfb868e2d129 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -295,7 +295,7 @@ static struct iommu_gather_ops ipmmu_gather_ops = { static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) { - phys_addr_t ttbr; + u64 ttbr; /* * Allocate the page table operations. diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 4d7294e5d982..11fc2a27fa2e 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -8,6 +8,11 @@ config ARM_GIC select IRQ_DOMAIN_HIERARCHY select MULTI_IRQ_HANDLER +config ARM_GIC_MAX_NR + int + default 2 if ARCH_REALVIEW + default 1 + config ARM_GIC_V2M bool depends on ARM_GIC @@ -27,6 +32,14 @@ config ARM_GIC_V3_ITS bool select PCI_MSI_IRQ_DOMAIN +config HISILICON_IRQ_MBIGEN + bool "Support mbigen interrupt controller" + default n + depends on ARM_GIC_V3 && ARM_GIC_V3_ITS && GENERIC_MSI_IRQ_DOMAIN + help + Enable the mbigen interrupt controller used on + Hisilicon platform. + config ARM_NVIC bool select IRQ_DOMAIN @@ -138,6 +151,12 @@ config TB10X_IRQC select IRQ_DOMAIN select GENERIC_IRQ_CHIP +config TS4800_IRQ + tristate "TS-4800 IRQ controller" + select IRQ_DOMAIN + help + Support for the TS-4800 FPGA IRQ controller + config VERSATILE_FPGA_IRQ bool select IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 177f78f6e6d6..d4c2e4ebc308 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -21,9 +21,11 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o +obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o @@ -39,6 +41,7 @@ obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_ST_IRQCHIP) += irq-st.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o +obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index f68708281fcf..963065a0d774 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -21,6 +21,9 @@ #include <linux/irqdomain.h> #include <asm/exception.h> +#define LOCAL_CONTROL 0x000 +#define LOCAL_PRESCALER 0x008 + /* * The low 2 bits identify the CPU that the GPU IRQ goes to, and 
the * next 2 bits identify the CPU that the GPU FIQ goes to. @@ -50,14 +53,16 @@ /* Same status bits as above, but for FIQ. */ #define LOCAL_FIQ_PENDING0 0x070 /* - * Mailbox0 write-to-set bits. There are 16 mailboxes, 4 per CPU, and + * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and * these bits are organized by mailbox number and then CPU number. We * use mailbox 0 for IPIs. The mailbox's interrupt is raised while * any bit is set. */ #define LOCAL_MAILBOX0_SET0 0x080 -/* Mailbox0 write-to-clear bits. */ +#define LOCAL_MAILBOX3_SET0 0x08c +/* Mailbox write-to-clear bits. */ #define LOCAL_MAILBOX0_CLR0 0x0c0 +#define LOCAL_MAILBOX3_CLR0 0x0cc #define LOCAL_IRQ_CNTPSIRQ 0 #define LOCAL_IRQ_CNTPNSIRQ 1 @@ -162,7 +167,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs) u32 stat; stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu); - if (stat & 0x10) { + if (stat & BIT(LOCAL_IRQ_MAILBOX0)) { #ifdef CONFIG_SMP void __iomem *mailbox0 = (intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu); @@ -172,7 +177,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs) writel(1 << ipi, mailbox0); handle_IPI(ipi, regs); #endif - } else { + } else if (stat) { u32 hwirq = ffs(stat) - 1; handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); @@ -217,6 +222,24 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { .notifier_call = bcm2836_arm_irqchip_cpu_notify, .priority = 100, }; + +int __init bcm2836_smp_boot_secondary(unsigned int cpu, + struct task_struct *idle) +{ + unsigned long secondary_startup_phys = + (unsigned long)virt_to_phys((void *)secondary_startup); + + dsb(); + writel(secondary_startup_phys, + intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu); + + return 0; +} + +static const struct smp_operations bcm2836_smp_ops __initconst = { + .smp_boot_secondary = bcm2836_smp_boot_secondary, +}; + #endif static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = { @@ -234,9 +257,31 @@ bcm2836_arm_irqchip_smp_init(void) register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier); set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); + smp_set_ops(&bcm2836_smp_ops); #endif } +/* + * The LOCAL_IRQ_CNT* timer firings are based off of the external + * oscillator with some scaling. The firmware sets up CNTFRQ to + * report 19.2Mhz, but doesn't set up the scaling registers. + */ +static void bcm2835_init_local_timer_frequency(void) +{ + /* + * Set the timer to source from the 19.2Mhz crystal clock (bit + * 8 unset), and only increment by 1 instead of 2 (bit 9 + * unset). 
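+ * Both bits live in LOCAL_CONTROL, so writing 0 clears them.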
+ */ + writel(0, intc.base + LOCAL_CONTROL); + + /* + * Set the timer prescaler to 1:1 (timer freq = input freq * + * 2**31 / prescaler) + */ + writel(0x80000000, intc.base + LOCAL_PRESCALER); +} + static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node, struct device_node *parent) { @@ -246,6 +291,8 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node, node->full_name); } + bcm2835_init_local_timer_frequency(); + intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1, &bcm2836_arm_irqchip_intc_ops, NULL); diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c new file mode 100644 index 000000000000..aa46eb280a7f --- /dev/null +++ b/drivers/irqchip/irq-gic-realview.c @@ -0,0 +1,43 @@ +/* + * Special GIC quirks for the ARM RealView + * Copyright (C) 2015 Linus Walleij + */ +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/bitops.h> +#include <linux/irqchip.h> +#include <linux/irqchip/arm-gic.h> + +#define REALVIEW_SYS_LOCK_OFFSET 0x20 +#define REALVIEW_PB11MP_SYS_PLD_CTRL1 0x74 +#define VERSATILE_LOCK_VAL 0xA05F +#define PLD_INTMODE_MASK BIT(22)|BIT(23)|BIT(24) +#define PLD_INTMODE_LEGACY 0x0 +#define PLD_INTMODE_NEW_DCC BIT(22) +#define PLD_INTMODE_NEW_NO_DCC BIT(23) +#define PLD_INTMODE_FIQ_ENABLE BIT(24) + +static int __init +realview_gic_of_init(struct device_node *node, struct device_node *parent) +{ + static struct regmap *map; + + /* The PB11MPCore GIC needs to be configured in the syscon */ + map = syscon_regmap_lookup_by_compatible("arm,realview-pb11mp-syscon"); + if (!IS_ERR(map)) { + /* new irq mode with no DCC */ + regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, + VERSATILE_LOCK_VAL); + regmap_update_bits(map, REALVIEW_PB11MP_SYS_PLD_CTRL1, + PLD_INTMODE_NEW_NO_DCC, + PLD_INTMODE_MASK); + regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000); + pr_info("TC11MP GIC: set up interrupt controller to NEW mode, no DCC\n"); + } else { + pr_err("TC11MP GIC setup: could not find syscon\n"); + return -ENXIO; + } + return gic_of_init(node, parent); +} +IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 87f8d104acab..c779f83e511d 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -15,9 +15,11 @@ #define pr_fmt(fmt) "GICv2m: " fmt +#include <linux/acpi.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> +#include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/slab.h> @@ -55,7 +57,7 @@ static DEFINE_SPINLOCK(v2m_lock); struct v2m_data { struct list_head entry; - struct device_node *node; + struct fwnode_handle *fwnode; struct resource res; /* GICv2m resource */ void __iomem *base; /* GICv2m virt address */ u32 spi_start; /* The SPI number that MSIs start */ @@ -138,6 +140,11 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, fwspec.param[0] = 0; fwspec.param[1] = hwirq - 32; fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; } else { return -EINVAL; } @@ -254,7 +261,9 @@ static void gicv2m_teardown(void) list_del(&v2m->entry); kfree(v2m->bm); iounmap(v2m->base); - of_node_put(v2m->node); + of_node_put(to_of_node(v2m->fwnode)); + if (is_fwnode_irqchip(v2m->fwnode)) + 
irq_domain_free_fwnode(v2m->fwnode); kfree(v2m); } } @@ -268,7 +277,7 @@ static int gicv2m_allocate_domains(struct irq_domain *parent) if (!v2m) return 0; - inner_domain = irq_domain_create_tree(of_node_to_fwnode(v2m->node), + inner_domain = irq_domain_create_tree(v2m->fwnode, &gicv2m_domain_ops, v2m); if (!inner_domain) { pr_err("Failed to create GICv2m domain\n"); @@ -277,10 +286,10 @@ static int gicv2m_allocate_domains(struct irq_domain *parent) inner_domain->bus_token = DOMAIN_BUS_NEXUS; inner_domain->parent = parent; - pci_domain = pci_msi_create_irq_domain(of_node_to_fwnode(v2m->node), + pci_domain = pci_msi_create_irq_domain(v2m->fwnode, &gicv2m_msi_domain_info, inner_domain); - plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(v2m->node), + plat_domain = platform_msi_create_irq_domain(v2m->fwnode, &gicv2m_pmsi_domain_info, inner_domain); if (!pci_domain || !plat_domain) { @@ -296,8 +305,9 @@ static int gicv2m_allocate_domains(struct irq_domain *parent) return 0; } -static int __init gicv2m_init_one(struct device_node *node, - struct irq_domain *parent) +static int __init gicv2m_init_one(struct fwnode_handle *fwnode, + u32 spi_start, u32 nr_spis, + struct resource *res) { int ret; struct v2m_data *v2m; @@ -309,13 +319,9 @@ static int __init gicv2m_init_one(struct device_node *node, } INIT_LIST_HEAD(&v2m->entry); - v2m->node = node; + v2m->fwnode = fwnode; - ret = of_address_to_resource(node, 0, &v2m->res); - if (ret) { - pr_err("Failed to allocate v2m resource.\n"); - goto err_free_v2m; - } + memcpy(&v2m->res, res, sizeof(struct resource)); v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); if (!v2m->base) { @@ -324,10 +330,9 @@ static int __init gicv2m_init_one(struct device_node *node, goto err_free_v2m; } - if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) && - !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) { - pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n", - v2m->spi_start, v2m->nr_spis); + if (spi_start && nr_spis) { + v2m->spi_start = spi_start; + v2m->nr_spis = nr_spis; } else { u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); @@ -359,10 +364,9 @@ static int __init gicv2m_init_one(struct device_node *node, } list_add_tail(&v2m->entry, &v2m_nodes); - pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, - (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, - v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); + pr_info("range%pR, SPI[%d:%d]\n", res, + v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1)); return 0; err_iounmap: @@ -377,19 +381,36 @@ static struct of_device_id gicv2m_device_id[] = { {}, }; -int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) +static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent) { int ret = 0; + struct device_node *node = to_of_node(parent_handle); struct device_node *child; for (child = of_find_matching_node(node, gicv2m_device_id); child; child = of_find_matching_node(child, gicv2m_device_id)) { + u32 spi_start = 0, nr_spis = 0; + struct resource res; + if (!of_find_property(child, "msi-controller", NULL)) continue; - ret = gicv2m_init_one(child, parent); + ret = of_address_to_resource(child, 0, &res); + if (ret) { + pr_err("Failed to allocate v2m resource.\n"); + break; + } + + if (!of_property_read_u32(child, "arm,msi-base-spi", + &spi_start) && + !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis)) + pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n", + spi_start, 
nr_spis); + + ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res); if (ret) { - of_node_put(node); + of_node_put(child); break; } } @@ -400,3 +421,101 @@ int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) gicv2m_teardown(); return ret; } + +#ifdef CONFIG_ACPI +static int acpi_num_msi; + +static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) +{ + struct v2m_data *data; + + if (WARN_ON(acpi_num_msi <= 0)) + return NULL; + + /* We only return the fwnode of the first MSI frame. */ + data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry); + if (!data) + return NULL; + + return data->fwnode; +} + +static int __init +acpi_parse_madt_msi(struct acpi_subtable_header *header, + const unsigned long end) +{ + int ret; + struct resource res; + u32 spi_start = 0, nr_spis = 0; + struct acpi_madt_generic_msi_frame *m; + struct fwnode_handle *fwnode; + + m = (struct acpi_madt_generic_msi_frame *)header; + if (BAD_MADT_ENTRY(m, end)) + return -EINVAL; + + res.start = m->base_address; + res.end = m->base_address + SZ_4K - 1; + res.flags = IORESOURCE_MEM; + + if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) { + spi_start = m->spi_base; + nr_spis = m->spi_count; + + pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n", + spi_start, nr_spis); + } + + fwnode = irq_domain_alloc_fwnode((void *)m->base_address); + if (!fwnode) { + pr_err("Unable to allocate GICv2m domain token\n"); + return -EINVAL; + } + + ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res); + if (ret) + irq_domain_free_fwnode(fwnode); + + return ret; +} + +static int __init gicv2m_acpi_init(struct irq_domain *parent) +{ + int ret; + + if (acpi_num_msi > 0) + return 0; + + acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME, + acpi_parse_madt_msi, 0); + + if (acpi_num_msi <= 0) + goto err_out; + + ret = gicv2m_allocate_domains(parent); + if (ret) + goto err_out; + + pci_msi_register_fwnode_provider(&gicv2m_get_fwnode); + + return 0; + +err_out: + gicv2m_teardown(); + return -EINVAL; +} +#else /* CONFIG_ACPI */ +static int __init gicv2m_acpi_init(struct irq_domain *parent) +{ + return -EINVAL; +} +#endif /* CONFIG_ACPI */ + +int __init gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent) +{ + if (is_of_node(parent_handle)) + return gicv2m_of_init(parent_handle, parent); + + return gicv2m_acpi_init(parent); +} diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index abf2ffaed392..911758c056c1 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -69,6 +69,7 @@ union gic_base { }; struct gic_chip_data { + struct irq_chip chip; union gic_base dist_base; union gic_base cpu_base; #ifdef CONFIG_CPU_PM @@ -99,11 +100,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE; -#ifndef MAX_GIC_NR -#define MAX_GIC_NR 1 -#endif - -static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; +static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly; #ifdef CONFIG_GIC_NON_BANKED static void __iomem *gic_get_percpu_base(union gic_base *base) @@ -336,7 +333,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); irqnr = irqstat & GICC_IAR_INT_ID_MASK; - if (likely(irqnr > 15 && irqnr < 1021)) { + if (likely(irqnr > 15 && irqnr < 1020)) { if (static_key_true(&supports_deactivate)) writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); 
handle_domain_irq(gic->domain, irqnr, regs); @@ -383,7 +380,6 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) } static struct irq_chip gic_chip = { - .name = "GIC", .irq_mask = gic_mask_irq, .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, @@ -417,8 +413,7 @@ static struct irq_chip gic_eoimode1_chip = { void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) { - if (gic_nr >= MAX_GIC_NR) - BUG(); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, &gic_data[gic_nr]); } @@ -524,7 +519,7 @@ int gic_cpu_if_down(unsigned int gic_nr) void __iomem *cpu_base; u32 val = 0; - if (gic_nr >= MAX_GIC_NR) + if (gic_nr >= CONFIG_ARM_GIC_MAX_NR) return -EINVAL; cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); @@ -548,8 +543,7 @@ static void gic_dist_save(unsigned int gic_nr) void __iomem *dist_base; int i; - if (gic_nr >= MAX_GIC_NR) - BUG(); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); gic_irqs = gic_data[gic_nr].gic_irqs; dist_base = gic_data_dist_base(&gic_data[gic_nr]); @@ -587,8 +581,7 @@ static void gic_dist_restore(unsigned int gic_nr) unsigned int i; void __iomem *dist_base; - if (gic_nr >= MAX_GIC_NR) - BUG(); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); gic_irqs = gic_data[gic_nr].gic_irqs; dist_base = gic_data_dist_base(&gic_data[gic_nr]); @@ -634,8 +627,7 @@ static void gic_cpu_save(unsigned int gic_nr) void __iomem *dist_base; void __iomem *cpu_base; - if (gic_nr >= MAX_GIC_NR) - BUG(); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); dist_base = gic_data_dist_base(&gic_data[gic_nr]); cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); @@ -664,8 +656,7 @@ static void gic_cpu_restore(unsigned int gic_nr) void __iomem *dist_base; void __iomem *cpu_base; - if (gic_nr >= MAX_GIC_NR) - BUG(); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); dist_base = gic_data_dist_base(&gic_data[gic_nr]); cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); @@ -703,7 +694,7 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) { int i; - for (i = 0; i < MAX_GIC_NR; i++) { + for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) { #ifdef CONFIG_GIC_NON_BANKED /* Skip over unused GICs */ if (!gic_data[i].get_base) @@ -835,8 +826,7 @@ void gic_migrate_target(unsigned int new_cpu_id) int i, ror_val, cpu = smp_processor_id(); u32 val, cur_target_mask, active_mask; - if (gic_nr >= MAX_GIC_NR) - BUG(); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); dist_base = gic_data_dist_base(&gic_data[gic_nr]); if (!dist_base) @@ -925,20 +915,15 @@ void __init gic_init_physaddr(struct device_node *node) static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { - struct irq_chip *chip = &gic_chip; - - if (static_key_true(&supports_deactivate)) { - if (d->host_data == (void *)&gic_data[0]) - chip = &gic_eoimode1_chip; - } + struct gic_chip_data *gic = d->host_data; if (hw < 32) { irq_set_percpu_devid(irq); - irq_domain_set_info(d, irq, hw, chip, d->host_data, + irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); irq_set_status_flags(irq, IRQ_NOAUTOEN); } else { - irq_domain_set_info(d, irq, hw, chip, d->host_data, + irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); } @@ -972,7 +957,7 @@ static int gic_irq_domain_translate(struct irq_domain *d, return 0; } - if (fwspec->fwnode->type == FWNODE_IRQCHIP) { + if (is_fwnode_irqchip(fwspec->fwnode)) { if(fwspec->param_count != 2) return -EINVAL; @@ -1040,11 +1025,20 @@ static void 
__init __gic_init_bases(unsigned int gic_nr, int irq_start, struct gic_chip_data *gic; int gic_irqs, irq_base, i; - BUG_ON(gic_nr >= MAX_GIC_NR); + BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); gic_check_cpu_features(); gic = &gic_data[gic_nr]; + + /* Initialize irq_chip */ + if (static_key_true(&supports_deactivate) && gic_nr == 0) { + gic->chip = gic_eoimode1_chip; + } else { + gic->chip = gic_chip; + gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); + } + #ifdef CONFIG_GIC_NON_BANKED if (percpu_offset) { /* Frankein-GIC without banked registers... */ unsigned int cpu; @@ -1196,7 +1190,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) return true; } -static int __init +int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *cpu_base; @@ -1234,7 +1228,7 @@ gic_of_init(struct device_node *node, struct device_node *parent) } if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) - gicv2m_of_init(node, gic_data[gic_cnt].domain); + gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain); gic_cnt++; return 0; @@ -1359,6 +1353,10 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle); acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(NULL, gic_data[0].domain); + return 0; } IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c new file mode 100644 index 000000000000..4dd3eb8a40b3 --- /dev/null +++ b/drivers/irqchip/irq-mbigen.c @@ -0,0 +1,297 @@ +/* + * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved. + * Author: Jun Ma <majun258@huawei.com> + * Author: Yun Wu <wuyun.wu@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
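The per-instance chip setup added above (copy a template struct irq_chip into each gic_chip_data, then kasprintf() a "GIC-%d" name for secondary controllers) is easier to see in isolation. Below is a minimal userspace analogue of that pattern; "struct chip_template", "struct controller" and init_controller() are illustrative names rather than kernel types, and snprintf() stands in for kasprintf().

#include <stdio.h>

/* Userspace analogue of the per-instance chip naming: copy a shared
 * template into each controller instance and give the copy its own
 * formatted name, so renaming one instance never touches the others. */
struct chip_template {
	const char *name;
};

struct controller {
	struct chip_template chip;	/* private copy, safe to rename */
	char name_buf[16];
};

static const struct chip_template template_chip = { .name = "GIC" };

static void init_controller(struct controller *c, int nr)
{
	c->chip = template_chip;	/* copy the shared template */
	snprintf(c->name_buf, sizeof(c->name_buf), "GIC-%d", nr);
	c->chip.name = c->name_buf;	/* per-instance name */
}

int main(void)
{
	struct controller gics[2];
	int i;

	for (i = 0; i < 2; i++) {
		init_controller(&gics[i], i);
		printf("%s\n", gics[i].chip.name);
	}
	return 0;
}

The design point mirrors the hunk: a single static gic_chip can no longer carry a per-controller name, so each instance owns its copy.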
+ */ + +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Interrupt numbers per mbigen node supported */ +#define IRQS_PER_MBIGEN_NODE 128 + +/* 64 irqs (Pin0-pin63) are reserved for each mbigen chip */ +#define RESERVED_IRQ_PER_MBIGEN_CHIP 64 + +/* The maximum IRQ pin number of mbigen chip(start from 0) */ +#define MAXIMUM_IRQ_PIN_NUM 1407 + +/** + * In mbigen vector register + * bit[21:12]: event id value + * bit[11:0]: device id + */ +#define IRQ_EVENT_ID_SHIFT 12 +#define IRQ_EVENT_ID_MASK 0x3ff + +/* register range of each mbigen node */ +#define MBIGEN_NODE_OFFSET 0x1000 + +/* offset of vector register in mbigen node */ +#define REG_MBIGEN_VEC_OFFSET 0x200 + +/** + * offset of clear register in mbigen node + * This register is used to clear the status + * of interrupt + */ +#define REG_MBIGEN_CLEAR_OFFSET 0xa000 + +/** + * offset of interrupt type register + * This register is used to configure interrupt + * trigger type + */ +#define REG_MBIGEN_TYPE_OFFSET 0x0 + +/** + * struct mbigen_device - holds the information of mbigen device. + * + * @pdev: pointer to the platform device structure of mbigen chip. + * @base: mapped address of this mbigen chip. + */ +struct mbigen_device { + struct platform_device *pdev; + void __iomem *base; +}; + +static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) +{ + unsigned int nid, pin; + + hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP; + nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; + pin = hwirq % IRQS_PER_MBIGEN_NODE; + + return pin * 4 + nid * MBIGEN_NODE_OFFSET + + REG_MBIGEN_VEC_OFFSET; +} + +static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, + u32 *mask, u32 *addr) +{ + unsigned int nid, irq_ofst, ofst; + + hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP; + nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; + irq_ofst = hwirq % IRQS_PER_MBIGEN_NODE; + + *mask = 1 << (irq_ofst % 32); + ofst = irq_ofst / 32 * 4; + + *addr = ofst + nid * MBIGEN_NODE_OFFSET + + REG_MBIGEN_TYPE_OFFSET; +} + +static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, + u32 *mask, u32 *addr) +{ + unsigned int ofst; + + hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP; + ofst = hwirq / 32 * 4; + + *mask = 1 << (hwirq % 32); + *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; +} + +static void mbigen_eoi_irq(struct irq_data *data) +{ + void __iomem *base = data->chip_data; + u32 mask, addr; + + get_mbigen_clear_reg(data->hwirq, &mask, &addr); + + writel_relaxed(mask, base + addr); + + irq_chip_eoi_parent(data); +} + +static int mbigen_set_type(struct irq_data *data, unsigned int type) +{ + void __iomem *base = data->chip_data; + u32 mask, addr, val; + + if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + get_mbigen_type_reg(data->hwirq, &mask, &addr); + + val = readl_relaxed(base + addr); + + if (type == IRQ_TYPE_LEVEL_HIGH) + val |= mask; + else + val &= ~mask; + + writel_relaxed(val, base + addr); + + return 0; +} + +static struct irq_chip mbigen_irq_chip = { + .name = "mbigen-v2", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = mbigen_eoi_irq, + .irq_set_type = mbigen_set_type, + .irq_set_affinity = irq_chip_set_affinity_parent, +}; + +static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + struct irq_data *d = irq_get_irq_data(desc->irq); + void __iomem *base = d->chip_data; + u32 
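The node/pin arithmetic in get_mbigen_vec_reg() above is compact, so here is the same mapping reproduced as a stand-alone program that can be run and checked by hand. The constants are the driver's own; the sample hwirq value (200) is arbitrary.

#include <stdio.h>

/* Worked example of the mbigen hwirq -> vector-register mapping. */
#define IRQS_PER_MBIGEN_NODE		128
#define RESERVED_IRQ_PER_MBIGEN_CHIP	64
#define MBIGEN_NODE_OFFSET		0x1000
#define REG_MBIGEN_VEC_OFFSET		0x200

static unsigned int vec_reg(unsigned long hwirq)
{
	unsigned int nid, pin;

	hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;	/* pins 0-63 are reserved  */
	nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;	/* which mbigen node       */
	pin = hwirq % IRQS_PER_MBIGEN_NODE;	/* pin within that node    */

	return pin * 4 + nid * MBIGEN_NODE_OFFSET + REG_MBIGEN_VEC_OFFSET;
}

int main(void)
{
	/* hwirq 200 -> node 2, pin 8 -> register offset 0x2220 */
	printf("vector register offset: 0x%x\n", vec_reg(200));
	return 0;
}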
val; + + base += get_mbigen_vec_reg(d->hwirq); + val = readl_relaxed(base); + + val &= ~(IRQ_EVENT_ID_MASK << IRQ_EVENT_ID_SHIFT); + val |= (msg->data << IRQ_EVENT_ID_SHIFT); + + /* The address of doorbell is encoded in mbigen register by default + * So,we don't need to program the doorbell address at here + */ + writel_relaxed(val, base); +} + +static int mbigen_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if ((fwspec->param[0] > MAXIMUM_IRQ_PIN_NUM) || + (fwspec->param[0] < RESERVED_IRQ_PER_MBIGEN_CHIP)) + return -EINVAL; + else + *hwirq = fwspec->param[0]; + + /* If there is no valid irq type, just use the default type */ + if ((fwspec->param[1] == IRQ_TYPE_EDGE_RISING) || + (fwspec->param[1] == IRQ_TYPE_LEVEL_HIGH)) + *type = fwspec->param[1]; + else + return -EINVAL; + + return 0; + } + return -EINVAL; +} + +static int mbigen_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, + void *args) +{ + struct irq_fwspec *fwspec = args; + irq_hw_number_t hwirq; + unsigned int type; + struct mbigen_device *mgn_chip; + int i, err; + + err = mbigen_domain_translate(domain, fwspec, &hwirq, &type); + if (err) + return err; + + err = platform_msi_domain_alloc(domain, virq, nr_irqs); + if (err) + return err; + + mgn_chip = platform_msi_get_host_data(domain); + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, + &mbigen_irq_chip, mgn_chip->base); + + return 0; +} + +static struct irq_domain_ops mbigen_domain_ops = { + .translate = mbigen_domain_translate, + .alloc = mbigen_irq_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int mbigen_device_probe(struct platform_device *pdev) +{ + struct mbigen_device *mgn_chip; + struct resource *res; + struct irq_domain *domain; + u32 num_pins; + + mgn_chip = devm_kzalloc(&pdev->dev, sizeof(*mgn_chip), GFP_KERNEL); + if (!mgn_chip) + return -ENOMEM; + + mgn_chip->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mgn_chip->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mgn_chip->base)) + return PTR_ERR(mgn_chip->base); + + if (of_property_read_u32(pdev->dev.of_node, "num-pins", &num_pins) < 0) { + dev_err(&pdev->dev, "No num-pins property\n"); + return -EINVAL; + } + + domain = platform_msi_create_device_domain(&pdev->dev, num_pins, + mbigen_write_msg, + &mbigen_domain_ops, + mgn_chip); + + if (!domain) + return -ENOMEM; + + platform_set_drvdata(pdev, mgn_chip); + + dev_info(&pdev->dev, "Allocated %d MSIs\n", num_pins); + + return 0; +} + +static const struct of_device_id mbigen_of_match[] = { + { .compatible = "hisilicon,mbigen-v2" }, + { /* END */ } +}; +MODULE_DEVICE_TABLE(of, mbigen_of_match); + +static struct platform_driver mbigen_platform_driver = { + .driver = { + .name = "Hisilicon MBIGEN-V2", + .owner = THIS_MODULE, + .of_match_table = mbigen_of_match, + }, + .probe = mbigen_device_probe, +}; + +module_platform_driver(mbigen_platform_driver); + +MODULE_AUTHOR("Jun Ma <majun258@huawei.com>"); +MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Hisilicon MBI Generator driver"); diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index 8587d0f8d8c0..9d1bcfc33e4c 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -47,6 +47,7 @@ #define INTC_ILR0 0x0100 #define 
ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */ +#define SPURIOUSIRQ_MASK (0x1ffffff << 7) #define INTCPS_NR_ILR_REGS 128 #define INTCPS_NR_MIR_REGS 4 @@ -207,7 +208,6 @@ static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base) ct = gc->chip_types; ct->type = IRQ_TYPE_LEVEL_MASK; - ct->handler = handle_level_irq; ct->chip.irq_ack = omap_mask_ack_irq; ct->chip.irq_mask = irq_gc_mask_disable_reg; @@ -330,11 +330,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node) static asmlinkage void __exception_irq_entry omap_intc_handle_irq(struct pt_regs *regs) { + extern unsigned long irq_err_count; u32 irqnr; irqnr = intc_readl(INTC_SIR); + + /* + * A spurious IRQ can result if interrupt that triggered the + * sorting is no longer active during the sorting (10 INTC + * functional clock cycles after interrupt assertion). Or a + * change in interrupt mask affected the result during sorting + * time. There is no special handling required except ignoring + * the SIR register value just read and retrying. + * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K + * + * Many a times, a spurious interrupt situation has been fixed + * by adding a flush for the posted write acking the IRQ in + * the device driver. Typically, this is going be the device + * driver whose interrupt was handled just before the spurious + * IRQ occurred. Pay attention to those device drivers if you + * run into hitting the spurious IRQ condition below. + */ + if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) { + pr_err_once("%s: spurious irq!\n", __func__); + irq_err_count++; + omap_ack_irq(NULL); + return; + } + irqnr &= ACTIVEIRQ_MASK; - WARN_ONCE(!irqnr, "Spurious IRQ ?\n"); handle_domain_irq(domain, irqnr, regs); } diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c index 6fd30d5ee14d..c378768d75b3 100644 --- a/drivers/irqchip/irq-renesas-h8300h.c +++ b/drivers/irqchip/irq-renesas-h8300h.c @@ -21,9 +21,9 @@ static const char ipr_bit[] = { 10, 10, 10, 10, 9, 9, 9, 9, }; -static void *intc_baseaddr; +static void __iomem *intc_baseaddr; -#define IPR ((unsigned long)intc_baseaddr + 6) +#define IPR (intc_baseaddr + 6) static void h8300h_disable_irq(struct irq_data *data) { @@ -81,8 +81,8 @@ static int __init h8300h_intc_of_init(struct device_node *intc, BUG_ON(!intc_baseaddr); /* All interrupt priority low */ - ctrl_outb(0x00, IPR + 0); - ctrl_outb(0x00, IPR + 1); + writeb(0x00, IPR + 0); + writeb(0x00, IPR + 1); domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); BUG_ON(!domain); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index c325806561be..713177d97c7a 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -31,7 +31,6 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/of_device.h> -#include <linux/platform_data/irq-renesas-intc-irqpin.h> #include <linux/pm_runtime.h> #define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */ @@ -75,18 +74,20 @@ struct intc_irqpin_irq { struct intc_irqpin_priv { struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR]; struct intc_irqpin_irq irq[INTC_IRQPIN_MAX]; - struct renesas_intc_irqpin_config config; - unsigned int number_of_irqs; + unsigned int sense_bitfield_width; struct platform_device *pdev; struct irq_chip irq_chip; struct irq_domain *irq_domain; struct clk *clk; - bool shared_irqs; + unsigned shared_irqs:1; + unsigned needs_clk:1; u8 
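The spurious-IRQ comment in the omap-intc hunk above recommends flushing the posted write that acks a device interrupt. A minimal sketch of that idiom follows; the device, register offset (DEV_IRQSTATUS) and the in-memory register file are hypothetical, and in a real driver the accesses would go through writel()/readl() on an ioremap()ed base rather than a raw volatile pointer.

#include <stdint.h>
#include <stdio.h>

#define DEV_IRQSTATUS	0x28	/* hypothetical write-1-to-clear status */

static void dev_ack_irq(volatile uint32_t *base, uint32_t bits)
{
	base[DEV_IRQSTATUS / 4] = bits;	/* ack the source */
	(void)base[DEV_IRQSTATUS / 4];	/* read back: forces the posted write
					 * out to the device before the handler
					 * returns, so the line has really
					 * dropped by the next INTC sort */
}

int main(void)
{
	uint32_t fake_regs[0x40] = { 0 };	/* stands in for mapped MMIO */

	dev_ack_irq(fake_regs, 1u << 3);
	printf("status reg now 0x%x\n", fake_regs[DEV_IRQSTATUS / 4]);
	return 0;
}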
shared_irq_mask; }; -struct intc_irqpin_irlm_config { +struct intc_irqpin_config { unsigned int irlm_bit; + unsigned needs_irlm:1; + unsigned needs_clk:1; }; static unsigned long intc_irqpin_read32(void __iomem *iomem) @@ -171,7 +172,7 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) { /* The SENSE register is assumed to be 32-bit. */ - int bitfield_width = p->config.sense_bitfield_width; + int bitfield_width = p->sense_bitfield_width; int shift = 32 - (irq + 1) * bitfield_width; dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); @@ -361,8 +362,15 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { .xlate = irq_domain_xlate_twocell, }; -static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a777x = { +static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = { .irlm_bit = 23, /* ICR0.IRLM0 */ + .needs_irlm = 1, + .needs_clk = 0, +}; + +static const struct intc_irqpin_config intc_irqpin_rmobile = { + .needs_irlm = 0, + .needs_clk = 1, }; static const struct of_device_id intc_irqpin_dt_ids[] = { @@ -371,14 +379,18 @@ static const struct of_device_id intc_irqpin_dt_ids[] = { .data = &intc_irqpin_irlm_r8a777x }, { .compatible = "renesas,intc-irqpin-r8a7779", .data = &intc_irqpin_irlm_r8a777x }, + { .compatible = "renesas,intc-irqpin-r8a7740", + .data = &intc_irqpin_rmobile }, + { .compatible = "renesas,intc-irqpin-sh73a0", + .data = &intc_irqpin_rmobile }, {}, }; MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); static int intc_irqpin_probe(struct platform_device *pdev) { + const struct intc_irqpin_config *config = NULL; struct device *dev = &pdev->dev; - struct renesas_intc_irqpin_config *pdata = dev->platform_data; const struct of_device_id *of_id; struct intc_irqpin_priv *p; struct intc_irqpin_iomem *i; @@ -388,6 +400,8 @@ static int intc_irqpin_probe(struct platform_device *pdev) void (*enable_fn)(struct irq_data *d); void (*disable_fn)(struct irq_data *d); const char *name = dev_name(dev); + bool control_parent; + unsigned int nirqs; int ref_irq; int ret; int k; @@ -399,23 +413,28 @@ static int intc_irqpin_probe(struct platform_device *pdev) } /* deal with driver instance configuration */ - if (pdata) { - memcpy(&p->config, pdata, sizeof(*pdata)); - } else { - of_property_read_u32(dev->of_node, "sense-bitfield-width", - &p->config.sense_bitfield_width); - p->config.control_parent = of_property_read_bool(dev->of_node, - "control-parent"); - } - if (!p->config.sense_bitfield_width) - p->config.sense_bitfield_width = 4; /* default to 4 bits */ + of_property_read_u32(dev->of_node, "sense-bitfield-width", + &p->sense_bitfield_width); + control_parent = of_property_read_bool(dev->of_node, "control-parent"); + if (!p->sense_bitfield_width) + p->sense_bitfield_width = 4; /* default to 4 bits */ p->pdev = pdev; platform_set_drvdata(pdev, p); + of_id = of_match_device(intc_irqpin_dt_ids, dev); + if (of_id && of_id->data) { + config = of_id->data; + p->needs_clk = config->needs_clk; + } + p->clk = devm_clk_get(dev, NULL); if (IS_ERR(p->clk)) { - dev_warn(dev, "unable to get clock\n"); + if (p->needs_clk) { + dev_err(dev, "unable to get clock\n"); + ret = PTR_ERR(p->clk); + goto err0; + } p->clk = NULL; } @@ -443,8 +462,8 @@ static int intc_irqpin_probe(struct platform_device *pdev) p->irq[k].requested_irq = irq->start; } - p->number_of_irqs = k; - if (p->number_of_irqs < 1) { + nirqs = k; + if (nirqs < 1) { dev_err(dev, "not enough IRQ resources\n"); ret 
= -EINVAL; goto err0; @@ -485,20 +504,16 @@ static int intc_irqpin_probe(struct platform_device *pdev) } /* configure "individual IRQ mode" where needed */ - of_id = of_match_device(intc_irqpin_dt_ids, dev); - if (of_id && of_id->data) { - const struct intc_irqpin_irlm_config *irlm_config = of_id->data; - + if (config && config->needs_irlm) { if (io[INTC_IRQPIN_REG_IRLM]) intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM, - irlm_config->irlm_bit, - 1, 1); + config->irlm_bit, 1, 1); else dev_warn(dev, "unable to select IRLM mode\n"); } /* mask all interrupts using priority */ - for (k = 0; k < p->number_of_irqs; k++) + for (k = 0; k < nirqs; k++) intc_irqpin_mask_unmask_prio(p, k, 1); /* clear all pending interrupts */ @@ -506,16 +521,16 @@ static int intc_irqpin_probe(struct platform_device *pdev) /* scan for shared interrupt lines */ ref_irq = p->irq[0].requested_irq; - p->shared_irqs = true; - for (k = 1; k < p->number_of_irqs; k++) { + p->shared_irqs = 1; + for (k = 1; k < nirqs; k++) { if (ref_irq != p->irq[k].requested_irq) { - p->shared_irqs = false; + p->shared_irqs = 0; break; } } /* use more severe masking method if requested */ - if (p->config.control_parent) { + if (control_parent) { enable_fn = intc_irqpin_irq_enable_force; disable_fn = intc_irqpin_irq_disable_force; } else if (!p->shared_irqs) { @@ -534,9 +549,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) irq_chip->irq_set_wake = intc_irqpin_irq_set_wake; irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_add_simple(dev->of_node, - p->number_of_irqs, - p->config.irq_base, + p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0, &intc_irqpin_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; @@ -555,7 +568,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) } } else { /* request interrupts one by one */ - for (k = 0; k < p->number_of_irqs; k++) { + for (k = 0; k < nirqs; k++) { if (devm_request_irq(dev, p->irq[k].requested_irq, intc_irqpin_irq_handler, 0, name, &p->irq[k])) { @@ -567,17 +580,10 @@ static int intc_irqpin_probe(struct platform_device *pdev) } /* unmask all interrupts on prio level */ - for (k = 0; k < p->number_of_irqs; k++) + for (k = 0; k < nirqs; k++) intc_irqpin_mask_unmask_prio(p, k, 0); - dev_info(dev, "driving %d irqs\n", p->number_of_irqs); - - /* warn in case of mismatch if irq base is specified */ - if (p->config.irq_base) { - if (p->config.irq_base != p->irq[0].domain_irq) - dev_warn(dev, "irq base mismatch (%d/%d)\n", - p->config.irq_base, p->irq[0].domain_irq); - } + dev_info(dev, "driving %d irqs\n", nirqs); return 0; diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 4ef178078e5b..0820f67cc9a7 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -50,6 +50,12 @@ static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = { .enable = 0x34, }; +static struct sunxi_sc_nmi_reg_offs sun9i_reg_offs = { + .ctrl = 0x00, + .pend = 0x08, + .enable = 0x04, +}; + static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val) { @@ -207,3 +213,10 @@ static int __init sun7i_sc_nmi_irq_init(struct device_node *node, return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs); } IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init); + +static int __init sun9i_nmi_irq_init(struct device_node *node, + struct device_node *parent) +{ + return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs); +} +IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", 
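The intc-irqpin probe rework above drops platform data in favour of per-compatible config structs hung off of_device_id.data. The stand-alone program below models that lookup so the control flow is visible outside the driver; the compatible strings and config fields mirror the diff, while lookup() is a plain-C stand-in for of_match_device().

#include <stdio.h>
#include <string.h>

struct irqpin_config {
	unsigned int irlm_bit;
	unsigned needs_irlm:1;
	unsigned needs_clk:1;
};

struct match_entry {
	const char *compatible;
	const struct irqpin_config *data;
};

static const struct irqpin_config cfg_r8a777x = { .irlm_bit = 23, .needs_irlm = 1 };
static const struct irqpin_config cfg_rmobile = { .needs_clk = 1 };

static const struct match_entry match_table[] = {
	{ "renesas,intc-irqpin-r8a7778", &cfg_r8a777x },
	{ "renesas,intc-irqpin-r8a7740", &cfg_rmobile },
	{ NULL, NULL },
};

static const struct irqpin_config *lookup(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct irqpin_config *c = lookup("renesas,intc-irqpin-r8a7740");

	if (c)
		printf("needs_clk=%u needs_irlm=%u\n",
		       (unsigned)c->needs_clk, (unsigned)c->needs_irlm);
	return 0;
}

Keeping the behaviour flags in .data is what lets the probe path decide "clock mandatory?" or "program IRLM?" without any board code.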
sun9i_nmi_irq_init); diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c new file mode 100644 index 000000000000..4192bdcd2734 --- /dev/null +++ b/drivers/irqchip/irq-ts4800.c @@ -0,0 +1,163 @@ +/* + * Multiplexed-IRQs driver for TS-4800's FPGA + * + * Copyright (c) 2015 - Savoir-faire Linux + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> + +#define IRQ_MASK 0x4 +#define IRQ_STATUS 0x8 + +struct ts4800_irq_data { + void __iomem *base; + struct irq_domain *domain; + struct irq_chip irq_chip; +}; + +static void ts4800_irq_mask(struct irq_data *d) +{ + struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d); + u16 reg = readw(data->base + IRQ_MASK); + u16 mask = 1 << d->hwirq; + + writew(reg | mask, data->base + IRQ_MASK); +} + +static void ts4800_irq_unmask(struct irq_data *d) +{ + struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d); + u16 reg = readw(data->base + IRQ_MASK); + u16 mask = 1 << d->hwirq; + + writew(reg & ~mask, data->base + IRQ_MASK); +} + +static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct ts4800_irq_data *data = d->host_data; + + irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq); + irq_set_chip_data(irq, data); + irq_set_noprobe(irq); + + return 0; +} + +struct irq_domain_ops ts4800_ic_ops = { + .map = ts4800_irqdomain_map, + .xlate = irq_domain_xlate_onecell, +}; + +static void ts4800_ic_chained_handle_irq(struct irq_desc *desc) +{ + struct ts4800_irq_data *data = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + u16 status = readw(data->base + IRQ_STATUS); + + chained_irq_enter(chip, desc); + + if (unlikely(status == 0)) { + handle_bad_irq(desc); + goto out; + } + + do { + unsigned int bit = __ffs(status); + int irq = irq_find_mapping(data->domain, bit); + + status &= ~(1 << bit); + generic_handle_irq(irq); + } while (status); + +out: + chained_irq_exit(chip, desc); +} + +static int ts4800_ic_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct ts4800_irq_data *data; + struct irq_chip *irq_chip; + struct resource *res; + int parent_irq; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + + writew(0xFFFF, data->base + IRQ_MASK); + + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) { + dev_err(&pdev->dev, "failed to get parent IRQ\n"); + return -EINVAL; + } + + irq_chip = &data->irq_chip; + irq_chip->name = dev_name(&pdev->dev); + irq_chip->irq_mask = ts4800_irq_mask; + irq_chip->irq_unmask = ts4800_irq_unmask; + + data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data); + if (!data->domain) { + dev_err(&pdev->dev, "cannot add IRQ domain\n"); + return -ENOMEM; + } + + irq_set_chained_handler_and_data(parent_irq, + ts4800_ic_chained_handle_irq, data); + + platform_set_drvdata(pdev, data); + + 
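ts4800_ic_chained_handle_irq() in the new driver above peels pending sources off a 16-bit status word one bit at a time. The short program below is a runnable rendering of just that loop; the status value is made up, and __builtin_ctz() stands in for the kernel's __ffs().

#include <stdio.h>

static void handle(unsigned int hwirq)
{
	printf("dispatch hwirq %u\n", hwirq);
}

int main(void)
{
	unsigned int status = 0x0112;	/* pretend IRQ_STATUS readout */

	while (status) {
		unsigned int bit = __builtin_ctz(status); /* lowest set bit */

		status &= ~(1u << bit);
		handle(bit);		/* driver: generic_handle_irq() on the
					 * virq mapped for this hw bit */
	}
	return 0;
}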
return 0; +} + +static int ts4800_ic_remove(struct platform_device *pdev) +{ + struct ts4800_irq_data *data = platform_get_drvdata(pdev); + + irq_domain_remove(data->domain); + + return 0; +} + +static const struct of_device_id ts4800_ic_of_match[] = { + { .compatible = "technologic,ts4800-irqc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ts4800_ic_of_match); + +static struct platform_driver ts4800_ic_driver = { + .probe = ts4800_ic_probe, + .remove = ts4800_ic_remove, + .driver = { + .name = "ts4800-irqc", + .of_match_table = ts4800_ic_of_match, + }, +}; +module_platform_driver(ts4800_ic_driver); + +MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:ts4800_irqc"); diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c index 4c48fa88a03d..cb9d8ec37507 100644 --- a/drivers/irqchip/irq-zevio.c +++ b/drivers/irqchip/irq-zevio.c @@ -43,8 +43,7 @@ static void __iomem *zevio_irq_io; static void zevio_irq_ack(struct irq_data *irqd) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd); - struct irq_chip_regs *regs = - &container_of(irqd->chip, struct irq_chip_type, chip)->regs; + struct irq_chip_regs *regs = &irq_data_get_chip_type(irqd)->regs; readl(gc->reg_base + regs->ack); } diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile index f1f777570e8e..91c81965e7ca 100644 --- a/drivers/isdn/Makefile +++ b/drivers/isdn/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_ISDN_DIVERSION) += divert/ obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/ obj-$(CONFIG_ISDN_DRV_ICN) += icn/ obj-$(CONFIG_ISDN_DRV_PCBIT) += pcbit/ -obj-$(CONFIG_ISDN_DRV_SC) += sc/ obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ obj-$(CONFIG_HYSDN) += hysdn/ diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c index c3a1b061838d..68073d0da0e3 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/isdn/act2000/module.c @@ -37,7 +37,7 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for IBM Active 2000 ISDN card"); MODULE_AUTHOR("Fritz Elfert"); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(act_bus, "BusType of first card, 1=ISA, 2=MCA, 3=PCMCIA, currently only ISA"); -MODULE_PARM_DESC(membase, "Base port address of first card"); +MODULE_PARM_DESC(act_port, "Base port address of first card"); MODULE_PARM_DESC(act_irq, "IRQ of first card"); MODULE_PARM_DESC(act_id, "ID-String of first card"); module_param(act_bus, int, 0); diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 375be509e95f..2a506fe0c8a4 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs) struct sk_buff *skb = bcs->tx_skb; int sent = -EOPNOTSUPP; - if (!tty || !tty->driver || !skb) - return -EINVAL; + WARN_ON(!tty || !tty->ops || !skb); if (!skb->len) { dev_kfree_skb_any(skb); @@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs) unsigned long flags; int sent = 0; - if (!tty || !tty->driver) - return -EFAULT; + WARN_ON(!tty || !tty->ops); cb = cs->cmdbuf; if (!cb) @@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs) tasklet_kill(&cs->write_tasklet); if (!cs->hw.ser) return; - dev_set_drvdata(&cs->hw.ser->dev.dev, NULL); platform_device_unregister(&cs->hw.ser->dev); - kfree(cs->hw.ser); - cs->hw.ser = NULL; } static void gigaset_device_release(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); + struct cardstate *cs = dev_get_drvdata(dev); - /* adapted from 
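The act2000 hunk above fixes a MODULE_PARM_DESC() keyed to "membase", a parameter that driver never declares. As a reminder of the pairing, here is a minimal, hypothetical module fragment (not the act2000 driver) with the description tied to the real parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: MODULE_PARM_DESC()'s first argument must name an actual
 * module parameter, otherwise modinfo documents a knob that does not
 * exist. */
static int act_port = 0x200;
module_param(act_port, int, 0);
MODULE_PARM_DESC(act_port, "Base port address of first card");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MODULE_PARM_DESC naming example");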
platform_device_release() in drivers/base/platform.c */ - kfree(dev->platform_data); - kfree(pdev->resource); + if (!cs) + return; + dev_set_drvdata(dev, NULL); + kfree(cs->hw.ser); + cs->hw.ser = NULL; } /* @@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, struct tty_struct *tty = cs->hw.ser->tty; unsigned int set, clear; - if (!tty || !tty->driver || !tty->ops->tiocmset) + WARN_ON(!tty || !tty->ops); + /* tiocmset is an optional tty driver method */ + if (!tty->ops->tiocmset) return -EINVAL; set = new_state & ~old_state; clear = old_state & ~new_state; diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index a77eea594b69..cb428b9ee441 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) if (ipac->type & IPAC_TYPE_IPACX) { ista = ReadIPAC(ipac, ISACX_ISTA); - while (ista && cnt--) { + while (ista && --cnt) { pr_debug("%s: ISTA %02x\n", ipac->name, ista); if (ista & IPACX__ICA) ipac_irq(&ipac->hscx[0], ista); @@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) } } else if (ipac->type & IPAC_TYPE_IPAC) { ista = ReadIPAC(ipac, IPAC_ISTA); - while (ista && cnt--) { + while (ista && --cnt) { pr_debug("%s: ISTA %02x\n", ipac->name, ista); if (ista & (IPAC__ICD | IPAC__EXD)) { istad = ReadISAC(isac, ISAC_ISTA); @@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) ista = ReadIPAC(ipac, IPAC_ISTA); } } else if (ipac->type & IPAC_TYPE_HSCX) { - while (cnt) { + while (--cnt) { ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off); pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista); if (ista) @@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) mISDNisac_irq(isac, istad); if (0 == (ista | istad)) break; - cnt--; } } if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */ diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig index 9c6650ea848e..f5b714cd7618 100644 --- a/drivers/isdn/i4l/Kconfig +++ b/drivers/isdn/i4l/Kconfig @@ -130,8 +130,6 @@ source "drivers/isdn/icn/Kconfig" source "drivers/isdn/pcbit/Kconfig" -source "drivers/isdn/sc/Kconfig" - source "drivers/isdn/act2000/Kconfig" endmenu diff --git a/drivers/isdn/sc/Kconfig b/drivers/isdn/sc/Kconfig deleted file mode 100644 index 7469863a7925..000000000000 --- a/drivers/isdn/sc/Kconfig +++ /dev/null @@ -1,8 +0,0 @@ -config ISDN_DRV_SC - tristate "Spellcaster support" - depends on ISA - help - This enables support for the Spellcaster BRI ISDN boards. This - driver currently builds only in a modularized version. - To build it, choose M here: the module will be called sc. - See <file:Documentation/isdn/README.sc> for more information. diff --git a/drivers/isdn/sc/Makefile b/drivers/isdn/sc/Makefile deleted file mode 100644 index 0f2b7d602ac0..000000000000 --- a/drivers/isdn/sc/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# Makefile for the sc ISDN device driver - -# Each configuration option enables a list of files. - -obj-$(CONFIG_ISDN_DRV_SC) += sc.o - -# Multipart objects. 
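The mISDNipac change above swaps post-decrement for pre-decrement in the ISTA polling loops. The difference is pure C semantics and easy to demonstrate in isolation; the program below (with the ista condition dropped for brevity) shows both the iteration count and the final value of cnt, which the surrounding code goes on to test against maxloop.

#include <stdio.h>

static int run(int cnt, int pre)
{
	int iterations = 0;

	if (pre)
		while (--cnt)
			iterations++;
	else
		while (cnt--)
			iterations++;

	printf("%s: iterations=%d, final cnt=%d\n",
	       pre ? "--cnt" : "cnt--", iterations, cnt);
	return iterations;
}

int main(void)
{
	run(4, 0);	/* cnt--: 4 iterations, cnt ends at -1 */
	run(4, 1);	/* --cnt: 3 iterations, cnt ends at 0  */
	return 0;
}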
- -sc-y := shmem.o init.o packet.o command.o event.o \ - ioctl.o interrupt.o message.o timer.o diff --git a/drivers/isdn/sc/card.h b/drivers/isdn/sc/card.h deleted file mode 100644 index 3da69ee43da7..000000000000 --- a/drivers/isdn/sc/card.h +++ /dev/null @@ -1,131 +0,0 @@ -/* $Id: card.h,v 1.1.10.1 2001/09/23 22:24:59 kai Exp $ - * - * Driver parameters for SpellCaster ISA ISDN adapters - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. - * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#ifndef CARD_H -#define CARD_H - -/* - * We need these if they're not already included - */ -#include <linux/timer.h> -#include <linux/time.h> -#include <linux/isdnif.h> -#include <linux/irqreturn.h> -#include "message.h" -#include "scioc.h" - -/* - * Amount of time to wait for a reset to complete - */ -#define CHECKRESET_TIME msecs_to_jiffies(4000) - -/* - * Amount of time between line status checks - */ -#define CHECKSTAT_TIME msecs_to_jiffies(8000) - -/* - * The maximum amount of time to wait for a message response - * to arrive. Use exclusively by send_and_receive - */ -#define SAR_TIMEOUT msecs_to_jiffies(10000) - -/* - * Macro to determine is a card id is valid - */ -#define IS_VALID_CARD(x) ((x >= 0) && (x <= cinst)) - -/* - * Per channel status and configuration - */ -typedef struct { - int l2_proto; - int l3_proto; - char dn[50]; - unsigned long first_sendbuf; /* Offset of first send buffer */ - unsigned int num_sendbufs; /* Number of send buffers */ - unsigned int free_sendbufs; /* Number of free sendbufs */ - unsigned int next_sendbuf; /* Next sequential buffer */ - char eazlist[50]; /* Set with SETEAZ */ - char sillist[50]; /* Set with SETSIL */ - int eazclear; /* Don't accept calls if TRUE */ -} bchan; - -/* - * Everything you want to know about the adapter ... - */ -typedef struct { - int model; - int driverId; /* LL Id */ - char devicename[20]; /* The device name */ - isdn_if *card; /* ISDN4Linux structure */ - bchan *channel; /* status of the B channels */ - char nChannels; /* Number of channels */ - unsigned int interrupt; /* Interrupt number */ - int iobase; /* I/O Base address */ - int ioport[MAX_IO_REGS]; /* Index to I/O ports */ - int shmem_pgport; /* port for the exp mem page reg. */ - int shmem_magic; /* adapter magic number */ - unsigned int rambase; /* Shared RAM base address */ - unsigned int ramsize; /* Size of shared memory */ - RspMessage async_msg; /* Async response message */ - int want_async_messages; /* Snoop the Q ? */ - unsigned char seq_no; /* Next send seq. 
number */ - struct timer_list reset_timer; /* Check reset timer */ - struct timer_list stat_timer; /* Check startproc timer */ - unsigned char nphystat; /* Latest PhyStat info */ - unsigned char phystat; /* Last PhyStat info */ - HWConfig_pl hwconfig; /* Hardware config info */ - char load_ver[11]; /* CommManage Version string */ - char proc_ver[11]; /* CommEngine Version */ - int StartOnReset; /* Indicates startproc after reset */ - int EngineUp; /* Indicates CommEngine Up */ - int trace_mode; /* Indicate if tracing is on */ - spinlock_t lock; /* local lock */ -} board; - - -extern board *sc_adapter[]; -extern int cinst; - -void memcpy_toshmem(int card, void *dest, const void *src, size_t n); -void memcpy_fromshmem(int card, void *dest, const void *src, size_t n); -int get_card_from_id(int driver); -int indicate_status(int card, int event, ulong Channel, char *Data); -irqreturn_t interrupt_handler(int interrupt, void *cardptr); -int sndpkt(int devId, int channel, int ack, struct sk_buff *data); -void rcvpkt(int card, RspMessage *rcvmsg); -int command(isdn_ctrl *cmd); -int reset(int card); -int startproc(int card); -int send_and_receive(int card, unsigned int procid, unsigned char type, - unsigned char class, unsigned char code, - unsigned char link, unsigned char data_len, - unsigned char *data, RspMessage *mesgdata, int timeout); -void flushreadfifo(int card); -int sendmessage(int card, unsigned int procid, unsigned int type, - unsigned int class, unsigned int code, unsigned int link, - unsigned int data_len, unsigned int *data); -int receivemessage(int card, RspMessage *rspmsg); -int sc_ioctl(int card, scs_ioctl *data); -int setup_buffers(int card, int c); -void sc_check_reset(unsigned long data); -void check_phystat(unsigned long data); - -#endif /* CARD_H */ diff --git a/drivers/isdn/sc/command.c b/drivers/isdn/sc/command.c deleted file mode 100644 index 4a4e66152ce7..000000000000 --- a/drivers/isdn/sc/command.c +++ /dev/null @@ -1,363 +0,0 @@ -/* $Id: command.c,v 1.4.10.1 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. 
- * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#include <linux/module.h> -#include "includes.h" /* This must be first */ -#include "hardware.h" -#include "message.h" -#include "card.h" -#include "scioc.h" - -static int dial(int card, unsigned long channel, setup_parm setup); -static int hangup(int card, unsigned long channel); -static int answer(int card, unsigned long channel); -static int clreaz(int card, unsigned long channel); -static int seteaz(int card, unsigned long channel, char *); -static int setl2(int card, unsigned long arg); -static int setl3(int card, unsigned long arg); -static int acceptb(int card, unsigned long channel); - -#ifdef DEBUG -/* - * Translate command codes to strings - */ -static char *commands[] = { "ISDN_CMD_IOCTL", - "ISDN_CMD_DIAL", - "ISDN_CMD_ACCEPTB", - "ISDN_CMD_ACCEPTB", - "ISDN_CMD_HANGUP", - "ISDN_CMD_CLREAZ", - "ISDN_CMD_SETEAZ", - NULL, - NULL, - NULL, - "ISDN_CMD_SETL2", - NULL, - "ISDN_CMD_SETL3", - NULL, - NULL, - NULL, - NULL, - NULL, }; - -/* - * Translates ISDN4Linux protocol codes to strings for debug messages - */ -static char *l3protos[] = { "ISDN_PROTO_L3_TRANS" }; -static char *l2protos[] = { "ISDN_PROTO_L2_X75I", - "ISDN_PROTO_L2_X75UI", - "ISDN_PROTO_L2_X75BUI", - "ISDN_PROTO_L2_HDLC", - "ISDN_PROTO_L2_TRANS" }; -#endif - -int get_card_from_id(int driver) -{ - int i; - - for (i = 0; i < cinst; i++) { - if (sc_adapter[i]->driverId == driver) - return i; - } - return -ENODEV; -} - -/* - * command - */ - -int command(isdn_ctrl *cmd) -{ - int card; - - card = get_card_from_id(cmd->driver); - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - /* - * Dispatch the command - */ - switch (cmd->command) { - case ISDN_CMD_IOCTL: - { - unsigned long cmdptr; - scs_ioctl ioc; - - memcpy(&cmdptr, cmd->parm.num, sizeof(unsigned long)); - if (copy_from_user(&ioc, (scs_ioctl __user *)cmdptr, - sizeof(scs_ioctl))) { - pr_debug("%s: Failed to verify user space 0x%lx\n", - sc_adapter[card]->devicename, cmdptr); - return -EFAULT; - } - return sc_ioctl(card, &ioc); - } - case ISDN_CMD_DIAL: - return dial(card, cmd->arg, cmd->parm.setup); - case ISDN_CMD_HANGUP: - return hangup(card, cmd->arg); - case ISDN_CMD_ACCEPTD: - return answer(card, cmd->arg); - case ISDN_CMD_ACCEPTB: - return acceptb(card, cmd->arg); - case ISDN_CMD_CLREAZ: - return clreaz(card, cmd->arg); - case ISDN_CMD_SETEAZ: - return seteaz(card, cmd->arg, cmd->parm.num); - case ISDN_CMD_SETL2: - return setl2(card, cmd->arg); - case ISDN_CMD_SETL3: - return setl3(card, cmd->arg); - default: - return -EINVAL; - } - return 0; -} - -/* - * start the onboard firmware - */ -int startproc(int card) -{ - int status; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - /* - * send start msg - */ - status = sendmessage(card, CMPID, cmReqType2, - cmReqClass0, - cmReqStartProc, - 0, 0, NULL); - pr_debug("%s: Sent startProc\n", sc_adapter[card]->devicename); - - return status; -} - - -/* - * Dials the number passed in - */ -static int dial(int card, unsigned long channel, setup_parm setup) -{ - int status; - char Phone[48]; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - /*extract ISDN number to dial from eaz/msn string*/ - strcpy(Phone, setup.phone); - - /*send the connection message*/ - status = 
sendmessage(card, CEPID, ceReqTypePhy, - ceReqClass1, - ceReqPhyConnect, - (unsigned char)channel + 1, - strlen(Phone), - (unsigned int *)Phone); - - pr_debug("%s: Dialing %s on channel %lu\n", - sc_adapter[card]->devicename, Phone, channel + 1); - - return status; -} - -/* - * Answer an incoming call - */ -static int answer(int card, unsigned long channel) -{ - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - if (setup_buffers(card, channel + 1)) { - hangup(card, channel + 1); - return -ENOBUFS; - } - - indicate_status(card, ISDN_STAT_BCONN, channel, NULL); - pr_debug("%s: Answered incoming call on channel %lu\n", - sc_adapter[card]->devicename, channel + 1); - return 0; -} - -/* - * Hangup up the call on specified channel - */ -static int hangup(int card, unsigned long channel) -{ - int status; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - status = sendmessage(card, CEPID, ceReqTypePhy, - ceReqClass1, - ceReqPhyDisconnect, - (unsigned char)channel + 1, - 0, - NULL); - pr_debug("%s: Sent HANGUP message to channel %lu\n", - sc_adapter[card]->devicename, channel + 1); - return status; -} - -/* - * Set the layer 2 protocol (X.25, HDLC, Raw) - */ -static int setl2(int card, unsigned long arg) -{ - int status = 0; - int protocol, channel; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - protocol = arg >> 8; - channel = arg & 0xff; - sc_adapter[card]->channel[channel].l2_proto = protocol; - - /* - * check that the adapter is also set to the correct protocol - */ - pr_debug("%s: Sending GetFrameFormat for channel %d\n", - sc_adapter[card]->devicename, channel + 1); - status = sendmessage(card, CEPID, ceReqTypeCall, - ceReqClass0, - ceReqCallGetFrameFormat, - (unsigned char)channel + 1, - 1, - (unsigned int *)protocol); - if (status) - return status; - return 0; -} - -/* - * Set the layer 3 protocol - */ -static int setl3(int card, unsigned long channel) -{ - int protocol = channel >> 8; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - sc_adapter[card]->channel[channel].l3_proto = protocol; - return 0; -} - -static int acceptb(int card, unsigned long channel) -{ - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - if (setup_buffers(card, channel + 1)) - { - hangup(card, channel + 1); - return -ENOBUFS; - } - - pr_debug("%s: B-Channel connection accepted on channel %lu\n", - sc_adapter[card]->devicename, channel + 1); - indicate_status(card, ISDN_STAT_BCONN, channel, NULL); - return 0; -} - -static int clreaz(int card, unsigned long arg) -{ - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - strcpy(sc_adapter[card]->channel[arg].eazlist, ""); - sc_adapter[card]->channel[arg].eazclear = 1; - pr_debug("%s: EAZ List cleared for channel %lu\n", - sc_adapter[card]->devicename, arg + 1); - return 0; -} - -static int seteaz(int card, unsigned long arg, char *num) -{ - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - strcpy(sc_adapter[card]->channel[arg].eazlist, num); - sc_adapter[card]->channel[arg].eazclear = 0; - pr_debug("%s: EAZ list for channel %lu set to: %s\n", - sc_adapter[card]->devicename, arg + 1, - 
sc_adapter[card]->channel[arg].eazlist); - return 0; -} - -int reset(int card) -{ - unsigned long flags; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - indicate_status(card, ISDN_STAT_STOP, 0, NULL); - - if (sc_adapter[card]->EngineUp) { - del_timer(&sc_adapter[card]->stat_timer); - } - - sc_adapter[card]->EngineUp = 0; - - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - init_timer(&sc_adapter[card]->reset_timer); - sc_adapter[card]->reset_timer.function = sc_check_reset; - sc_adapter[card]->reset_timer.data = card; - sc_adapter[card]->reset_timer.expires = jiffies + CHECKRESET_TIME; - add_timer(&sc_adapter[card]->reset_timer); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - - outb(0x1, sc_adapter[card]->ioport[SFT_RESET]); - - pr_debug("%s: Adapter Reset\n", sc_adapter[card]->devicename); - return 0; -} - -void flushreadfifo(int card) -{ - while (inb(sc_adapter[card]->ioport[FIFO_STATUS]) & RF_HAS_DATA) - inb(sc_adapter[card]->ioport[FIFO_READ]); -} diff --git a/drivers/isdn/sc/event.c b/drivers/isdn/sc/event.c deleted file mode 100644 index 833d96c2cf92..000000000000 --- a/drivers/isdn/sc/event.c +++ /dev/null @@ -1,68 +0,0 @@ -/* $Id: event.c,v 1.4.8.1 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. - * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#include "includes.h" -#include "hardware.h" -#include "message.h" -#include "card.h" - -#ifdef DEBUG -static char *events[] = { "ISDN_STAT_STAVAIL", - "ISDN_STAT_ICALL", - "ISDN_STAT_RUN", - "ISDN_STAT_STOP", - "ISDN_STAT_DCONN", - "ISDN_STAT_BCONN", - "ISDN_STAT_DHUP", - "ISDN_STAT_BHUP", - "ISDN_STAT_CINF", - "ISDN_STAT_LOAD", - "ISDN_STAT_UNLOAD", - "ISDN_STAT_BSENT", - "ISDN_STAT_NODCH", - "ISDN_STAT_ADDCH", - "ISDN_STAT_CAUSE" }; -#endif - -int indicate_status(int card, int event, ulong Channel, char *Data) -{ - isdn_ctrl cmd; - -#ifdef DEBUG - pr_debug("%s: Indicating event %s on Channel %d\n", - sc_adapter[card]->devicename, events[event - 256], Channel); -#endif - if (Data != NULL) { - pr_debug("%s: Event data: %s\n", sc_adapter[card]->devicename, - Data); - switch (event) { - case ISDN_STAT_BSENT: - memcpy(&cmd.parm.length, Data, sizeof(cmd.parm.length)); - break; - case ISDN_STAT_ICALL: - memcpy(&cmd.parm.setup, Data, sizeof(cmd.parm.setup)); - break; - default: - strlcpy(cmd.parm.num, Data, sizeof(cmd.parm.num)); - } - } - - cmd.command = event; - cmd.driver = sc_adapter[card]->driverId; - cmd.arg = Channel; - return sc_adapter[card]->card->statcallb(&cmd); -} diff --git a/drivers/isdn/sc/hardware.h b/drivers/isdn/sc/hardware.h deleted file mode 100644 index 81fbe78701f0..000000000000 --- a/drivers/isdn/sc/hardware.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Hardware specific macros, defines and structures - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
- * - */ - -#ifndef HARDWARE_H -#define HARDWARE_H - -#include <asm/param.h> /* For HZ */ - -/* - * General hardware parameters common to all ISA adapters - */ - -#define MAX_CARDS 4 /* The maximum number of cards to - control or probe for. */ - -#define SIGNATURE 0x87654321 /* Board reset signature */ -#define SIG_OFFSET 0x1004 /* Where to find signature in shared RAM */ -#define TRACE_OFFSET 0x1008 /* Trace enable word offset in shared RAM */ -#define BUFFER_OFFSET 0x1800 /* Beginning of buffers */ - -/* I/O Port parameters */ -#define IOBASE_MIN 0x180 /* Lowest I/O port address */ -#define IOBASE_MAX 0x3C0 /* Highest I/O port address */ -#define IOBASE_OFFSET 0x20 /* Inter-board I/O port gap used during - probing */ -#define FIFORD_OFFSET 0x0 -#define FIFOWR_OFFSET 0x400 -#define FIFOSTAT_OFFSET 0x1000 -#define RESET_OFFSET 0x2800 -#define PG0_OFFSET 0x3000 /* Offset from I/O Base for Page 0 register */ -#define PG1_OFFSET 0x3400 /* Offset from I/O Base for Page 1 register */ -#define PG2_OFFSET 0x3800 /* Offset from I/O Base for Page 2 register */ -#define PG3_OFFSET 0x3C00 /* Offset from I/O Base for Page 3 register */ - -#define FIFO_READ 0 /* FIFO Read register */ -#define FIFO_WRITE 1 /* FIFO Write rgister */ -#define LO_ADDR_PTR 2 /* Extended RAM Low Addr Pointer */ -#define HI_ADDR_PTR 3 /* Extended RAM High Addr Pointer */ -#define NOT_USED_1 4 -#define FIFO_STATUS 5 /* FIFO Status Register */ -#define NOT_USED_2 6 -#define MEM_OFFSET 7 -#define SFT_RESET 10 /* Reset Register */ -#define EXP_BASE 11 /* Shared RAM Base address */ -#define EXP_PAGE0 12 /* Shared RAM Page0 register */ -#define EXP_PAGE1 13 /* Shared RAM Page1 register */ -#define EXP_PAGE2 14 /* Shared RAM Page2 register */ -#define EXP_PAGE3 15 /* Shared RAM Page3 register */ -#define IRQ_SELECT 16 /* IRQ selection register */ -#define MAX_IO_REGS 17 /* Total number of I/O ports */ - -/* FIFO register values */ -#define RF_HAS_DATA 0x01 /* fifo has data */ -#define RF_QUART_FULL 0x02 /* fifo quarter full */ -#define RF_HALF_FULL 0x04 /* fifo half full */ -#define RF_NOT_FULL 0x08 /* fifo not full */ -#define WF_HAS_DATA 0x10 /* fifo has data */ -#define WF_QUART_FULL 0x20 /* fifo quarter full */ -#define WF_HALF_FULL 0x40 /* fifo half full */ -#define WF_NOT_FULL 0x80 /* fifo not full */ - -/* Shared RAM parameters */ -#define SRAM_MIN 0xC0000 /* Lowest host shared RAM address */ -#define SRAM_MAX 0xEFFFF /* Highest host shared RAM address */ -#define SRAM_PAGESIZE 0x4000 /* Size of one RAM page (16K) */ - -/* Shared RAM buffer parameters */ -#define BUFFER_SIZE 0x800 /* The size of a buffer in bytes */ -#define BUFFER_BASE BUFFER_OFFSET /* Offset from start of shared RAM - where buffer start */ -#define BUFFERS_MAX 16 /* Maximum number of send/receive - buffers per channel */ -#define HDLC_PROTO 0x01 /* Frame Format for Layer 2 */ - -#define BRI_BOARD 0 -#define POTS_BOARD 1 -#define PRI_BOARD 2 - -/* - * Specific hardware parameters for the DataCommute/BRI - */ -#define BRI_CHANNELS 2 /* Number of B channels */ -#define BRI_BASEPG_VAL 0x98 -#define BRI_MAGIC 0x60000 /* Magic Number */ -#define BRI_MEMSIZE 0x10000 /* Amount of RAM (64K) */ -#define BRI_PARTNO "72-029" -#define BRI_FEATURES ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L3_TRANS; -/* - * Specific hardware parameters for the DataCommute/PRI - */ -#define PRI_CHANNELS 23 /* Number of B channels */ -#define PRI_BASEPG_VAL 0x88 -#define PRI_MAGIC 0x20000 /* Magic Number */ -#define PRI_MEMSIZE 0x100000 /* Amount of RAM (1M) */ -#define PRI_PARTNO "72-030" 
-#define PRI_FEATURES ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L3_TRANS; - -/* - * Some handy macros - */ - -/* Determine if a channel number is valid for the adapter */ -#define IS_VALID_CHANNEL(y, x) ((x > 0) && (x <= sc_adapter[y]->channels)) - -#endif diff --git a/drivers/isdn/sc/includes.h b/drivers/isdn/sc/includes.h deleted file mode 100644 index 4766e5b77378..000000000000 --- a/drivers/isdn/sc/includes.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#include <linux/errno.h> -#include <asm/io.h> -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/ioport.h> -#include <linux/timer.h> -#include <linux/wait.h> -#include <linux/isdnif.h> diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c deleted file mode 100644 index 3597ef47b28a..000000000000 --- a/drivers/isdn/sc/init.c +++ /dev/null @@ -1,549 +0,0 @@ -/* - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include "includes.h" -#include "hardware.h" -#include "card.h" - -MODULE_DESCRIPTION("ISDN4Linux: Driver for Spellcaster card"); -MODULE_AUTHOR("Spellcaster Telecommunications Inc."); -MODULE_LICENSE("GPL"); - -board *sc_adapter[MAX_CARDS]; -int cinst; - -static char devname[] = "scX"; -static const char version[] = "2.0b1"; - -static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCommute/BRI" }; - -/* insmod set parameters */ -static unsigned int io[] = {0, 0, 0, 0}; -static unsigned char irq[] = {0, 0, 0, 0}; -static unsigned long ram[] = {0, 0, 0, 0}; -static bool do_reset; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, byte, NULL, 0); -module_param_array(ram, long, NULL, 0); -module_param(do_reset, bool, 0); - -static int identify_board(unsigned long, unsigned int); - -static int __init sc_init(void) -{ - int b = -1; - int i, j; - int status = -ENODEV; - - unsigned long memsize = 0; - unsigned long features = 0; - isdn_if *interface; - unsigned char channels; - unsigned char pgport; - unsigned long magic; - int model; - int last_base = IOBASE_MIN; - int probe_exhasted = 0; - -#ifdef MODULE - pr_info("SpellCaster ISA ISDN Adapter Driver rev. %s Loaded\n", version); -#else - pr_info("SpellCaster ISA ISDN Adapter Driver rev. %s\n", version); -#endif - pr_info("Copyright (C) 1996 SpellCaster Telecommunications Inc.\n"); - - while (b++ < MAX_CARDS - 1) { - pr_debug("Probing for adapter #%d\n", b); - /* - * Initialize reusable variables - */ - model = -1; - magic = 0; - channels = 0; - pgport = 0; - - /* - * See if we should probe for IO base - */ - pr_debug("I/O Base for board %d is 0x%x, %s probe\n", b, io[b], - io[b] == 0 ? 
"will" : "won't"); - if (io[b]) { - /* - * No, I/O Base has been provided - */ - for (i = 0; i < MAX_IO_REGS - 1; i++) { - if (!request_region(io[b] + i * 0x400, 1, "sc test")) { - pr_debug("request_region for 0x%x failed\n", io[b] + i * 0x400); - io[b] = 0; - break; - } else - release_region(io[b] + i * 0x400, 1); - } - - /* - * Confirm the I/O Address with a test - */ - if (io[b] == 0) { - pr_debug("I/O Address invalid.\n"); - continue; - } - - outb(0x18, io[b] + 0x400 * EXP_PAGE0); - if (inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) { - pr_debug("I/O Base 0x%x fails test\n", - io[b] + 0x400 * EXP_PAGE0); - continue; - } - } else { - /* - * Yes, probe for I/O Base - */ - if (probe_exhasted) { - pr_debug("All probe addresses exhausted, skipping\n"); - continue; - } - pr_debug("Probing for I/O...\n"); - for (i = last_base; i <= IOBASE_MAX; i += IOBASE_OFFSET) { - int found_io = 1; - if (i == IOBASE_MAX) { - probe_exhasted = 1; /* No more addresses to probe */ - pr_debug("End of Probes\n"); - } - last_base = i + IOBASE_OFFSET; - pr_debug(" checking 0x%x...", i); - for (j = 0; j < MAX_IO_REGS - 1; j++) { - if (!request_region(i + j * 0x400, 1, "sc test")) { - pr_debug("Failed\n"); - found_io = 0; - break; - } else - release_region(i + j * 0x400, 1); - } - - if (found_io) { - io[b] = i; - outb(0x18, io[b] + 0x400 * EXP_PAGE0); - if (inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) { - pr_debug("Failed by test\n"); - continue; - } - pr_debug("Passed\n"); - break; - } - } - if (probe_exhasted) { - continue; - } - } - - /* - * See if we should probe for shared RAM - */ - if (do_reset) { - pr_debug("Doing a SAFE probe reset\n"); - outb(0xFF, io[b] + RESET_OFFSET); - msleep_interruptible(10000); - } - pr_debug("RAM Base for board %d is 0x%lx, %s probe\n", b, - ram[b], ram[b] == 0 ? 
"will" : "won't"); - - if (ram[b]) { - /* - * No, the RAM base has been provided - * Just look for a signature and ID the - * board model - */ - if (request_region(ram[b], SRAM_PAGESIZE, "sc test")) { - pr_debug("request_region for RAM base 0x%lx succeeded\n", ram[b]); - model = identify_board(ram[b], io[b]); - release_region(ram[b], SRAM_PAGESIZE); - } - } else { - /* - * Yes, probe for free RAM and look for - * a signature and id the board model - */ - for (i = SRAM_MIN; i < SRAM_MAX; i += SRAM_PAGESIZE) { - pr_debug("Checking RAM address 0x%x...\n", i); - if (request_region(i, SRAM_PAGESIZE, "sc test")) { - pr_debug(" request_region succeeded\n"); - model = identify_board(i, io[b]); - release_region(i, SRAM_PAGESIZE); - if (model >= 0) { - pr_debug(" Identified a %s\n", - boardname[model]); - ram[b] = i; - break; - } - pr_debug(" Unidentified or inaccessible\n"); - continue; - } - pr_debug(" request failed\n"); - } - } - /* - * See if we found free RAM and the board model - */ - if (!ram[b] || model < 0) { - /* - * Nope, there was no place in RAM for the - * board, or it couldn't be identified - */ - pr_debug("Failed to find an adapter at 0x%lx\n", ram[b]); - continue; - } - - /* - * Set the board's magic number, memory size and page register - */ - switch (model) { - case PRI_BOARD: - channels = 23; - magic = 0x20000; - memsize = 0x100000; - features = PRI_FEATURES; - break; - - case BRI_BOARD: - case POTS_BOARD: - channels = 2; - magic = 0x60000; - memsize = 0x10000; - features = BRI_FEATURES; - break; - } - switch (ram[b] >> 12 & 0x0F) { - case 0x0: - pr_debug("RAM Page register set to EXP_PAGE0\n"); - pgport = EXP_PAGE0; - break; - - case 0x4: - pr_debug("RAM Page register set to EXP_PAGE1\n"); - pgport = EXP_PAGE1; - break; - - case 0x8: - pr_debug("RAM Page register set to EXP_PAGE2\n"); - pgport = EXP_PAGE2; - break; - - case 0xC: - pr_debug("RAM Page register set to EXP_PAGE3\n"); - pgport = EXP_PAGE3; - break; - - default: - pr_debug("RAM base address doesn't fall on 16K boundary\n"); - continue; - } - - pr_debug("current IRQ: %d b: %d\n", irq[b], b); - - /* - * Make sure we got an IRQ - */ - if (!irq[b]) { - /* - * No interrupt could be used - */ - pr_debug("Failed to acquire an IRQ line\n"); - continue; - } - - /* - * Horray! 
We found a board, Make sure we can register - * it with ISDN4Linux - */ - interface = kzalloc(sizeof(isdn_if), GFP_KERNEL); - if (interface == NULL) { - /* - * Oops, can't malloc isdn_if - */ - continue; - } - - interface->owner = THIS_MODULE; - interface->hl_hdrlen = 0; - interface->channels = channels; - interface->maxbufsize = BUFFER_SIZE; - interface->features = features; - interface->writebuf_skb = sndpkt; - interface->writecmd = NULL; - interface->command = command; - strcpy(interface->id, devname); - interface->id[2] = '0' + cinst; - - /* - * Allocate the board structure - */ - sc_adapter[cinst] = kzalloc(sizeof(board), GFP_KERNEL); - if (sc_adapter[cinst] == NULL) { - /* - * Oops, can't alloc memory for the board - */ - kfree(interface); - continue; - } - spin_lock_init(&sc_adapter[cinst]->lock); - - if (!register_isdn(interface)) { - /* - * Oops, couldn't register for some reason - */ - kfree(interface); - kfree(sc_adapter[cinst]); - continue; - } - - sc_adapter[cinst]->card = interface; - sc_adapter[cinst]->driverId = interface->channels; - strcpy(sc_adapter[cinst]->devicename, interface->id); - sc_adapter[cinst]->nChannels = channels; - sc_adapter[cinst]->ramsize = memsize; - sc_adapter[cinst]->shmem_magic = magic; - sc_adapter[cinst]->shmem_pgport = pgport; - sc_adapter[cinst]->StartOnReset = 1; - - /* - * Allocate channels status structures - */ - sc_adapter[cinst]->channel = kzalloc(sizeof(bchan) * channels, GFP_KERNEL); - if (sc_adapter[cinst]->channel == NULL) { - /* - * Oops, can't alloc memory for the channels - */ - indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL); /* Fix me */ - kfree(interface); - kfree(sc_adapter[cinst]); - continue; - } - - /* - * Lock down the hardware resources - */ - sc_adapter[cinst]->interrupt = irq[b]; - if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler, - 0, interface->id, - (void *)(unsigned long) cinst)) { - kfree(sc_adapter[cinst]->channel); - indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL); /* Fix me */ - kfree(interface); - kfree(sc_adapter[cinst]); - continue; - - } - sc_adapter[cinst]->iobase = io[b]; - for (i = 0; i < MAX_IO_REGS - 1; i++) { - sc_adapter[cinst]->ioport[i] = io[b] + i * 0x400; - request_region(sc_adapter[cinst]->ioport[i], 1, - interface->id); - pr_debug("Requesting I/O Port %#x\n", - sc_adapter[cinst]->ioport[i]); - } - sc_adapter[cinst]->ioport[IRQ_SELECT] = io[b] + 0x2; - request_region(sc_adapter[cinst]->ioport[IRQ_SELECT], 1, - interface->id); - pr_debug("Requesting I/O Port %#x\n", - sc_adapter[cinst]->ioport[IRQ_SELECT]); - sc_adapter[cinst]->rambase = ram[b]; - request_region(sc_adapter[cinst]->rambase, SRAM_PAGESIZE, - interface->id); - - pr_info(" %s (%d) - %s %d channels IRQ %d, I/O Base 0x%x, RAM Base 0x%lx\n", - sc_adapter[cinst]->devicename, - sc_adapter[cinst]->driverId, - boardname[model], channels, irq[b], io[b], ram[b]); - - /* - * reset the adapter to put things in motion - */ - reset(cinst); - - cinst++; - status = 0; - } - if (status) - pr_info("Failed to find any adapters, driver unloaded\n"); - return status; -} - -static void __exit sc_exit(void) -{ - int i, j; - - for (i = 0; i < cinst; i++) { - pr_debug("Cleaning up after adapter %d\n", i); - /* - * kill the timers - */ - del_timer_sync(&(sc_adapter[i]->reset_timer)); - del_timer_sync(&(sc_adapter[i]->stat_timer)); - - /* - * Tell I4L we're toast - */ - indicate_status(i, ISDN_STAT_STOP, 0, NULL); - indicate_status(i, ISDN_STAT_UNLOAD, 0, NULL); - - /* - * Release shared RAM - */ - release_region(sc_adapter[i]->rambase, 
SRAM_PAGESIZE); - - /* - * Release the IRQ - */ - free_irq(sc_adapter[i]->interrupt, NULL); - - /* - * Reset for a clean start - */ - outb(0xFF, sc_adapter[i]->ioport[SFT_RESET]); - - /* - * Release the I/O Port regions - */ - for (j = 0; j < MAX_IO_REGS - 1; j++) { - release_region(sc_adapter[i]->ioport[j], 1); - pr_debug("Releasing I/O Port %#x\n", - sc_adapter[i]->ioport[j]); - } - release_region(sc_adapter[i]->ioport[IRQ_SELECT], 1); - pr_debug("Releasing I/O Port %#x\n", - sc_adapter[i]->ioport[IRQ_SELECT]); - - /* - * Release any memory we alloced - */ - kfree(sc_adapter[i]->channel); - kfree(sc_adapter[i]->card); - kfree(sc_adapter[i]); - } - pr_info("SpellCaster ISA ISDN Adapter Driver Unloaded.\n"); -} - -static int identify_board(unsigned long rambase, unsigned int iobase) -{ - unsigned int pgport; - unsigned long sig; - DualPortMemory *dpm; - RspMessage rcvmsg; - ReqMessage sndmsg; - HWConfig_pl hwci; - int x; - - pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n", - rambase, iobase); - - /* - * Enable the base pointer - */ - outb(rambase >> 12, iobase + 0x2c00); - - switch (rambase >> 12 & 0x0F) { - case 0x0: - pgport = iobase + PG0_OFFSET; - pr_debug("Page Register offset is 0x%x\n", PG0_OFFSET); - break; - - case 0x4: - pgport = iobase + PG1_OFFSET; - pr_debug("Page Register offset is 0x%x\n", PG1_OFFSET); - break; - - case 0x8: - pgport = iobase + PG2_OFFSET; - pr_debug("Page Register offset is 0x%x\n", PG2_OFFSET); - break; - - case 0xC: - pgport = iobase + PG3_OFFSET; - pr_debug("Page Register offset is 0x%x\n", PG3_OFFSET); - break; - default: - pr_debug("Invalid rambase 0x%lx\n", rambase); - return -1; - } - - /* - * Try to identify a PRI card - */ - outb(PRI_BASEPG_VAL, pgport); - msleep_interruptible(1000); - sig = readl(rambase + SIG_OFFSET); - pr_debug("Looking for a signature, got 0x%lx\n", sig); - if (sig == SIGNATURE) - return PRI_BOARD; - - /* - * Try to identify a PRI card - */ - outb(BRI_BASEPG_VAL, pgport); - msleep_interruptible(1000); - sig = readl(rambase + SIG_OFFSET); - pr_debug("Looking for a signature, got 0x%lx\n", sig); - if (sig == SIGNATURE) - return BRI_BOARD; - - return -1; - - /* - * Try to spot a card - */ - sig = readl(rambase + SIG_OFFSET); - pr_debug("Looking for a signature, got 0x%lx\n", sig); - if (sig != SIGNATURE) - return -1; - - dpm = (DualPortMemory *) rambase; - - memset(&sndmsg, 0, MSG_LEN); - sndmsg.msg_byte_cnt = 3; - sndmsg.type = cmReqType1; - sndmsg.class = cmReqClass0; - sndmsg.code = cmReqHWConfig; - memcpy_toio(&(dpm->req_queue[dpm->req_head++]), &sndmsg, MSG_LEN); - outb(0, iobase + 0x400); - pr_debug("Sent HWConfig message\n"); - /* - * Wait for the response - */ - x = 0; - while ((inb(iobase + FIFOSTAT_OFFSET) & RF_HAS_DATA) && x < 100) { - schedule_timeout_interruptible(1); - x++; - } - if (x == 100) { - pr_debug("Timeout waiting for response\n"); - return -1; - } - - memcpy_fromio(&rcvmsg, &(dpm->rsp_queue[dpm->rsp_tail]), MSG_LEN); - pr_debug("Got HWConfig response, status = 0x%x\n", rcvmsg.rsp_status); - memcpy(&hwci, &(rcvmsg.msg_data.HWCresponse), sizeof(HWConfig_pl)); - pr_debug("Hardware Config: Interface: %s, RAM Size: %ld, Serial: %s\n" - " Part: %s, Rev: %s\n", - hwci.st_u_sense ? 
"S/T" : "U", hwci.ram_size, - hwci.serial_no, hwci.part_no, hwci.rev_no); - - if (!strncmp(PRI_PARTNO, hwci.part_no, 6)) - return PRI_BOARD; - if (!strncmp(BRI_PARTNO, hwci.part_no, 6)) - return BRI_BOARD; - - return -1; -} - -module_init(sc_init); -module_exit(sc_exit); diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c deleted file mode 100644 index e80cc76bc314..000000000000 --- a/drivers/isdn/sc/interrupt.c +++ /dev/null @@ -1,247 +0,0 @@ -/* $Id: interrupt.c,v 1.4.8.3 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. - * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#include "includes.h" -#include "hardware.h" -#include "message.h" -#include "card.h" -#include <linux/interrupt.h> - -/* - * - */ -irqreturn_t interrupt_handler(int dummy, void *card_inst) -{ - - RspMessage rcvmsg; - int channel; - int card = (int)(unsigned long) card_inst; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return IRQ_NONE; - } - - pr_debug("%s: Entered Interrupt handler\n", - sc_adapter[card]->devicename); - - /* - * Pull all of the waiting messages off the response queue - */ - while (!receivemessage(card, &rcvmsg)) { - /* - * Push the message to the adapter structure for - * send_and_receive to snoop - */ - if (sc_adapter[card]->want_async_messages) - memcpy(&(sc_adapter[card]->async_msg), - &rcvmsg, sizeof(RspMessage)); - - channel = (unsigned int) rcvmsg.phy_link_no; - - /* - * Trap Invalid request messages - */ - if (IS_CM_MESSAGE(rcvmsg, 0, 0, Invalid)) { - pr_debug("%s: Invalid request Message, rsp_status = %d\n", - sc_adapter[card]->devicename, - rcvmsg.rsp_status); - break; - } - - /* - * Check for a linkRead message - */ - if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Read)) - { - pr_debug("%s: Received packet 0x%x bytes long at 0x%lx\n", - sc_adapter[card]->devicename, - rcvmsg.msg_data.response.msg_len, - rcvmsg.msg_data.response.buff_offset); - rcvpkt(card, &rcvmsg); - continue; - - } - - /* - * Handle a write acknoledgement - */ - if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Write)) { - pr_debug("%s: Packet Send ACK on channel %d\n", - sc_adapter[card]->devicename, - rcvmsg.phy_link_no); - sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].free_sendbufs++; - continue; - } - - /* - * Handle a connection message - */ - if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Connect)) - { - unsigned int callid; - setup_parm setup; - pr_debug("%s: Connect message: line %d: status %d: cause 0x%x\n", - sc_adapter[card]->devicename, - rcvmsg.phy_link_no, - rcvmsg.rsp_status, - rcvmsg.msg_data.byte_array[2]); - - memcpy(&callid, rcvmsg.msg_data.byte_array, sizeof(int)); - if (callid >= 0x8000 && callid <= 0xFFFF) - { - pr_debug("%s: Got Dial-Out Rsp\n", - sc_adapter[card]->devicename); - indicate_status(card, ISDN_STAT_DCONN, - (unsigned long)rcvmsg.phy_link_no - 1, NULL); - - } - else if (callid >= 0x0000 && callid <= 0x7FFF) - { - int len; - - pr_debug("%s: Got Incoming Call\n", - sc_adapter[card]->devicename); - len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]), - sizeof(setup.phone)); - if (len >= sizeof(setup.phone)) - continue; - len = strlcpy(setup.eazmsn, - 
sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, - sizeof(setup.eazmsn)); - if (len >= sizeof(setup.eazmsn)) - continue; - setup.si1 = 7; - setup.si2 = 0; - setup.plan = 0; - setup.screen = 0; - - indicate_status(card, ISDN_STAT_ICALL, (unsigned long)rcvmsg.phy_link_no - 1, (char *)&setup); - indicate_status(card, ISDN_STAT_DCONN, (unsigned long)rcvmsg.phy_link_no - 1, NULL); - } - continue; - } - - /* - * Handle a disconnection message - */ - if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Disconnect)) - { - pr_debug("%s: disconnect message: line %d: status %d: cause 0x%x\n", - sc_adapter[card]->devicename, - rcvmsg.phy_link_no, - rcvmsg.rsp_status, - rcvmsg.msg_data.byte_array[2]); - - indicate_status(card, ISDN_STAT_BHUP, (unsigned long)rcvmsg.phy_link_no - 1, NULL); - indicate_status(card, ISDN_STAT_DHUP, (unsigned long)rcvmsg.phy_link_no - 1, NULL); - continue; - - } - - /* - * Handle a startProc engine up message - */ - if (IS_CM_MESSAGE(rcvmsg, 5, 0, MiscEngineUp)) { - pr_debug("%s: Received EngineUp message\n", - sc_adapter[card]->devicename); - sc_adapter[card]->EngineUp = 1; - sendmessage(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetMyNumber, 1, 0, NULL); - sendmessage(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetMyNumber, 2, 0, NULL); - init_timer(&sc_adapter[card]->stat_timer); - sc_adapter[card]->stat_timer.function = check_phystat; - sc_adapter[card]->stat_timer.data = card; - sc_adapter[card]->stat_timer.expires = jiffies + CHECKSTAT_TIME; - add_timer(&sc_adapter[card]->stat_timer); - continue; - } - - /* - * Start proc response - */ - if (IS_CM_MESSAGE(rcvmsg, 2, 0, StartProc)) { - pr_debug("%s: StartProc Response Status %d\n", - sc_adapter[card]->devicename, - rcvmsg.rsp_status); - continue; - } - - /* - * Handle a GetMyNumber Rsp - */ - if (IS_CE_MESSAGE(rcvmsg, Call, 0, GetMyNumber)) { - strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, - rcvmsg.msg_data.byte_array, - sizeof(rcvmsg.msg_data.byte_array)); - continue; - } - - /* - * PhyStatus response - */ - if (IS_CE_MESSAGE(rcvmsg, Phy, 2, Status)) { - unsigned int b1stat, b2stat; - - /* - * Covert the message data to the adapter->phystat code - */ - b1stat = (unsigned int) rcvmsg.msg_data.byte_array[0]; - b2stat = (unsigned int) rcvmsg.msg_data.byte_array[1]; - - sc_adapter[card]->nphystat = (b2stat >> 8) | b1stat; /* endian?? */ - pr_debug("%s: PhyStat is 0x%2x\n", - sc_adapter[card]->devicename, - sc_adapter[card]->nphystat); - continue; - } - - - /* - * Handle a GetFramFormat - */ - if (IS_CE_MESSAGE(rcvmsg, Call, 0, GetFrameFormat)) { - if (rcvmsg.msg_data.byte_array[0] != HDLC_PROTO) { - unsigned int proto = HDLC_PROTO; - /* - * Set board format to HDLC if it wasn't already - */ - pr_debug("%s: current frame format: 0x%x, will change to HDLC\n", - sc_adapter[card]->devicename, - rcvmsg.msg_data.byte_array[0]); - sendmessage(card, CEPID, ceReqTypeCall, - ceReqClass0, - ceReqCallSetFrameFormat, - (unsigned char)channel + 1, - 1, &proto); - } - continue; - } - - /* - * Hmm... 
- */ - pr_debug("%s: Received unhandled message (%d,%d,%d) link %d\n", - sc_adapter[card]->devicename, - rcvmsg.type, rcvmsg.class, rcvmsg.code, - rcvmsg.phy_link_no); - - } /* while */ - - pr_debug("%s: Exiting Interrupt Handler\n", - sc_adapter[card]->devicename); - return IRQ_HANDLED; -} diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c deleted file mode 100644 index e63983aa1d27..000000000000 --- a/drivers/isdn/sc/ioctl.c +++ /dev/null @@ -1,582 +0,0 @@ -/* - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#include "includes.h" -#include "hardware.h" -#include "message.h" -#include "card.h" -#include "scioc.h" - -static int GetStatus(int card, boardInfo *); - -/* - * Process private IOCTL messages (typically from scctrl) - */ -int sc_ioctl(int card, scs_ioctl *data) -{ - int status; - RspMessage *rcvmsg; - char *spid; - char *dn; - char switchtype; - char speed; - - rcvmsg = kmalloc(sizeof(RspMessage), GFP_KERNEL); - if (!rcvmsg) - return -ENOMEM; - - switch (data->command) { - case SCIOCRESET: /* Perform a hard reset of the adapter */ - { - pr_debug("%s: SCIOCRESET: ioctl received\n", - sc_adapter[card]->devicename); - sc_adapter[card]->StartOnReset = 0; - kfree(rcvmsg); - return reset(card); - } - - case SCIOCLOAD: - { - char *srec; - - srec = kmalloc(SCIOC_SRECSIZE, GFP_KERNEL); - if (!srec) { - kfree(rcvmsg); - return -ENOMEM; - } - pr_debug("%s: SCIOLOAD: ioctl received\n", - sc_adapter[card]->devicename); - if (sc_adapter[card]->EngineUp) { - pr_debug("%s: SCIOCLOAD: command failed, LoadProc while engine running.\n", - sc_adapter[card]->devicename); - kfree(rcvmsg); - kfree(srec); - return -1; - } - - /* - * Get the SRec from user space - */ - if (copy_from_user(srec, data->dataptr, SCIOC_SRECSIZE)) { - kfree(rcvmsg); - kfree(srec); - return -EFAULT; - } - - status = send_and_receive(card, CMPID, cmReqType2, cmReqClass0, cmReqLoadProc, - 0, SCIOC_SRECSIZE, srec, rcvmsg, SAR_TIMEOUT); - kfree(rcvmsg); - kfree(srec); - - if (status) { - pr_debug("%s: SCIOCLOAD: command failed, status = %d\n", - sc_adapter[card]->devicename, status); - return -1; - } - else { - pr_debug("%s: SCIOCLOAD: command successful\n", - sc_adapter[card]->devicename); - return 0; - } - } - - case SCIOCSTART: - { - kfree(rcvmsg); - pr_debug("%s: SCIOSTART: ioctl received\n", - sc_adapter[card]->devicename); - if (sc_adapter[card]->EngineUp) { - pr_debug("%s: SCIOCSTART: command failed, engine already running.\n", - sc_adapter[card]->devicename); - return -1; - } - - sc_adapter[card]->StartOnReset = 1; - startproc(card); - return 0; - } - - case SCIOCSETSWITCH: - { - pr_debug("%s: SCIOSETSWITCH: ioctl received\n", - sc_adapter[card]->devicename); - - /* - * Get the switch type from user space - */ - if (copy_from_user(&switchtype, data->dataptr, sizeof(char))) { - kfree(rcvmsg); - return -EFAULT; - } - - pr_debug("%s: SCIOCSETSWITCH: setting switch type to %d\n", - sc_adapter[card]->devicename, - switchtype); - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallSetSwitchType, - 0, sizeof(char), &switchtype, rcvmsg, SAR_TIMEOUT); - if (!status && !(rcvmsg->rsp_status)) { - pr_debug("%s: SCIOCSETSWITCH: command successful\n", - sc_adapter[card]->devicename); - kfree(rcvmsg); - return 0; - } - else { - pr_debug("%s: SCIOCSETSWITCH: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - 
kfree(rcvmsg); - return status; - } - } - - case SCIOCGETSWITCH: - { - pr_debug("%s: SCIOGETSWITCH: ioctl received\n", - sc_adapter[card]->devicename); - - /* - * Get the switch type from the board - */ - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, - ceReqCallGetSwitchType, 0, 0, NULL, rcvmsg, SAR_TIMEOUT); - if (!status && !(rcvmsg->rsp_status)) { - pr_debug("%s: SCIOCGETSWITCH: command successful\n", - sc_adapter[card]->devicename); - } - else { - pr_debug("%s: SCIOCGETSWITCH: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - kfree(rcvmsg); - return status; - } - - switchtype = rcvmsg->msg_data.byte_array[0]; - - /* - * Package the switch type and send to user space - */ - if (copy_to_user(data->dataptr, &switchtype, - sizeof(char))) { - kfree(rcvmsg); - return -EFAULT; - } - - kfree(rcvmsg); - return 0; - } - - case SCIOCGETSPID: - { - pr_debug("%s: SCIOGETSPID: ioctl received\n", - sc_adapter[card]->devicename); - - spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL); - if (!spid) { - kfree(rcvmsg); - return -ENOMEM; - } - /* - * Get the spid from the board - */ - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetSPID, - data->channel, 0, NULL, rcvmsg, SAR_TIMEOUT); - if (!status) { - pr_debug("%s: SCIOCGETSPID: command successful\n", - sc_adapter[card]->devicename); - } else { - pr_debug("%s: SCIOCGETSPID: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - kfree(spid); - kfree(rcvmsg); - return status; - } - strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE); - - /* - * Package the switch type and send to user space - */ - if (copy_to_user(data->dataptr, spid, SCIOC_SPIDSIZE)) { - kfree(spid); - kfree(rcvmsg); - return -EFAULT; - } - - kfree(spid); - kfree(rcvmsg); - return 0; - } - - case SCIOCSETSPID: - { - pr_debug("%s: DCBIOSETSPID: ioctl received\n", - sc_adapter[card]->devicename); - - /* - * Get the spid from user space - */ - spid = memdup_user(data->dataptr, SCIOC_SPIDSIZE); - if (IS_ERR(spid)) { - kfree(rcvmsg); - return PTR_ERR(spid); - } - - pr_debug("%s: SCIOCSETSPID: setting channel %d spid to %s\n", - sc_adapter[card]->devicename, data->channel, spid); - status = send_and_receive(card, CEPID, ceReqTypeCall, - ceReqClass0, ceReqCallSetSPID, data->channel, - strlen(spid), spid, rcvmsg, SAR_TIMEOUT); - if (!status && !(rcvmsg->rsp_status)) { - pr_debug("%s: SCIOCSETSPID: command successful\n", - sc_adapter[card]->devicename); - kfree(rcvmsg); - kfree(spid); - return 0; - } - else { - pr_debug("%s: SCIOCSETSPID: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - kfree(rcvmsg); - kfree(spid); - return status; - } - } - - case SCIOCGETDN: - { - pr_debug("%s: SCIOGETDN: ioctl received\n", - sc_adapter[card]->devicename); - - /* - * Get the dn from the board - */ - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetMyNumber, - data->channel, 0, NULL, rcvmsg, SAR_TIMEOUT); - if (!status) { - pr_debug("%s: SCIOCGETDN: command successful\n", - sc_adapter[card]->devicename); - } - else { - pr_debug("%s: SCIOCGETDN: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - kfree(rcvmsg); - return status; - } - - dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL); - if (!dn) { - kfree(rcvmsg); - return -ENOMEM; - } - strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE); - kfree(rcvmsg); - - /* - * Package the dn and send to user space - */ - if (copy_to_user(data->dataptr, dn, SCIOC_DNSIZE)) { - kfree(dn); - return -EFAULT; 
- } - kfree(dn); - return 0; - } - - case SCIOCSETDN: - { - pr_debug("%s: SCIOSETDN: ioctl received\n", - sc_adapter[card]->devicename); - - /* - * Get the spid from user space - */ - dn = memdup_user(data->dataptr, SCIOC_DNSIZE); - if (IS_ERR(dn)) { - kfree(rcvmsg); - return PTR_ERR(dn); - } - - pr_debug("%s: SCIOCSETDN: setting channel %d dn to %s\n", - sc_adapter[card]->devicename, data->channel, dn); - status = send_and_receive(card, CEPID, ceReqTypeCall, - ceReqClass0, ceReqCallSetMyNumber, data->channel, - strlen(dn), dn, rcvmsg, SAR_TIMEOUT); - if (!status && !(rcvmsg->rsp_status)) { - pr_debug("%s: SCIOCSETDN: command successful\n", - sc_adapter[card]->devicename); - kfree(rcvmsg); - kfree(dn); - return 0; - } - else { - pr_debug("%s: SCIOCSETDN: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - kfree(rcvmsg); - kfree(dn); - return status; - } - } - - case SCIOCTRACE: - - pr_debug("%s: SCIOTRACE: ioctl received\n", - sc_adapter[card]->devicename); -/* sc_adapter[card]->trace = !sc_adapter[card]->trace; - pr_debug("%s: SCIOCTRACE: tracing turned %s\n", - sc_adapter[card]->devicename, - sc_adapter[card]->trace ? "ON" : "OFF"); */ - break; - - case SCIOCSTAT: - { - boardInfo *bi; - - pr_debug("%s: SCIOSTAT: ioctl received\n", - sc_adapter[card]->devicename); - - bi = kzalloc(sizeof(boardInfo), GFP_KERNEL); - if (!bi) { - kfree(rcvmsg); - return -ENOMEM; - } - - kfree(rcvmsg); - GetStatus(card, bi); - - if (copy_to_user(data->dataptr, bi, sizeof(boardInfo))) { - kfree(bi); - return -EFAULT; - } - - kfree(bi); - return 0; - } - - case SCIOCGETSPEED: - { - pr_debug("%s: SCIOGETSPEED: ioctl received\n", - sc_adapter[card]->devicename); - - /* - * Get the speed from the board - */ - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, - ceReqCallGetCallType, data->channel, 0, NULL, rcvmsg, SAR_TIMEOUT); - if (!status && !(rcvmsg->rsp_status)) { - pr_debug("%s: SCIOCGETSPEED: command successful\n", - sc_adapter[card]->devicename); - } - else { - pr_debug("%s: SCIOCGETSPEED: command failed (status = %d)\n", - sc_adapter[card]->devicename, status); - kfree(rcvmsg); - return status; - } - - speed = rcvmsg->msg_data.byte_array[0]; - - kfree(rcvmsg); - - /* - * Package the switch type and send to user space - */ - - if (copy_to_user(data->dataptr, &speed, sizeof(char))) - return -EFAULT; - - return 0; - } - - case SCIOCSETSPEED: - pr_debug("%s: SCIOCSETSPEED: ioctl received\n", - sc_adapter[card]->devicename); - break; - - case SCIOCLOOPTST: - pr_debug("%s: SCIOCLOOPTST: ioctl received\n", - sc_adapter[card]->devicename); - break; - - default: - kfree(rcvmsg); - return -1; - } - - kfree(rcvmsg); - return 0; -} - -static int GetStatus(int card, boardInfo *bi) -{ - RspMessage rcvmsg; - int i, status; - - /* - * Fill in some of the basic info about the board - */ - bi->modelid = sc_adapter[card]->model; - strcpy(bi->serial_no, sc_adapter[card]->hwconfig.serial_no); - strcpy(bi->part_no, sc_adapter[card]->hwconfig.part_no); - bi->iobase = sc_adapter[card]->iobase; - bi->rambase = sc_adapter[card]->rambase; - bi->irq = sc_adapter[card]->interrupt; - bi->ramsize = sc_adapter[card]->hwconfig.ram_size; - bi->interface = sc_adapter[card]->hwconfig.st_u_sense; - strcpy(bi->load_ver, sc_adapter[card]->load_ver); - strcpy(bi->proc_ver, sc_adapter[card]->proc_ver); - - /* - * Get the current PhyStats and LnkStats - */ - status = send_and_receive(card, CEPID, ceReqTypePhy, ceReqClass2, - ceReqPhyStatus, 0, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - if 
(sc_adapter[card]->model < PRI_BOARD) { - bi->l1_status = rcvmsg.msg_data.byte_array[2]; - for (i = 0; i < BRI_CHANNELS; i++) - bi->status.bristats[i].phy_stat = - rcvmsg.msg_data.byte_array[i]; - } - else { - bi->l1_status = rcvmsg.msg_data.byte_array[0]; - bi->l2_status = rcvmsg.msg_data.byte_array[1]; - for (i = 0; i < PRI_CHANNELS; i++) - bi->status.pristats[i].phy_stat = - rcvmsg.msg_data.byte_array[i + 2]; - } - } - - /* - * Get the call types for each channel - */ - for (i = 0; i < sc_adapter[card]->nChannels; i++) { - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, - ceReqCallGetCallType, 0, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - if (sc_adapter[card]->model == PRI_BOARD) { - bi->status.pristats[i].call_type = - rcvmsg.msg_data.byte_array[0]; - } - else { - bi->status.bristats[i].call_type = - rcvmsg.msg_data.byte_array[0]; - } - } - } - - /* - * If PRI, get the call states and service states for each channel - */ - if (sc_adapter[card]->model == PRI_BOARD) { - /* - * Get the call states - */ - status = send_and_receive(card, CEPID, ceReqTypeStat, ceReqClass2, - ceReqPhyChCallState, 0, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - for (i = 0; i < PRI_CHANNELS; i++) - bi->status.pristats[i].call_state = - rcvmsg.msg_data.byte_array[i]; - } - - /* - * Get the service states - */ - status = send_and_receive(card, CEPID, ceReqTypeStat, ceReqClass2, - ceReqPhyChServState, 0, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - for (i = 0; i < PRI_CHANNELS; i++) - bi->status.pristats[i].serv_state = - rcvmsg.msg_data.byte_array[i]; - } - - /* - * Get the link stats for the channels - */ - for (i = 1; i <= PRI_CHANNELS; i++) { - status = send_and_receive(card, CEPID, ceReqTypeLnk, ceReqClass0, - ceReqLnkGetStats, i, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - bi->status.pristats[i - 1].link_stats.tx_good = - (unsigned long)rcvmsg.msg_data.byte_array[0]; - bi->status.pristats[i - 1].link_stats.tx_bad = - (unsigned long)rcvmsg.msg_data.byte_array[4]; - bi->status.pristats[i - 1].link_stats.rx_good = - (unsigned long)rcvmsg.msg_data.byte_array[8]; - bi->status.pristats[i - 1].link_stats.rx_bad = - (unsigned long)rcvmsg.msg_data.byte_array[12]; - } - } - - /* - * Link stats for the D channel - */ - status = send_and_receive(card, CEPID, ceReqTypeLnk, ceReqClass0, - ceReqLnkGetStats, 0, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - bi->dch_stats.tx_good = (unsigned long)rcvmsg.msg_data.byte_array[0]; - bi->dch_stats.tx_bad = (unsigned long)rcvmsg.msg_data.byte_array[4]; - bi->dch_stats.rx_good = (unsigned long)rcvmsg.msg_data.byte_array[8]; - bi->dch_stats.rx_bad = (unsigned long)rcvmsg.msg_data.byte_array[12]; - } - - return 0; - } - - /* - * If BRI or POTS, Get SPID, DN and call types for each channel - */ - - /* - * Get the link stats for the channels - */ - status = send_and_receive(card, CEPID, ceReqTypeLnk, ceReqClass0, - ceReqLnkGetStats, 0, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) { - bi->dch_stats.tx_good = (unsigned long)rcvmsg.msg_data.byte_array[0]; - bi->dch_stats.tx_bad = (unsigned long)rcvmsg.msg_data.byte_array[4]; - bi->dch_stats.rx_good = (unsigned long)rcvmsg.msg_data.byte_array[8]; - bi->dch_stats.rx_bad = (unsigned long)rcvmsg.msg_data.byte_array[12]; - bi->status.bristats[0].link_stats.tx_good = - (unsigned long)rcvmsg.msg_data.byte_array[16]; - bi->status.bristats[0].link_stats.tx_bad = - (unsigned long)rcvmsg.msg_data.byte_array[20]; - bi->status.bristats[0].link_stats.rx_good = - (unsigned 
long)rcvmsg.msg_data.byte_array[24]; - bi->status.bristats[0].link_stats.rx_bad = - (unsigned long)rcvmsg.msg_data.byte_array[28]; - bi->status.bristats[1].link_stats.tx_good = - (unsigned long)rcvmsg.msg_data.byte_array[32]; - bi->status.bristats[1].link_stats.tx_bad = - (unsigned long)rcvmsg.msg_data.byte_array[36]; - bi->status.bristats[1].link_stats.rx_good = - (unsigned long)rcvmsg.msg_data.byte_array[40]; - bi->status.bristats[1].link_stats.rx_bad = - (unsigned long)rcvmsg.msg_data.byte_array[44]; - } - - /* - * Get the SPIDs - */ - for (i = 0; i < BRI_CHANNELS; i++) { - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, - ceReqCallGetSPID, i + 1, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) - strcpy(bi->status.bristats[i].spid, rcvmsg.msg_data.byte_array); - } - - /* - * Get the DNs - */ - for (i = 0; i < BRI_CHANNELS; i++) { - status = send_and_receive(card, CEPID, ceReqTypeCall, ceReqClass0, - ceReqCallGetMyNumber, i + 1, 0, NULL, &rcvmsg, SAR_TIMEOUT); - if (!status) - strcpy(bi->status.bristats[i].dn, rcvmsg.msg_data.byte_array); - } - - return 0; -} diff --git a/drivers/isdn/sc/message.c b/drivers/isdn/sc/message.c deleted file mode 100644 index 9679a1902b32..000000000000 --- a/drivers/isdn/sc/message.c +++ /dev/null @@ -1,230 +0,0 @@ -/* $Id: message.c,v 1.5.8.2 2001/09/23 22:24:59 kai Exp $ - * - * functions for sending and receiving control messages - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. - * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ -#include <linux/sched.h> -#include "includes.h" -#include "hardware.h" -#include "message.h" -#include "card.h" - -/* - * receive a message from the board - */ -int receivemessage(int card, RspMessage *rspmsg) -{ - DualPortMemory *dpm; - unsigned long flags; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -EINVAL; - } - - pr_debug("%s: Entered receivemessage\n", - sc_adapter[card]->devicename); - - /* - * See if there are messages waiting - */ - if (inb(sc_adapter[card]->ioport[FIFO_STATUS]) & RF_HAS_DATA) { - /* - * Map in the DPM to the base page and copy the message - */ - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - outb((sc_adapter[card]->shmem_magic >> 14) | 0x80, - sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); - dpm = (DualPortMemory *) sc_adapter[card]->rambase; - memcpy_fromio(rspmsg, &(dpm->rsp_queue[dpm->rsp_tail]), - MSG_LEN); - dpm->rsp_tail = (dpm->rsp_tail + 1) % MAX_MESSAGES; - inb(sc_adapter[card]->ioport[FIFO_READ]); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - /* - * Tell the board that the message is received - */ - pr_debug("%s: Received Message seq:%d pid:%d time:%d cmd:%d " - "cnt:%d (type,class,code):(%d,%d,%d) " - "link:%d stat:0x%x\n", - sc_adapter[card]->devicename, - rspmsg->sequence_no, - rspmsg->process_id, - rspmsg->time_stamp, - rspmsg->cmd_sequence_no, - rspmsg->msg_byte_cnt, - rspmsg->type, - rspmsg->class, - rspmsg->code, - rspmsg->phy_link_no, - rspmsg->rsp_status); - - return 0; - } - return -ENOMSG; -} - -/* - * send a message to the board - */ -int sendmessage(int card, - unsigned int procid, - unsigned int type, - 
unsigned int class, - unsigned int code, - unsigned int link, - unsigned int data_len, - unsigned int *data) -{ - DualPortMemory *dpm; - ReqMessage sndmsg; - unsigned long flags; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -EINVAL; - } - - /* - * Make sure we only send CEPID messages when the engine is up - * and CMPID messages when it is down - */ - if (sc_adapter[card]->EngineUp && procid == CMPID) { - pr_debug("%s: Attempt to send CM message with engine up\n", - sc_adapter[card]->devicename); - return -ESRCH; - } - - if (!sc_adapter[card]->EngineUp && procid == CEPID) { - pr_debug("%s: Attempt to send CE message with engine down\n", - sc_adapter[card]->devicename); - return -ESRCH; - } - - memset(&sndmsg, 0, MSG_LEN); - sndmsg.msg_byte_cnt = 4; - sndmsg.type = type; - sndmsg.class = class; - sndmsg.code = code; - sndmsg.phy_link_no = link; - - if (data_len > 0) { - if (data_len > MSG_DATA_LEN) - data_len = MSG_DATA_LEN; - memcpy(&(sndmsg.msg_data), data, data_len); - sndmsg.msg_byte_cnt = data_len + 8; - } - - sndmsg.process_id = procid; - sndmsg.sequence_no = sc_adapter[card]->seq_no++ % 256; - - /* - * wait for an empty slot in the queue - */ - while (!(inb(sc_adapter[card]->ioport[FIFO_STATUS]) & WF_NOT_FULL)) - udelay(1); - - /* - * Disable interrupts and map in shared memory - */ - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - outb((sc_adapter[card]->shmem_magic >> 14) | 0x80, - sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); - dpm = (DualPortMemory *) sc_adapter[card]->rambase; /* Fix me */ - memcpy_toio(&(dpm->req_queue[dpm->req_head]), &sndmsg, MSG_LEN); - dpm->req_head = (dpm->req_head + 1) % MAX_MESSAGES; - outb(sndmsg.sequence_no, sc_adapter[card]->ioport[FIFO_WRITE]); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - - pr_debug("%s: Sent Message seq:%d pid:%d time:%d " - "cnt:%d (type,class,code):(%d,%d,%d) " - "link:%d\n ", - sc_adapter[card]->devicename, - sndmsg.sequence_no, - sndmsg.process_id, - sndmsg.time_stamp, - sndmsg.msg_byte_cnt, - sndmsg.type, - sndmsg.class, - sndmsg.code, - sndmsg.phy_link_no); - - return 0; -} - -int send_and_receive(int card, - unsigned int procid, - unsigned char type, - unsigned char class, - unsigned char code, - unsigned char link, - unsigned char data_len, - unsigned char *data, - RspMessage *mesgdata, - int timeout) -{ - int retval; - int tries; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return -EINVAL; - } - - sc_adapter[card]->want_async_messages = 1; - retval = sendmessage(card, procid, type, class, code, link, - data_len, (unsigned int *) data); - - if (retval) { - pr_debug("%s: SendMessage failed in SAR\n", - sc_adapter[card]->devicename); - sc_adapter[card]->want_async_messages = 0; - return -EIO; - } - - tries = 0; - /* wait for the response */ - while (tries < timeout) { - schedule_timeout_interruptible(1); - - pr_debug("SAR waiting..\n"); - - /* - * See if we got our message back - */ - if ((sc_adapter[card]->async_msg.type == type) && - (sc_adapter[card]->async_msg.class == class) && - (sc_adapter[card]->async_msg.code == code) && - (sc_adapter[card]->async_msg.phy_link_no == link)) { - - /* - * Got it! 
- */ - pr_debug("%s: Got ASYNC message\n", - sc_adapter[card]->devicename); - memcpy(mesgdata, &(sc_adapter[card]->async_msg), - sizeof(RspMessage)); - sc_adapter[card]->want_async_messages = 0; - return 0; - } - - tries++; - } - - pr_debug("%s: SAR message timeout\n", sc_adapter[card]->devicename); - sc_adapter[card]->want_async_messages = 0; - return -ETIME; -} diff --git a/drivers/isdn/sc/message.h b/drivers/isdn/sc/message.h deleted file mode 100644 index 5e6f4a5c15f8..000000000000 --- a/drivers/isdn/sc/message.h +++ /dev/null @@ -1,245 +0,0 @@ -/* $Id: message.h,v 1.1.10.1 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * structures, macros and defines useful for sending - * messages to the adapter - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. - * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -/* - * Board message macros, defines and structures - */ - -#ifndef MESSAGE_H -#define MESSAGE_H - -#define MAX_MESSAGES 32 /* Maximum messages that can be - queued */ -#define MSG_DATA_LEN 48 /* Maximum size of message payload */ -#define MSG_LEN 64 /* Size of a message */ -#define CMPID 0 /* Loader message process ID */ -#define CEPID 64 /* Firmware message process ID */ - -/* - * Macro to determine if a message is a loader message - */ -#define IS_CM_MESSAGE(mesg, tx, cx, dx) \ - ((mesg.type == cmRspType##tx) \ - && (mesg.class == cmRspClass##cx) \ - && (mesg.code == cmRsp##dx)) - -/* - * Macro to determine if a message is a firmware message - */ -#define IS_CE_MESSAGE(mesg, tx, cx, dx) \ - ((mesg.type == ceRspType##tx) \ - && (mesg.class == ceRspClass##cx) \ - && (mesg.code == ceRsp##tx##dx)) - -/* - * Loader Request and Response Messages - */ - -/* message types */ -#define cmReqType1 1 -#define cmReqType2 2 -#define cmRspType0 0 -#define cmRspType1 1 -#define cmRspType2 2 -#define cmRspType5 5 - -/* message classes */ -#define cmReqClass0 0 -#define cmRspClass0 0 - -/* message codes */ -#define cmReqHWConfig 1 /* 1,0,1 */ -#define cmReqMsgLpbk 2 /* 1,0,2 */ -#define cmReqVersion 3 /* 1,0,3 */ -#define cmReqLoadProc 1 /* 2,0,1 */ -#define cmReqStartProc 2 /* 2,0,2 */ -#define cmReqReadMem 6 /* 2,0,6 */ -#define cmRspHWConfig cmReqHWConfig -#define cmRspMsgLpbk cmReqMsgLpbk -#define cmRspVersion cmReqVersion -#define cmRspLoadProc cmReqLoadProc -#define cmRspStartProc cmReqStartProc -#define cmRspReadMem cmReqReadMem -#define cmRspMiscEngineUp 1 /* 5,0,1 */ -#define cmRspInvalid 0 /* 0,0,0 */ - - -/* - * Firmware Request and Response Messages - */ - -/* message types */ -#define ceReqTypePhy 1 -#define ceReqTypeLnk 2 -#define ceReqTypeCall 3 -#define ceReqTypeStat 1 -#define ceRspTypeErr 0 -#define ceRspTypePhy ceReqTypePhy -#define ceRspTypeLnk ceReqTypeLnk -#define ceRspTypeCall ceReqTypeCall -#define ceRspTypeStat ceReqTypeStat - -/* message classes */ -#define ceReqClass0 0 -#define ceReqClass1 1 -#define ceReqClass2 2 -#define ceReqClass3 3 -#define ceRspClass0 ceReqClass0 -#define ceRspClass1 ceReqClass1 -#define ceRspClass2 ceReqClass2 -#define ceRspClass3 ceReqClass3 - -/* message codes (B) = BRI only, (P) = PRI only, (V) = POTS only */ -#define ceReqPhyProcInfo 1 /* 1,0,1 */ -#define ceReqPhyConnect 1 /* 1,1,1 */ -#define 
ceReqPhyDisconnect 2 /* 1,1,2 */ -#define ceReqPhySetParams 3 /* 1,1,3 (P) */ -#define ceReqPhyGetParams 4 /* 1,1,4 (P) */ -#define ceReqPhyStatus 1 /* 1,2,1 */ -#define ceReqPhyAcfaStatus 2 /* 1,2,2 (P) */ -#define ceReqPhyChCallState 3 /* 1,2,3 (P) */ -#define ceReqPhyChServState 4 /* 1,2,4 (P) */ -#define ceReqPhyRLoopBack 1 /* 1,3,1 */ -#define ceRspPhyProcInfo ceReqPhyProcInfo -#define ceRspPhyConnect ceReqPhyConnect -#define ceRspPhyDisconnect ceReqPhyDisconnect -#define ceRspPhySetParams ceReqPhySetParams -#define ceRspPhyGetParams ceReqPhyGetParams -#define ceRspPhyStatus ceReqPhyStatus -#define ceRspPhyAcfaStatus ceReqPhyAcfaStatus -#define ceRspPhyChCallState ceReqPhyChCallState -#define ceRspPhyChServState ceReqPhyChServState -#define ceRspPhyRLoopBack ceReqphyRLoopBack -#define ceReqLnkSetParam 1 /* 2,0,1 */ -#define ceReqLnkGetParam 2 /* 2,0,2 */ -#define ceReqLnkGetStats 3 /* 2,0,3 */ -#define ceReqLnkWrite 1 /* 2,1,1 */ -#define ceReqLnkRead 2 /* 2,1,2 */ -#define ceReqLnkFlush 3 /* 2,1,3 */ -#define ceReqLnkWrBufTrc 4 /* 2,1,4 */ -#define ceReqLnkRdBufTrc 5 /* 2,1,5 */ -#define ceRspLnkSetParam ceReqLnkSetParam -#define ceRspLnkGetParam ceReqLnkGetParam -#define ceRspLnkGetStats ceReqLnkGetStats -#define ceRspLnkWrite ceReqLnkWrite -#define ceRspLnkRead ceReqLnkRead -#define ceRspLnkFlush ceReqLnkFlush -#define ceRspLnkWrBufTrc ceReqLnkWrBufTrc -#define ceRspLnkRdBufTrc ceReqLnkRdBufTrc -#define ceReqCallSetSwitchType 1 /* 3,0,1 */ -#define ceReqCallGetSwitchType 2 /* 3,0,2 */ -#define ceReqCallSetFrameFormat 3 /* 3,0,3 */ -#define ceReqCallGetFrameFormat 4 /* 3,0,4 */ -#define ceReqCallSetCallType 5 /* 3,0,5 */ -#define ceReqCallGetCallType 6 /* 3,0,6 */ -#define ceReqCallSetSPID 7 /* 3,0,7 (!P) */ -#define ceReqCallGetSPID 8 /* 3,0,8 (!P) */ -#define ceReqCallSetMyNumber 9 /* 3,0,9 (!P) */ -#define ceReqCallGetMyNumber 10 /* 3,0,10 (!P) */ -#define ceRspCallSetSwitchType ceReqCallSetSwitchType -#define ceRspCallGetSwitchType ceReqCallSetSwitchType -#define ceRspCallSetFrameFormat ceReqCallSetFrameFormat -#define ceRspCallGetFrameFormat ceReqCallGetFrameFormat -#define ceRspCallSetCallType ceReqCallSetCallType -#define ceRspCallGetCallType ceReqCallGetCallType -#define ceRspCallSetSPID ceReqCallSetSPID -#define ceRspCallGetSPID ceReqCallGetSPID -#define ceRspCallSetMyNumber ceReqCallSetMyNumber -#define ceRspCallGetMyNumber ceReqCallGetMyNumber -#define ceRspStatAcfaStatus 2 -#define ceRspStat -#define ceRspErrError 0 /* 0,0,0 */ - -/* - * Call Types - */ -#define CALLTYPE_64K 0 -#define CALLTYPE_56K 1 -#define CALLTYPE_SPEECH 2 -#define CALLTYPE_31KHZ 3 - -/* - * Link Level data contains a pointer to and the length of - * a buffer in shared RAM. Used by LnkRead and LnkWrite message - * types. Part of RspMsgStruct and ReqMsgStruct. 
- */ -typedef struct { - unsigned long buff_offset; - unsigned short msg_len; -} LLData; - - -/* - * Message payload template for an HWConfig message - */ -typedef struct { - char st_u_sense; - char powr_sense; - char sply_sense; - unsigned char asic_id; - long ram_size; - char serial_no[13]; - char part_no[13]; - char rev_no[2]; -} HWConfig_pl; - -/* - * A Message - */ -struct message { - unsigned char sequence_no; - unsigned char process_id; - unsigned char time_stamp; - unsigned char cmd_sequence_no; /* Rsp messages only */ - unsigned char reserved1[3]; - unsigned char msg_byte_cnt; - unsigned char type; - unsigned char class; - unsigned char code; - unsigned char phy_link_no; - unsigned char rsp_status; /* Rsp messages only */ - unsigned char reseved2[3]; - union { - unsigned char byte_array[MSG_DATA_LEN]; - LLData response; - HWConfig_pl HWCresponse; - } msg_data; -}; - -typedef struct message ReqMessage; /* Request message */ -typedef struct message RspMessage; /* Response message */ - -/* - * The first 5010 bytes of shared memory contain the message queues, - * indexes and other data. This structure is its template - */ -typedef struct { - volatile ReqMessage req_queue[MAX_MESSAGES]; - volatile RspMessage rsp_queue[MAX_MESSAGES]; - volatile unsigned char req_head; - volatile unsigned char req_tail; - volatile unsigned char rsp_head; - volatile unsigned char rsp_tail; - volatile unsigned long signature; - volatile unsigned long trace_enable; - volatile unsigned char reserved[4]; -} DualPortMemory; - -#endif diff --git a/drivers/isdn/sc/packet.c b/drivers/isdn/sc/packet.c deleted file mode 100644 index 2446957085e0..000000000000 --- a/drivers/isdn/sc/packet.c +++ /dev/null @@ -1,204 +0,0 @@ -/* $Id: packet.c,v 1.5.8.1 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. 
- * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#include "includes.h" -#include "hardware.h" -#include "message.h" -#include "card.h" - -int sndpkt(int devId, int channel, int ack, struct sk_buff *data) -{ - LLData ReqLnkWrite; - int status; - int card; - unsigned long len; - - card = get_card_from_id(devId); - - if (!IS_VALID_CARD(card)) { - pr_debug("invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - pr_debug("%s: sndpkt: frst = 0x%lx nxt = %d f = %d n = %d\n", - sc_adapter[card]->devicename, - sc_adapter[card]->channel[channel].first_sendbuf, - sc_adapter[card]->channel[channel].next_sendbuf, - sc_adapter[card]->channel[channel].free_sendbufs, - sc_adapter[card]->channel[channel].num_sendbufs); - - if (!sc_adapter[card]->channel[channel].free_sendbufs) { - pr_debug("%s: out of TX buffers\n", - sc_adapter[card]->devicename); - return -EINVAL; - } - - if (data->len > BUFFER_SIZE) { - pr_debug("%s: data overflows buffer size (data > buffer)\n", - sc_adapter[card]->devicename); - return -EINVAL; - } - - ReqLnkWrite.buff_offset = sc_adapter[card]->channel[channel].next_sendbuf * - BUFFER_SIZE + sc_adapter[card]->channel[channel].first_sendbuf; - ReqLnkWrite.msg_len = data->len; /* sk_buff size */ - pr_debug("%s: writing %d bytes to buffer offset 0x%lx\n", - sc_adapter[card]->devicename, - ReqLnkWrite.msg_len, ReqLnkWrite.buff_offset); - memcpy_toshmem(card, (char *)ReqLnkWrite.buff_offset, data->data, ReqLnkWrite.msg_len); - - /* - * sendmessage - */ - pr_debug("%s: sndpkt size=%d, buf_offset=0x%lx buf_indx=%d\n", - sc_adapter[card]->devicename, - ReqLnkWrite.msg_len, ReqLnkWrite.buff_offset, - sc_adapter[card]->channel[channel].next_sendbuf); - - status = sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkWrite, - channel + 1, sizeof(LLData), (unsigned int *)&ReqLnkWrite); - len = data->len; - if (status) { - pr_debug("%s: failed to send packet, status = %d\n", - sc_adapter[card]->devicename, status); - return -1; - } - else { - sc_adapter[card]->channel[channel].free_sendbufs--; - sc_adapter[card]->channel[channel].next_sendbuf = - ++sc_adapter[card]->channel[channel].next_sendbuf == - sc_adapter[card]->channel[channel].num_sendbufs ? 
0 : - sc_adapter[card]->channel[channel].next_sendbuf; - pr_debug("%s: packet sent successfully\n", sc_adapter[card]->devicename); - dev_kfree_skb(data); - indicate_status(card, ISDN_STAT_BSENT, channel, (char *)&len); - } - return len; -} - -void rcvpkt(int card, RspMessage *rcvmsg) -{ - LLData newll; - struct sk_buff *skb; - - if (!IS_VALID_CARD(card)) { - pr_debug("invalid param: %d is not a valid card id\n", card); - return; - } - - switch (rcvmsg->rsp_status) { - case 0x01: - case 0x02: - case 0x70: - pr_debug("%s: error status code: 0x%x\n", - sc_adapter[card]->devicename, rcvmsg->rsp_status); - return; - case 0x00: - if (!(skb = dev_alloc_skb(rcvmsg->msg_data.response.msg_len))) { - printk(KERN_WARNING "%s: rcvpkt out of memory, dropping packet\n", - sc_adapter[card]->devicename); - return; - } - skb_put(skb, rcvmsg->msg_data.response.msg_len); - pr_debug("%s: getting data from offset: 0x%lx\n", - sc_adapter[card]->devicename, - rcvmsg->msg_data.response.buff_offset); - memcpy_fromshmem(card, - skb_put(skb, rcvmsg->msg_data.response.msg_len), - (char *)rcvmsg->msg_data.response.buff_offset, - rcvmsg->msg_data.response.msg_len); - sc_adapter[card]->card->rcvcallb_skb(sc_adapter[card]->driverId, - rcvmsg->phy_link_no - 1, skb); - - case 0x03: - /* - * Recycle the buffer - */ - pr_debug("%s: buffer size : %d\n", - sc_adapter[card]->devicename, BUFFER_SIZE); -/* memset_shmem(card, rcvmsg->msg_data.response.buff_offset, 0, BUFFER_SIZE); */ - newll.buff_offset = rcvmsg->msg_data.response.buff_offset; - newll.msg_len = BUFFER_SIZE; - pr_debug("%s: recycled buffer at offset 0x%lx size %d\n", - sc_adapter[card]->devicename, - newll.buff_offset, newll.msg_len); - sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkRead, - rcvmsg->phy_link_no, sizeof(LLData), (unsigned int *)&newll); - } - -} - -int setup_buffers(int card, int c) -{ - unsigned int nBuffers, i, cBase; - unsigned int buffer_size; - LLData RcvBuffOffset; - - if (!IS_VALID_CARD(card)) { - pr_debug("invalid param: %d is not a valid card id\n", card); - return -ENODEV; - } - - /* - * Calculate the buffer offsets (send/recv/send/recv) - */ - pr_debug("%s: setting up channel buffer space in shared RAM\n", - sc_adapter[card]->devicename); - buffer_size = BUFFER_SIZE; - nBuffers = ((sc_adapter[card]->ramsize - BUFFER_BASE) / buffer_size) / 2; - nBuffers = nBuffers > BUFFERS_MAX ? 
BUFFERS_MAX : nBuffers; - pr_debug("%s: calculating buffer space: %d buffers, %d big\n", - sc_adapter[card]->devicename, - nBuffers, buffer_size); - if (nBuffers < 2) { - pr_debug("%s: not enough buffer space\n", - sc_adapter[card]->devicename); - return -1; - } - cBase = (nBuffers * buffer_size) * (c - 1); - pr_debug("%s: channel buffer offset from shared RAM: 0x%x\n", - sc_adapter[card]->devicename, cBase); - sc_adapter[card]->channel[c - 1].first_sendbuf = BUFFER_BASE + cBase; - sc_adapter[card]->channel[c - 1].num_sendbufs = nBuffers / 2; - sc_adapter[card]->channel[c - 1].free_sendbufs = nBuffers / 2; - sc_adapter[card]->channel[c - 1].next_sendbuf = 0; - pr_debug("%s: send buffer setup complete: first=0x%lx n=%d f=%d, nxt=%d\n", - sc_adapter[card]->devicename, - sc_adapter[card]->channel[c - 1].first_sendbuf, - sc_adapter[card]->channel[c - 1].num_sendbufs, - sc_adapter[card]->channel[c - 1].free_sendbufs, - sc_adapter[card]->channel[c - 1].next_sendbuf); - - /* - * Prep the receive buffers - */ - pr_debug("%s: adding %d RecvBuffers:\n", - sc_adapter[card]->devicename, nBuffers / 2); - for (i = 0; i < nBuffers / 2; i++) { - RcvBuffOffset.buff_offset = - ((sc_adapter[card]->channel[c - 1].first_sendbuf + - (nBuffers / 2) * buffer_size) + (buffer_size * i)); - RcvBuffOffset.msg_len = buffer_size; - pr_debug("%s: adding RcvBuffer #%d offset=0x%lx sz=%d bufsz:%d\n", - sc_adapter[card]->devicename, - i + 1, RcvBuffOffset.buff_offset, - RcvBuffOffset.msg_len, buffer_size); - sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkRead, - c, sizeof(LLData), (unsigned int *)&RcvBuffOffset); - } - return 0; -} diff --git a/drivers/isdn/sc/scioc.h b/drivers/isdn/sc/scioc.h deleted file mode 100644 index a50e143779e7..000000000000 --- a/drivers/isdn/sc/scioc.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef __ISDN_SC_SCIOC_H__ -#define __ISDN_SC_SCIOC_H__ - -/* - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
- */ - -/* - * IOCTL Command Codes - */ -#define SCIOCLOAD 0x01 /* Load a firmware record */ -#define SCIOCRESET 0x02 /* Perform hard reset */ -#define SCIOCDEBUG 0x03 /* Set debug level */ -#define SCIOCREV 0x04 /* Get driver revision(s) */ -#define SCIOCSTART 0x05 /* Start the firmware */ -#define SCIOCGETSWITCH 0x06 /* Get switch type */ -#define SCIOCSETSWITCH 0x07 /* Set switch type */ -#define SCIOCGETSPID 0x08 /* Get channel SPID */ -#define SCIOCSETSPID 0x09 /* Set channel SPID */ -#define SCIOCGETDN 0x0A /* Get channel DN */ -#define SCIOCSETDN 0x0B /* Set channel DN */ -#define SCIOCTRACE 0x0C /* Toggle trace mode */ -#define SCIOCSTAT 0x0D /* Get line status */ -#define SCIOCGETSPEED 0x0E /* Set channel speed */ -#define SCIOCSETSPEED 0x0F /* Set channel speed */ -#define SCIOCLOOPTST 0x10 /* Perform loopback test */ - -typedef struct { - int device; - int channel; - unsigned long command; - void __user *dataptr; -} scs_ioctl; - -/* Size of strings */ -#define SCIOC_SPIDSIZE 49 -#define SCIOC_DNSIZE SCIOC_SPIDSIZE -#define SCIOC_REVSIZE SCIOC_SPIDSIZE -#define SCIOC_SRECSIZE 49 - -typedef struct { - unsigned long tx_good; - unsigned long tx_bad; - unsigned long rx_good; - unsigned long rx_bad; -} ChLinkStats; - -typedef struct { - char spid[49]; - char dn[49]; - char call_type; - char phy_stat; - ChLinkStats link_stats; -} BRIStat; - -typedef BRIStat POTStat; - -typedef struct { - char call_type; - char call_state; - char serv_state; - char phy_stat; - ChLinkStats link_stats; -} PRIStat; - -typedef char PRIInfo; -typedef char BRIInfo; -typedef char POTInfo; - - -typedef struct { - char acfa_nos; - char acfa_ais; - char acfa_los; - char acfa_rra; - char acfa_slpp; - char acfa_slpn; - char acfa_fsrf; -} ACFAStat; - -typedef struct { - unsigned char modelid; - char serial_no[13]; - char part_no[13]; - char load_ver[11]; - char proc_ver[11]; - int iobase; - long rambase; - char irq; - long ramsize; - char interface; - char switch_type; - char l1_status; - char l2_status; - ChLinkStats dch_stats; - ACFAStat AcfaStats; - union { - PRIStat pristats[23]; - BRIStat bristats[2]; - POTStat potsstats[2]; - } status; - union { - PRIInfo priinfo; - BRIInfo briinfo; - POTInfo potsinfo; - } info; -} boardInfo; - -#endif /* __ISDN_SC_SCIOC_H__ */ diff --git a/drivers/isdn/sc/shmem.c b/drivers/isdn/sc/shmem.c deleted file mode 100644 index d24506ceb6e8..000000000000 --- a/drivers/isdn/sc/shmem.c +++ /dev/null @@ -1,138 +0,0 @@ -/* $Id: shmem.c,v 1.2.10.1 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * Card functions implementing ISDN4Linux functionality - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. 
- * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#include "includes.h" /* This must be first */ -#include "hardware.h" -#include "card.h" - -/* - * - */ -void memcpy_toshmem(int card, void *dest, const void *src, size_t n) -{ - unsigned long flags; - unsigned char ch; - unsigned long dest_rem = ((unsigned long) dest) % 0x4000; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return; - } - - if (n > SRAM_PAGESIZE) - return; - - /* - * determine the page to load from the address - */ - ch = (unsigned long) dest / SRAM_PAGESIZE; - pr_debug("%s: loaded page %d\n", sc_adapter[card]->devicename, ch); - /* - * Block interrupts and load the page - */ - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - - outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, - sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); - memcpy_toio((void __iomem *)(sc_adapter[card]->rambase + dest_rem), src, n); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - pr_debug("%s: set page to %#x\n", sc_adapter[card]->devicename, - ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80); - pr_debug("%s: copying %zu bytes from %#lx to %#lx\n", - sc_adapter[card]->devicename, n, - (unsigned long) src, - sc_adapter[card]->rambase + ((unsigned long) dest % 0x4000)); -} - -/* - * Reverse of above - */ -void memcpy_fromshmem(int card, void *dest, const void *src, size_t n) -{ - unsigned long flags; - unsigned char ch; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return; - } - - if (n > SRAM_PAGESIZE) { - return; - } - - /* - * determine the page to load from the address - */ - ch = (unsigned long) src / SRAM_PAGESIZE; - pr_debug("%s: loaded page %d\n", sc_adapter[card]->devicename, ch); - - - /* - * Block interrupts and load the page - */ - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - - outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, - sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); - memcpy_fromio(dest, (void *)(sc_adapter[card]->rambase + - ((unsigned long) src % 0x4000)), n); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - pr_debug("%s: set page to %#x\n", sc_adapter[card]->devicename, - ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80); -/* pr_debug("%s: copying %d bytes from %#x to %#x\n", - sc_adapter[card]->devicename, n, - sc_adapter[card]->rambase + ((unsigned long) src %0x4000), (unsigned long) dest); */ -} - -#if 0 -void memset_shmem(int card, void *dest, int c, size_t n) -{ - unsigned long flags; - unsigned char ch; - - if (!IS_VALID_CARD(card)) { - pr_debug("Invalid param: %d is not a valid card id\n", card); - return; - } - - if (n > SRAM_PAGESIZE) { - return; - } - - /* - * determine the page to load from the address - */ - ch = (unsigned long) dest / SRAM_PAGESIZE; - pr_debug("%s: loaded page %d\n", sc_adapter[card]->devicename, ch); - - /* - * Block interrupts and load the page - */ - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - - outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, - sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); - memset_io(sc_adapter[card]->rambase + - ((unsigned long) dest % 0x4000), c, n); - pr_debug("%s: set page to %#x\n", sc_adapter[card]->devicename, - ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80); - 
spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); -} -#endif /* 0 */ diff --git a/drivers/isdn/sc/timer.c b/drivers/isdn/sc/timer.c deleted file mode 100644 index 6fbac2230d7e..000000000000 --- a/drivers/isdn/sc/timer.c +++ /dev/null @@ -1,122 +0,0 @@ -/* $Id: timer.c,v 1.3.6.1 2001/09/23 22:24:59 kai Exp $ - * - * Copyright (C) 1996 SpellCaster Telecommunications Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For more information, please contact gpl-info@spellcast.com or write: - * - * SpellCaster Telecommunications Inc. - * 5621 Finch Avenue East, Unit #3 - * Scarborough, Ontario Canada - * M1B 2T9 - * +1 (416) 297-8565 - * +1 (416) 297-6433 Facsimile - */ - -#include "includes.h" -#include "hardware.h" -#include "message.h" -#include "card.h" - - -/* - * Write the proper values into the I/O ports following a reset - */ -static void setup_ports(int card) -{ - - outb((sc_adapter[card]->rambase >> 12), sc_adapter[card]->ioport[EXP_BASE]); - - /* And the IRQ */ - outb((sc_adapter[card]->interrupt | 0x80), - sc_adapter[card]->ioport[IRQ_SELECT]); -} - -/* - * Timed function to check the status of a previous reset - * Must be very fast as this function runs in the context of - * an interrupt handler. - * - * Setup the ioports for the board that were cleared by the reset. - * Then, check to see if the signate has been set. Next, set the - * signature to a known value and issue a startproc if needed. - */ -void sc_check_reset(unsigned long data) -{ - unsigned long flags; - unsigned long sig; - int card = (unsigned int) data; - - pr_debug("%s: check_timer timer called\n", - sc_adapter[card]->devicename); - - /* Setup the io ports */ - setup_ports(card); - - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - outb(sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport], - (sc_adapter[card]->shmem_magic >> 14) | 0x80); - sig = (unsigned long) *((unsigned long *)(sc_adapter[card]->rambase + SIG_OFFSET)); - - /* check the signature */ - if (sig == SIGNATURE) { - flushreadfifo(card); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - /* See if we need to do a startproc */ - if (sc_adapter[card]->StartOnReset) - startproc(card); - } else { - pr_debug("%s: No signature yet, waiting another %lu jiffies.\n", - sc_adapter[card]->devicename, CHECKRESET_TIME); - mod_timer(&sc_adapter[card]->reset_timer, jiffies + CHECKRESET_TIME); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - } -} - -/* - * Timed function to check the status of a previous reset - * Must be very fast as this function runs in the context of - * an interrupt handler. - * - * Send check sc_adapter->phystat to see if the channels are up - * If they are, tell ISDN4Linux that the board is up. If not, - * tell IADN4Linux that it is up. Always reset the timer to - * fire again (endless loop). 
- */ -void check_phystat(unsigned long data) -{ - unsigned long flags; - int card = (unsigned int) data; - - pr_debug("%s: Checking status...\n", sc_adapter[card]->devicename); - /* - * check the results of the last PhyStat and change only if - * has changed drastically - */ - if (sc_adapter[card]->nphystat && !sc_adapter[card]->phystat) { /* All is well */ - pr_debug("PhyStat transition to RUN\n"); - pr_info("%s: Switch contacted, transmitter enabled\n", - sc_adapter[card]->devicename); - indicate_status(card, ISDN_STAT_RUN, 0, NULL); - } - else if (!sc_adapter[card]->nphystat && sc_adapter[card]->phystat) { /* All is not well */ - pr_debug("PhyStat transition to STOP\n"); - pr_info("%s: Switch connection lost, transmitter disabled\n", - sc_adapter[card]->devicename); - - indicate_status(card, ISDN_STAT_STOP, 0, NULL); - } - - sc_adapter[card]->phystat = sc_adapter[card]->nphystat; - - /* Reinitialize the timer */ - spin_lock_irqsave(&sc_adapter[card]->lock, flags); - mod_timer(&sc_adapter[card]->stat_timer, jiffies + CHECKSTAT_TIME); - spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); - - /* Send a new cePhyStatus message */ - sendmessage(card, CEPID, ceReqTypePhy, ceReqClass2, - ceReqPhyStatus, 0, 0, NULL); -} diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index b1ab8bdf8251..7f940c24a16b 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -52,6 +52,7 @@ config LEDS_AAT1290 config LEDS_BCM6328 tristate "LED Support for Broadcom BCM6328" depends on LEDS_CLASS + depends on HAS_IOMEM depends on OF help This option enables support for LEDs connected to the BCM6328 @@ -60,6 +61,7 @@ config LEDS_BCM6328 config LEDS_BCM6358 tristate "LED Support for Broadcom BCM6358" depends on LEDS_CLASS + depends on HAS_IOMEM depends on OF help This option enables support for LEDs connected to the BCM6358 diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c index 3b2573411a37..cf398275a53c 100644 --- a/drivers/leds/led-class-flash.c +++ b/drivers/leds/led-class-flash.c @@ -108,7 +108,7 @@ static ssize_t flash_strobe_store(struct device *dev, if (ret) goto unlock; - if (state < 0 || state > 1) { + if (state > 1) { ret = -EINVAL; goto unlock; } @@ -298,7 +298,7 @@ int led_classdev_flash_register(struct device *parent, led_cdev = &fled_cdev->led_cdev; if (led_cdev->flags & LED_DEV_CAP_FLASH) { - if (!led_cdev->brightness_set_sync) + if (!led_cdev->brightness_set_blocking) return -EINVAL; ops = fled_cdev->ops; @@ -316,10 +316,6 @@ int led_classdev_flash_register(struct device *parent, if (ret < 0) return ret; - /* Setting a torch brightness needs to have immediate effect */ - led_cdev->flags &= ~SET_BRIGHTNESS_ASYNC; - led_cdev->flags |= SET_BRIGHTNESS_SYNC; - return 0; } EXPORT_SYMBOL_GPL(led_classdev_flash_register); diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 7385f98dd54b..14139c337312 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -109,7 +109,7 @@ static const struct attribute_group *led_groups[] = { void led_classdev_suspend(struct led_classdev *led_cdev) { led_cdev->flags |= LED_SUSPENDED; - led_cdev->brightness_set(led_cdev, 0); + led_set_brightness_nopm(led_cdev, 0); } EXPORT_SYMBOL_GPL(led_classdev_suspend); @@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(led_classdev_suspend); */ void led_classdev_resume(struct led_classdev *led_cdev) { - led_cdev->brightness_set(led_cdev, led_cdev->brightness); + led_set_brightness_nopm(led_cdev, led_cdev->brightness); if (led_cdev->flash_resume) 
led_cdev->flash_resume(led_cdev); @@ -215,8 +215,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) if (!led_cdev->max_brightness) led_cdev->max_brightness = LED_FULL; - led_cdev->flags |= SET_BRIGHTNESS_ASYNC; - led_update_brightness(led_cdev); led_init_core(led_cdev); @@ -247,12 +245,13 @@ void led_classdev_unregister(struct led_classdev *led_cdev) up_write(&led_cdev->trigger_lock); #endif - cancel_work_sync(&led_cdev->set_brightness_work); - /* Stop blinking */ led_stop_software_blink(led_cdev); + led_set_brightness(led_cdev, LED_OFF); + flush_work(&led_cdev->set_brightness_work); + device_unregister(led_cdev->dev); down_write(&leds_list_lock); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index c1c3af089634..19e1e60dfaa3 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -32,7 +32,7 @@ static void led_timer_function(unsigned long data) unsigned long delay; if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { - led_set_brightness_async(led_cdev, LED_OFF); + led_set_brightness_nosleep(led_cdev, LED_OFF); return; } @@ -44,23 +44,23 @@ static void led_timer_function(unsigned long data) brightness = led_get_brightness(led_cdev); if (!brightness) { /* Time to switch the LED on. */ - if (led_cdev->delayed_set_value) { - led_cdev->blink_brightness = - led_cdev->delayed_set_value; - led_cdev->delayed_set_value = 0; - } brightness = led_cdev->blink_brightness; delay = led_cdev->blink_delay_on; } else { /* Store the current brightness value to be able * to restore it when the delay_off period is over. + * Do it only if there is no pending blink brightness + * change, to avoid overwriting the new value. */ - led_cdev->blink_brightness = brightness; + if (!(led_cdev->flags & LED_BLINK_BRIGHTNESS_CHANGE)) + led_cdev->blink_brightness = brightness; + else + led_cdev->flags &= ~LED_BLINK_BRIGHTNESS_CHANGE; brightness = LED_OFF; delay = led_cdev->blink_delay_off; } - led_set_brightness_async(led_cdev, brightness); + led_set_brightness_nosleep(led_cdev, brightness); /* Return in next iteration if led is in one-shot mode and we are in * the final blink state so that the led is toggled each delay_on + @@ -83,10 +83,24 @@ static void set_brightness_delayed(struct work_struct *ws) { struct led_classdev *led_cdev = container_of(ws, struct led_classdev, set_brightness_work); + int ret = 0; - led_stop_software_blink(led_cdev); + if (led_cdev->flags & LED_BLINK_DISABLE) { + led_cdev->delayed_set_value = LED_OFF; + led_stop_software_blink(led_cdev); + led_cdev->flags &= ~LED_BLINK_DISABLE; + } - led_set_brightness_async(led_cdev, led_cdev->delayed_set_value); + if (led_cdev->brightness_set) + led_cdev->brightness_set(led_cdev, led_cdev->delayed_set_value); + else if (led_cdev->brightness_set_blocking) + ret = led_cdev->brightness_set_blocking(led_cdev, + led_cdev->delayed_set_value); + else + ret = -ENOTSUPP; + if (ret < 0) + dev_err(led_cdev->dev, + "Setting an LED's brightness failed (%d)\n", ret); } static void led_set_software_blink(struct led_classdev *led_cdev, @@ -106,13 +120,14 @@ static void led_set_software_blink(struct led_classdev *led_cdev, /* never on - just set to off */ if (!delay_on) { - led_set_brightness_async(led_cdev, LED_OFF); + led_set_brightness_nosleep(led_cdev, LED_OFF); return; } /* never off - just set to brightness */ if (!delay_off) { - led_set_brightness_async(led_cdev, led_cdev->blink_brightness); + led_set_brightness_nosleep(led_cdev, + led_cdev->blink_brightness); return; } @@ -156,7 +171,7 @@ void 
led_blink_set(struct led_classdev *led_cdev, led_blink_setup(led_cdev, delay_on, delay_off); } -EXPORT_SYMBOL(led_blink_set); +EXPORT_SYMBOL_GPL(led_blink_set); void led_blink_set_oneshot(struct led_classdev *led_cdev, unsigned long *delay_on, @@ -177,7 +192,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev, led_blink_setup(led_cdev, delay_on, delay_off); } -EXPORT_SYMBOL(led_blink_set_oneshot); +EXPORT_SYMBOL_GPL(led_blink_set_oneshot); void led_stop_software_blink(struct led_classdev *led_cdev) { @@ -190,29 +205,74 @@ EXPORT_SYMBOL_GPL(led_stop_software_blink); void led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { - int ret = 0; - - /* delay brightness if soft-blink is active */ + /* + * In case blinking is on delay brightness setting + * until the next timer tick. + */ if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { - led_cdev->delayed_set_value = brightness; - if (brightness == LED_OFF) + /* + * If we need to disable soft blinking delegate this to the + * work queue task to avoid problems in case we are called + * from hard irq context. + */ + if (brightness == LED_OFF) { + led_cdev->flags |= LED_BLINK_DISABLE; schedule_work(&led_cdev->set_brightness_work); + } else { + led_cdev->flags |= LED_BLINK_BRIGHTNESS_CHANGE; + led_cdev->blink_brightness = brightness; + } return; } - if (led_cdev->flags & SET_BRIGHTNESS_ASYNC) { - led_set_brightness_async(led_cdev, brightness); + led_set_brightness_nosleep(led_cdev, brightness); +} +EXPORT_SYMBOL_GPL(led_set_brightness); + +void led_set_brightness_nopm(struct led_classdev *led_cdev, + enum led_brightness value) +{ + /* Use brightness_set op if available, it is guaranteed not to sleep */ + if (led_cdev->brightness_set) { + led_cdev->brightness_set(led_cdev, value); return; - } else if (led_cdev->flags & SET_BRIGHTNESS_SYNC) - ret = led_set_brightness_sync(led_cdev, brightness); - else - ret = -EINVAL; + } - if (ret < 0) - dev_dbg(led_cdev->dev, "Setting LED brightness failed (%d)\n", - ret); + /* If brightness setting can sleep, delegate it to a work queue task */ + led_cdev->delayed_set_value = value; + schedule_work(&led_cdev->set_brightness_work); +} +EXPORT_SYMBOL_GPL(led_set_brightness_nopm); + +void led_set_brightness_nosleep(struct led_classdev *led_cdev, + enum led_brightness value) +{ + led_cdev->brightness = min(value, led_cdev->max_brightness); + + if (led_cdev->flags & LED_SUSPENDED) + return; + + led_set_brightness_nopm(led_cdev, led_cdev->brightness); +} +EXPORT_SYMBOL_GPL(led_set_brightness_nosleep); + +int led_set_brightness_sync(struct led_classdev *led_cdev, + enum led_brightness value) +{ + if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) + return -EBUSY; + + led_cdev->brightness = min(value, led_cdev->max_brightness); + + if (led_cdev->flags & LED_SUSPENDED) + return 0; + + if (led_cdev->brightness_set_blocking) + return led_cdev->brightness_set_blocking(led_cdev, + led_cdev->brightness); + return -ENOTSUPP; } -EXPORT_SYMBOL(led_set_brightness); +EXPORT_SYMBOL_GPL(led_set_brightness_sync); int led_update_brightness(struct led_classdev *led_cdev) { @@ -228,7 +288,7 @@ int led_update_brightness(struct led_classdev *led_cdev) return ret; } -EXPORT_SYMBOL(led_update_brightness); +EXPORT_SYMBOL_GPL(led_update_brightness); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_disable(struct led_classdev *led_cdev) diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index e8b1120f486d..e1e933424ac9 100644 --- 
a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -249,6 +249,34 @@ void led_trigger_unregister(struct led_trigger *trig) } EXPORT_SYMBOL_GPL(led_trigger_unregister); +static void devm_led_trigger_release(struct device *dev, void *res) +{ + led_trigger_unregister(*(struct led_trigger **)res); +} + +int devm_led_trigger_register(struct device *dev, + struct led_trigger *trig) +{ + struct led_trigger **dr; + int rc; + + dr = devres_alloc(devm_led_trigger_release, sizeof(*dr), + GFP_KERNEL); + if (!dr) + return -ENOMEM; + + *dr = trig; + + rc = led_trigger_register(trig); + if (rc) + devres_free(dr); + else + devres_add(dev, dr); + + return rc; +} +EXPORT_SYMBOL_GPL(devm_led_trigger_register); + /* Simple LED Tigger Interface */ void led_trigger_event(struct led_trigger *trig, diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 7870840e7cc9..1ad4d03a0a3c 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -16,7 +16,6 @@ #include <linux/i2c.h> #include <linux/leds.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/mfd/88pm860x.h> #include <linux/module.h> @@ -33,7 +32,6 @@ struct pm860x_led { struct led_classdev cdev; struct i2c_client *i2c; - struct work_struct work; struct pm860x_chip *chip; struct mutex lock; char name[MFD_NAME_SIZE]; @@ -69,17 +67,18 @@ static int led_power_set(struct pm860x_chip *chip, int port, int on) return ret; } -static void pm860x_led_work(struct work_struct *work) +static int pm860x_led_set(struct led_classdev *cdev, + enum led_brightness value) { - - struct pm860x_led *led; + struct pm860x_led *led = container_of(cdev, struct pm860x_led, cdev); struct pm860x_chip *chip; unsigned char buf[3]; int ret; - led = container_of(work, struct pm860x_led, work); chip = led->chip; mutex_lock(&led->lock); + led->brightness = value >> 3; + if ((led->current_brightness == 0) && led->brightness) { led_power_set(chip, led->port, 1); if (led->iset) { @@ -112,15 +111,8 @@ static void pm860x_led_work(struct work_struct *work) dev_dbg(chip->dev, "Update LED. 
(reg:%d, brightness:%d)\n", led->reg_control, led->brightness); mutex_unlock(&led->lock); -} -static void pm860x_led_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev); - - data->brightness = value >> 3; - schedule_work(&data->work); + return 0; } #ifdef CONFIG_OF @@ -213,9 +205,8 @@ static int pm860x_led_probe(struct platform_device *pdev) data->current_brightness = 0; data->cdev.name = data->name; - data->cdev.brightness_set = pm860x_led_set; + data->cdev.brightness_set_blocking = pm860x_led_set; mutex_init(&data->lock); - INIT_WORK(&data->work, pm860x_led_work); ret = led_classdev_register(chip->dev, &data->cdev); if (ret < 0) { diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c index ac77d36b630c..def3cf9f7e92 100644 --- a/drivers/leds/leds-aat1290.c +++ b/drivers/leds/leds-aat1290.c @@ -20,7 +20,6 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <media/v4l2-flash-led-class.h> #define AAT1290_MOVIE_MODE_CURRENT_ADDR 17 @@ -82,8 +81,6 @@ struct aat1290_led { /* brightness cache */ unsigned int torch_brightness; - /* assures led-triggers compatibility */ - struct work_struct work_brightness_set; }; static struct aat1290_led *fled_cdev_to_led( @@ -92,6 +89,12 @@ static struct aat1290_led *fled_cdev_to_led( return container_of(fled_cdev, struct aat1290_led, fled_cdev); } +static struct led_classdev_flash *led_cdev_to_fled_cdev( + struct led_classdev *led_cdev) +{ + return container_of(led_cdev, struct led_classdev_flash, led_cdev); +} + static void aat1290_as2cwire_write(struct aat1290_led *led, int addr, int value) { int i; @@ -134,9 +137,14 @@ static void aat1290_set_flash_safety_timer(struct aat1290_led *led, flash_tm_reg); } -static void aat1290_brightness_set(struct aat1290_led *led, +/* LED subsystem callbacks */ + +static int aat1290_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { + struct led_classdev_flash *fled_cdev = led_cdev_to_fled_cdev(led_cdev); + struct aat1290_led *led = fled_cdev_to_led(fled_cdev); + mutex_lock(&led->lock); if (brightness == 0) { @@ -158,35 +166,6 @@ static void aat1290_brightness_set(struct aat1290_led *led, } mutex_unlock(&led->lock); -} - -/* LED subsystem callbacks */ - -static void aat1290_brightness_set_work(struct work_struct *work) -{ - struct aat1290_led *led = - container_of(work, struct aat1290_led, work_brightness_set); - - aat1290_brightness_set(led, led->torch_brightness); -} - -static void aat1290_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); - struct aat1290_led *led = fled_cdev_to_led(fled_cdev); - - led->torch_brightness = brightness; - schedule_work(&led->work_brightness_set); -} - -static int aat1290_led_brightness_set_sync(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); - struct aat1290_led *led = fled_cdev_to_led(fled_cdev); - - aat1290_brightness_set(led, brightness); return 0; } @@ -296,7 +275,7 @@ static int aat1290_led_parse_dt(struct aat1290_led *led, if (ret < 0) { dev_err(dev, "flash-max-microamp DT property missing\n"); - return ret; + goto err_parse_dt; } ret = of_property_read_u32(child_node, "flash-max-timeout-us", @@ -304,13 +283,14 @@ static int aat1290_led_parse_dt(struct aat1290_led *led, if (ret < 0) 
{ dev_err(dev, "flash-max-timeout-us DT property missing\n"); - return ret; + goto err_parse_dt; } - of_node_put(child_node); - *sub_node = child_node; +err_parse_dt: + of_node_put(child_node); + return ret; } @@ -509,11 +489,9 @@ static int aat1290_led_probe(struct platform_device *pdev) mutex_init(&led->lock); /* Initialize LED Flash class device */ - led_cdev->brightness_set = aat1290_led_brightness_set; - led_cdev->brightness_set_sync = aat1290_led_brightness_set_sync; + led_cdev->brightness_set_blocking = aat1290_led_brightness_set; led_cdev->max_brightness = led_cfg.max_brightness; led_cdev->flags |= LED_DEV_CAP_FLASH; - INIT_WORK(&led->work_brightness_set, aat1290_brightness_set_work); aat1290_init_flash_timeout(led, &led_cfg); @@ -548,7 +526,6 @@ static int aat1290_led_remove(struct platform_device *pdev) v4l2_flash_release(led->v4l2_flash); led_classdev_flash_unregister(&led->fled_cdev); - cancel_work_sync(&led->work_brightness_set); mutex_destroy(&led->lock); diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c index 07e66cae32d3..853b2d3bdb17 100644 --- a/drivers/leds/leds-adp5520.c +++ b/drivers/leds/leds-adp5520.c @@ -17,34 +17,24 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> -#include <linux/workqueue.h> #include <linux/mfd/adp5520.h> #include <linux/slab.h> struct adp5520_led { struct led_classdev cdev; - struct work_struct work; struct device *master; - enum led_brightness new_brightness; int id; int flags; }; -static void adp5520_led_work(struct work_struct *work) -{ - struct adp5520_led *led = container_of(work, struct adp5520_led, work); - adp5520_write(led->master, ADP5520_LED1_CURRENT + led->id - 1, - led->new_brightness >> 2); -} - -static void adp5520_led_set(struct led_classdev *led_cdev, +static int adp5520_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct adp5520_led *led; led = container_of(led_cdev, struct adp5520_led, cdev); - led->new_brightness = value; - schedule_work(&led->work); + return adp5520_write(led->master, ADP5520_LED1_CURRENT + led->id - 1, + value >> 2); } static int adp5520_led_setup(struct adp5520_led *led) @@ -135,7 +125,7 @@ static int adp5520_led_probe(struct platform_device *pdev) led_dat->cdev.name = cur_led->name; led_dat->cdev.default_trigger = cur_led->default_trigger; - led_dat->cdev.brightness_set = adp5520_led_set; + led_dat->cdev.brightness_set_blocking = adp5520_led_set; led_dat->cdev.brightness = LED_OFF; if (cur_led->flags & ADP5520_FLAG_LED_MASK) @@ -146,9 +136,6 @@ static int adp5520_led_probe(struct platform_device *pdev) led_dat->id = led_dat->flags & ADP5520_FLAG_LED_MASK; led_dat->master = pdev->dev.parent; - led_dat->new_brightness = LED_OFF; - - INIT_WORK(&led_dat->work, adp5520_led_work); ret = led_classdev_register(led_dat->master, &led_dat->cdev); if (ret) { @@ -170,10 +157,8 @@ static int adp5520_led_probe(struct platform_device *pdev) err: if (i > 0) { - for (i = i - 1; i >= 0; i--) { + for (i = i - 1; i >= 0; i--) led_classdev_unregister(&led[i].cdev); - cancel_work_sync(&led[i].work); - } } return ret; @@ -192,7 +177,6 @@ static int adp5520_led_remove(struct platform_device *pdev) for (i = 0; i < pdata->num_leds; i++) { led_classdev_unregister(&led[i].cdev); - cancel_work_sync(&led[i].work); } return 0; diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index c7ea5c626331..1548259297c1 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -42,16 +42,16 @@ #define BCM6328_LED_SHIFT_TEST 
BIT(30) #define BCM6328_LED_TEST BIT(31) #define BCM6328_INIT_MASK (BCM6328_SERIAL_LED_EN | \ - BCM6328_SERIAL_LED_MUX | \ + BCM6328_SERIAL_LED_MUX | \ BCM6328_SERIAL_LED_CLK_NPOL | \ BCM6328_SERIAL_LED_DATA_PPOL | \ BCM6328_SERIAL_LED_SHIFT_DIR) #define BCM6328_LED_MODE_MASK 3 -#define BCM6328_LED_MODE_OFF 0 +#define BCM6328_LED_MODE_ON 0 #define BCM6328_LED_MODE_FAST 1 #define BCM6328_LED_MODE_BLINK 2 -#define BCM6328_LED_MODE_ON 3 +#define BCM6328_LED_MODE_OFF 3 #define BCM6328_LED_SHIFT(X) ((X) << 1) /** @@ -76,12 +76,20 @@ struct bcm6328_led { static void bcm6328_led_write(void __iomem *reg, unsigned long data) { +#ifdef CONFIG_CPU_BIG_ENDIAN iowrite32be(data, reg); +#else + writel(data, reg); +#endif } static unsigned long bcm6328_led_read(void __iomem *reg) { +#ifdef CONFIG_CPU_BIG_ENDIAN return ioread32be(reg); +#else + return readl(reg); +#endif } /** @@ -126,34 +134,45 @@ static void bcm6328_led_set(struct led_classdev *led_cdev, *(led->blink_leds) &= ~BIT(led->pin); if ((led->active_low && value == LED_OFF) || (!led->active_low && value != LED_OFF)) - bcm6328_led_mode(led, BCM6328_LED_MODE_OFF); - else bcm6328_led_mode(led, BCM6328_LED_MODE_ON); + else + bcm6328_led_mode(led, BCM6328_LED_MODE_OFF); spin_unlock_irqrestore(led->lock, flags); } +static unsigned long bcm6328_blink_delay(unsigned long delay) +{ + unsigned long bcm6328_delay; + + bcm6328_delay = delay + BCM6328_LED_INTERVAL_MS / 2; + bcm6328_delay = bcm6328_delay / BCM6328_LED_INTERVAL_MS; + if (bcm6328_delay == 0) + bcm6328_delay = 1; + + return bcm6328_delay; +} + static int bcm6328_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct bcm6328_led *led = container_of(led_cdev, struct bcm6328_led, cdev); unsigned long delay, flags; + int rc; if (!*delay_on) *delay_on = BCM6328_LED_DEF_DELAY; if (!*delay_off) *delay_off = BCM6328_LED_DEF_DELAY; - if (*delay_on != *delay_off) { + delay = bcm6328_blink_delay(*delay_on); + if (delay != bcm6328_blink_delay(*delay_off)) { dev_dbg(led_cdev->dev, "fallback to soft blinking (delay_on != delay_off)\n"); return -EINVAL; } - delay = *delay_on / BCM6328_LED_INTERVAL_MS; - if (delay == 0) - delay = 1; - else if (delay > BCM6328_LED_INTV_MASK) { + if (delay > BCM6328_LED_INTV_MASK) { dev_dbg(led_cdev->dev, "fallback to soft blinking (delay > %ums)\n", BCM6328_LED_INTV_MASK * BCM6328_LED_INTERVAL_MS); @@ -175,16 +194,15 @@ static int bcm6328_blink_set(struct led_classdev *led_cdev, bcm6328_led_write(led->mem + BCM6328_REG_INIT, val); bcm6328_led_mode(led, BCM6328_LED_MODE_BLINK); - - spin_unlock_irqrestore(led->lock, flags); + rc = 0; } else { - spin_unlock_irqrestore(led->lock, flags); dev_dbg(led_cdev->dev, "fallback to soft blinking (delay already set)\n"); - return -EINVAL; + rc = -EINVAL; } + spin_unlock_irqrestore(led->lock, flags); - return 0; + return rc; } static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, @@ -264,7 +282,6 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, unsigned long *blink_leds, unsigned long *blink_delay) { struct bcm6328_led *led; - unsigned long flags; const char *state; int rc; @@ -286,7 +303,6 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, "linux,default-trigger", NULL); - spin_lock_irqsave(lock, flags); if (!of_property_read_string(nc, "default-state", &state)) { if (!strcmp(state, "on")) { led->cdev.brightness = LED_FULL; @@ -303,8 +319,8 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 
reg, val = bcm6328_led_read(mode) >> BCM6328_LED_SHIFT(shift % 16); val &= BCM6328_LED_MODE_MASK; - if ((led->active_low && val == BCM6328_LED_MODE_ON) || - (!led->active_low && val == BCM6328_LED_MODE_OFF)) + if ((led->active_low && val == BCM6328_LED_MODE_OFF) || + (!led->active_low && val == BCM6328_LED_MODE_ON)) led->cdev.brightness = LED_FULL; else led->cdev.brightness = LED_OFF; @@ -315,12 +331,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, led->cdev.brightness = LED_OFF; } - if ((led->active_low && led->cdev.brightness == LED_FULL) || - (!led->active_low && led->cdev.brightness == LED_OFF)) - bcm6328_led_mode(led, BCM6328_LED_MODE_ON); - else - bcm6328_led_mode(led, BCM6328_LED_MODE_OFF); - spin_unlock_irqrestore(lock, flags); + bcm6328_led_set(&led->cdev, led->cdev.brightness); led->cdev.brightness_set = bcm6328_led_set; led->cdev.blink_set = bcm6328_blink_set; @@ -341,7 +352,7 @@ static int bcm6328_leds_probe(struct platform_device *pdev) struct device_node *child; struct resource *mem_r; void __iomem *mem; - spinlock_t *lock; + spinlock_t *lock; /* memory lock */ unsigned long val, *blink_leds, *blink_delay; mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c index 82b4ee1bc87e..b2cc06618abe 100644 --- a/drivers/leds/leds-bcm6358.c +++ b/drivers/leds/leds-bcm6358.c @@ -49,12 +49,20 @@ struct bcm6358_led { static void bcm6358_led_write(void __iomem *reg, unsigned long data) { +#ifdef CONFIG_CPU_BIG_ENDIAN iowrite32be(data, reg); +#else + writel(data, reg); +#endif } static unsigned long bcm6358_led_read(void __iomem *reg) { +#ifdef CONFIG_CPU_BIG_ENDIAN return ioread32be(reg); +#else + return readl(reg); +#endif } static unsigned long bcm6358_led_busy(void __iomem *mem) @@ -68,12 +76,15 @@ static unsigned long bcm6358_led_busy(void __iomem *mem) return val; } -static void bcm6358_led_mode(struct bcm6358_led *led, unsigned long value) +static void bcm6358_led_set(struct led_classdev *led_cdev, + enum led_brightness value) { - unsigned long val; + struct bcm6358_led *led = + container_of(led_cdev, struct bcm6358_led, cdev); + unsigned long flags, val; + spin_lock_irqsave(led->lock, flags); bcm6358_led_busy(led->mem); - val = bcm6358_led_read(led->mem + BCM6358_REG_MODE); if ((led->active_low && value == LED_OFF) || (!led->active_low && value != LED_OFF)) @@ -81,17 +92,6 @@ static void bcm6358_led_mode(struct bcm6358_led *led, unsigned long value) else val &= ~(BIT(led->pin)); bcm6358_led_write(led->mem + BCM6358_REG_MODE, val); -} - -static void bcm6358_led_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct bcm6358_led *led = - container_of(led_cdev, struct bcm6358_led, cdev); - unsigned long flags; - - spin_lock_irqsave(led->lock, flags); - bcm6358_led_mode(led, value); spin_unlock_irqrestore(led->lock, flags); } @@ -99,7 +99,6 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, void __iomem *mem, spinlock_t *lock) { struct bcm6358_led *led; - unsigned long flags; const char *state; int rc; @@ -119,15 +118,11 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, "linux,default-trigger", NULL); - spin_lock_irqsave(lock, flags); if (!of_property_read_string(nc, "default-state", &state)) { if (!strcmp(state, "on")) { led->cdev.brightness = LED_FULL; } else if (!strcmp(state, "keep")) { unsigned long val; - - bcm6358_led_busy(led->mem); - val = bcm6358_led_read(led->mem + BCM6358_REG_MODE); val &= 
BIT(led->pin); if ((led->active_low && !val) || @@ -141,8 +136,8 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, } else { led->cdev.brightness = LED_OFF; } - bcm6358_led_mode(led, led->cdev.brightness); - spin_unlock_irqrestore(lock, flags); + + bcm6358_led_set(&led->cdev, led->cdev.brightness); led->cdev.brightness_set = bcm6358_led_set; diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 6078c15d3452..6b4de762a760 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c @@ -72,7 +72,6 @@ struct bd2802_led { struct bd2802_led_platform_data *pdata; struct i2c_client *client; struct rw_semaphore rwsem; - struct work_struct work; struct led_state led[2]; @@ -518,29 +517,22 @@ static struct device_attribute *bd2802_attributes[] = { &bd2802_rgb_current_attr, }; -static void bd2802_led_work(struct work_struct *work) -{ - struct bd2802_led *led = container_of(work, struct bd2802_led, work); - - if (led->state) - bd2802_turn_on(led, led->led_id, led->color, led->state); - else - bd2802_turn_off(led, led->led_id, led->color); -} - #define BD2802_CONTROL_RGBS(name, id, clr) \ -static void bd2802_set_##name##_brightness(struct led_classdev *led_cdev,\ +static int bd2802_set_##name##_brightness(struct led_classdev *led_cdev,\ enum led_brightness value) \ { \ struct bd2802_led *led = \ container_of(led_cdev, struct bd2802_led, cdev_##name); \ led->led_id = id; \ led->color = clr; \ - if (value == LED_OFF) \ + if (value == LED_OFF) { \ led->state = BD2802_OFF; \ - else \ + bd2802_turn_off(led, led->led_id, led->color); \ + } else { \ led->state = BD2802_ON; \ - schedule_work(&led->work); \ + bd2802_turn_on(led, led->led_id, led->color, BD2802_ON);\ + } \ + return 0; \ } \ static int bd2802_set_##name##_blink(struct led_classdev *led_cdev, \ unsigned long *delay_on, unsigned long *delay_off) \ @@ -552,7 +544,7 @@ static int bd2802_set_##name##_blink(struct led_classdev *led_cdev, \ led->led_id = id; \ led->color = clr; \ led->state = BD2802_BLINK; \ - schedule_work(&led->work); \ + bd2802_turn_on(led, led->led_id, led->color, BD2802_BLINK); \ return 0; \ } @@ -567,11 +559,9 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) { int ret; - INIT_WORK(&led->work, bd2802_led_work); - led->cdev_led1r.name = "led1_R"; led->cdev_led1r.brightness = LED_OFF; - led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness; + led->cdev_led1r.brightness_set_blocking = bd2802_set_led1r_brightness; led->cdev_led1r.blink_set = bd2802_set_led1r_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led1r); @@ -583,7 +573,7 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led1g.name = "led1_G"; led->cdev_led1g.brightness = LED_OFF; - led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness; + led->cdev_led1g.brightness_set_blocking = bd2802_set_led1g_brightness; led->cdev_led1g.blink_set = bd2802_set_led1g_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led1g); @@ -595,7 +585,7 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led1b.name = "led1_B"; led->cdev_led1b.brightness = LED_OFF; - led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness; + led->cdev_led1b.brightness_set_blocking = bd2802_set_led1b_brightness; led->cdev_led1b.blink_set = bd2802_set_led1b_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led1b); @@ -607,7 +597,7 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led2r.name = "led2_R"; 
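The bd2802 hunks here follow the same recipe as the other converted drivers: the per-LED work item and cached brightness go away, the op becomes brightness_set_blocking, and the bus write's return value is propagated back to the LED core, which defers the call to its own set_brightness_work when needed. A minimal sketch of that shape for a hypothetical I2C-attached LED (struct, register and function names are illustrative only, not taken from this patch):

#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#define FOO_REG_BRIGHTNESS	0x01	/* hypothetical register address */

struct foo_led {
	struct led_classdev cdev;
	struct i2c_client *client;
	struct mutex lock;		/* serializes register writes */
};

/* May sleep: called directly or via the core's set_brightness_work */
static int foo_led_brightness_set(struct led_classdev *cdev,
				  enum led_brightness value)
{
	struct foo_led *led = container_of(cdev, struct foo_led, cdev);
	int ret;

	mutex_lock(&led->lock);
	ret = i2c_smbus_write_byte_data(led->client, FOO_REG_BRIGHTNESS, value);
	mutex_unlock(&led->lock);

	return ret;
}

static int foo_led_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct foo_led *led;

	led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
	if (!led)
		return -ENOMEM;

	led->client = client;
	mutex_init(&led->lock);
	led->cdev.name = "foo::status";
	led->cdev.max_brightness = LED_FULL;
	/* No driver-private work queue any more; the core handles deferral */
	led->cdev.brightness_set_blocking = foo_led_brightness_set;

	return devm_led_classdev_register(&client->dev, &led->cdev);
}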
led->cdev_led2r.brightness = LED_OFF; - led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness; + led->cdev_led2r.brightness_set_blocking = bd2802_set_led2r_brightness; led->cdev_led2r.blink_set = bd2802_set_led2r_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led2r); @@ -619,7 +609,7 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led2g.name = "led2_G"; led->cdev_led2g.brightness = LED_OFF; - led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness; + led->cdev_led2g.brightness_set_blocking = bd2802_set_led2g_brightness; led->cdev_led2g.blink_set = bd2802_set_led2g_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led2g); @@ -631,7 +621,7 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led2b.name = "led2_B"; led->cdev_led2b.brightness = LED_OFF; - led->cdev_led2b.brightness_set = bd2802_set_led2b_brightness; + led->cdev_led2b.brightness_set_blocking = bd2802_set_led2b_brightness; led->cdev_led2b.blink_set = bd2802_set_led2b_blink; led->cdev_led2b.flags |= LED_CORE_SUSPENDRESUME; @@ -661,7 +651,6 @@ failed_unregister_led1_R: static void bd2802_unregister_led_classdev(struct bd2802_led *led) { - cancel_work_sync(&led->work); led_classdev_unregister(&led->cdev_led2b); led_classdev_unregister(&led->cdev_led2g); led_classdev_unregister(&led->cdev_led2r); diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c index d0452b099aee..617fe975bf6e 100644 --- a/drivers/leds/leds-blinkm.c +++ b/drivers/leds/leds-blinkm.c @@ -39,16 +39,9 @@ struct blinkm_led { struct i2c_client *i2c_client; struct led_classdev led_cdev; int id; - atomic_t active; -}; - -struct blinkm_work { - struct blinkm_led *blinkm_led; - struct work_struct work; }; #define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev) -#define work_to_blmwork(c) container_of(c, struct blinkm_work, work) struct blinkm_data { struct i2c_client *i2c_client; @@ -439,65 +432,30 @@ static int blinkm_transfer_hw(struct i2c_client *client, int cmd) return 0; } -static void led_work(struct work_struct *work) -{ - int ret; - struct blinkm_led *led; - struct blinkm_data *data; - struct blinkm_work *blm_work = work_to_blmwork(work); - - led = blm_work->blinkm_led; - data = i2c_get_clientdata(led->i2c_client); - ret = blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB); - atomic_dec(&led->active); - dev_dbg(&led->i2c_client->dev, - "# DONE # next_red = %d, next_green = %d," - " next_blue = %d, active = %d\n", - data->next_red, data->next_green, - data->next_blue, atomic_read(&led->active)); - kfree(blm_work); -} - static int blinkm_led_common_set(struct led_classdev *led_cdev, enum led_brightness value, int color) { /* led_brightness is 0, 127 or 255 - we just use it here as-is */ struct blinkm_led *led = cdev_to_blmled(led_cdev); struct blinkm_data *data = i2c_get_clientdata(led->i2c_client); - struct blinkm_work *bl_work; switch (color) { case RED: /* bail out if there's no change */ if (data->next_red == (u8) value) return 0; - /* we assume a quite fast sequence here ([off]->on->off) - * think of network led trigger - we cannot blink that fast, so - * in case we already have a off->on->off transition queued up, - * we refuse to queue up more. - * Revisit: fast-changing brightness. 
*/ - if (atomic_read(&led->active) > 1) - return 0; data->next_red = (u8) value; break; case GREEN: /* bail out if there's no change */ if (data->next_green == (u8) value) return 0; - /* we assume a quite fast sequence here ([off]->on->off) - * Revisit: fast-changing brightness. */ - if (atomic_read(&led->active) > 1) - return 0; data->next_green = (u8) value; break; case BLUE: /* bail out if there's no change */ if (data->next_blue == (u8) value) return 0; - /* we assume a quite fast sequence here ([off]->on->off) - * Revisit: fast-changing brightness. */ - if (atomic_read(&led->active) > 1) - return 0; data->next_blue = (u8) value; break; @@ -506,42 +464,31 @@ static int blinkm_led_common_set(struct led_classdev *led_cdev, return -EINVAL; } - bl_work = kzalloc(sizeof(*bl_work), GFP_ATOMIC); - if (!bl_work) - return -ENOMEM; - - atomic_inc(&led->active); + blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB); dev_dbg(&led->i2c_client->dev, - "#TO_SCHED# next_red = %d, next_green = %d," - " next_blue = %d, active = %d\n", + "# DONE # next_red = %d, next_green = %d," + " next_blue = %d\n", data->next_red, data->next_green, - data->next_blue, atomic_read(&led->active)); - - /* a fresh work _item_ for each change */ - bl_work->blinkm_led = led; - INIT_WORK(&bl_work->work, led_work); - /* queue work in own queue for easy sync on exit*/ - schedule_work(&bl_work->work); - + data->next_blue); return 0; } -static void blinkm_led_red_set(struct led_classdev *led_cdev, +static int blinkm_led_red_set(struct led_classdev *led_cdev, enum led_brightness value) { - blinkm_led_common_set(led_cdev, value, RED); + return blinkm_led_common_set(led_cdev, value, RED); } -static void blinkm_led_green_set(struct led_classdev *led_cdev, +static int blinkm_led_green_set(struct led_classdev *led_cdev, enum led_brightness value) { - blinkm_led_common_set(led_cdev, value, GREEN); + return blinkm_led_common_set(led_cdev, value, GREEN); } -static void blinkm_led_blue_set(struct led_classdev *led_cdev, +static int blinkm_led_blue_set(struct led_classdev *led_cdev, enum led_brightness value) { - blinkm_led_common_set(led_cdev, value, BLUE); + return blinkm_led_common_set(led_cdev, value, BLUE); } static void blinkm_init_hw(struct i2c_client *client) @@ -669,7 +616,6 @@ static int blinkm_probe(struct i2c_client *client, led[i]->id = i; led[i]->led_cdev.max_brightness = 255; led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME; - atomic_set(&led[i]->active, 0); switch (i) { case RED: snprintf(blinkm_led_name, sizeof(blinkm_led_name), @@ -677,7 +623,8 @@ static int blinkm_probe(struct i2c_client *client, client->adapter->nr, client->addr); led[i]->led_cdev.name = blinkm_led_name; - led[i]->led_cdev.brightness_set = blinkm_led_red_set; + led[i]->led_cdev.brightness_set_blocking = + blinkm_led_red_set; err = led_classdev_register(&client->dev, &led[i]->led_cdev); if (err < 0) { @@ -693,7 +640,8 @@ static int blinkm_probe(struct i2c_client *client, client->adapter->nr, client->addr); led[i]->led_cdev.name = blinkm_led_name; - led[i]->led_cdev.brightness_set = blinkm_led_green_set; + led[i]->led_cdev.brightness_set_blocking = + blinkm_led_green_set; err = led_classdev_register(&client->dev, &led[i]->led_cdev); if (err < 0) { @@ -709,7 +657,8 @@ static int blinkm_probe(struct i2c_client *client, client->adapter->nr, client->addr); led[i]->led_cdev.name = blinkm_led_name; - led[i]->led_cdev.brightness_set = blinkm_led_blue_set; + led[i]->led_cdev.brightness_set_blocking = + blinkm_led_blue_set; err = led_classdev_register(&client->dev, 
&led[i]->led_cdev); if (err < 0) { @@ -746,10 +695,8 @@ static int blinkm_remove(struct i2c_client *client) int i; /* make sure no workqueue entries are pending */ - for (i = 0; i < 3; i++) { - flush_scheduled_work(); + for (i = 0; i < 3; i++) led_classdev_unregister(&data->blinkm_leds[i].led_cdev); - } /* reset rgb */ data->next_red = 0x00; diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c index 952ba96e5b38..4752a2b6ba2b 100644 --- a/drivers/leds/leds-da903x.c +++ b/drivers/leds/leds-da903x.c @@ -16,7 +16,6 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> -#include <linux/workqueue.h> #include <linux/mfd/da903x.h> #include <linux/slab.h> @@ -33,9 +32,7 @@ struct da903x_led { struct led_classdev cdev; - struct work_struct work; struct device *master; - enum led_brightness new_brightness; int id; int flags; }; @@ -43,11 +40,13 @@ struct da903x_led { #define DA9030_LED_OFFSET(id) ((id) - DA9030_ID_LED_1) #define DA9034_LED_OFFSET(id) ((id) - DA9034_ID_LED_1) -static void da903x_led_work(struct work_struct *work) +static int da903x_led_set(struct led_classdev *led_cdev, + enum led_brightness value) { - struct da903x_led *led = container_of(work, struct da903x_led, work); + struct da903x_led *led = + container_of(led_cdev, struct da903x_led, cdev); uint8_t val; - int offset; + int offset, ret = -EINVAL; switch (led->id) { case DA9030_ID_LED_1: @@ -57,37 +56,31 @@ static void da903x_led_work(struct work_struct *work) case DA9030_ID_LED_PC: offset = DA9030_LED_OFFSET(led->id); val = led->flags & ~0x87; - val |= (led->new_brightness) ? 0x80 : 0; /* EN bit */ - val |= (0x7 - (led->new_brightness >> 5)) & 0x7; /* PWM<2:0> */ - da903x_write(led->master, DA9030_LED1_CONTROL + offset, val); + val |= value ? 0x80 : 0; /* EN bit */ + val |= (0x7 - (value >> 5)) & 0x7; /* PWM<2:0> */ + ret = da903x_write(led->master, DA9030_LED1_CONTROL + offset, + val); break; case DA9030_ID_VIBRA: val = led->flags & ~0x80; - val |= (led->new_brightness) ? 0x80 : 0; /* EN bit */ - da903x_write(led->master, DA9030_MISC_CONTROL_A, val); + val |= value ? 0x80 : 0; /* EN bit */ + ret = da903x_write(led->master, DA9030_MISC_CONTROL_A, val); break; case DA9034_ID_LED_1: case DA9034_ID_LED_2: offset = DA9034_LED_OFFSET(led->id); - val = (led->new_brightness * 0x5f / LED_FULL) & 0x7f; + val = (value * 0x5f / LED_FULL) & 0x7f; val |= (led->flags & DA9034_LED_RAMP) ? 
0x80 : 0; - da903x_write(led->master, DA9034_LED1_CONTROL + offset, val); + ret = da903x_write(led->master, DA9034_LED1_CONTROL + offset, + val); break; case DA9034_ID_VIBRA: - val = led->new_brightness & 0xfe; - da903x_write(led->master, DA9034_VIBRA, val); + val = value & 0xfe; + ret = da903x_write(led->master, DA9034_VIBRA, val); break; } -} -static void da903x_led_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct da903x_led *led; - - led = container_of(led_cdev, struct da903x_led, cdev); - led->new_brightness = value; - schedule_work(&led->work); + return ret; } static int da903x_led_probe(struct platform_device *pdev) @@ -113,15 +106,12 @@ static int da903x_led_probe(struct platform_device *pdev) led->cdev.name = pdata->name; led->cdev.default_trigger = pdata->default_trigger; - led->cdev.brightness_set = da903x_led_set; + led->cdev.brightness_set_blocking = da903x_led_set; led->cdev.brightness = LED_OFF; led->id = id; led->flags = pdata->flags; led->master = pdev->dev.parent; - led->new_brightness = LED_OFF; - - INIT_WORK(&led->work, da903x_led_work); ret = led_classdev_register(led->master, &led->cdev); if (ret) { diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c index 28291b6acc8e..f8c7d82c2652 100644 --- a/drivers/leds/leds-da9052.c +++ b/drivers/leds/leds-da9052.c @@ -16,7 +16,6 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> -#include <linux/workqueue.h> #include <linux/slab.h> #include <linux/mfd/da9052/reg.h> @@ -32,11 +31,9 @@ struct da9052_led { struct led_classdev cdev; - struct work_struct work; struct da9052 *da9052; unsigned char led_index; unsigned char id; - int brightness; }; static unsigned char led_reg[] = { @@ -44,12 +41,13 @@ static unsigned char led_reg[] = { DA9052_LED_CONT_5_REG, }; -static int da9052_set_led_brightness(struct da9052_led *led) +static int da9052_set_led_brightness(struct da9052_led *led, + enum led_brightness brightness) { u8 val; int error; - val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM; + val = (brightness & 0x7f) | DA9052_LED_CONT_DIM; error = da9052_reg_write(led->da9052, led_reg[led->led_index], val); if (error < 0) @@ -58,21 +56,13 @@ static int da9052_set_led_brightness(struct da9052_led *led) return error; } -static void da9052_led_work(struct work_struct *work) -{ - struct da9052_led *led = container_of(work, struct da9052_led, work); - - da9052_set_led_brightness(led); -} - -static void da9052_led_set(struct led_classdev *led_cdev, +static int da9052_led_set(struct led_classdev *led_cdev, enum led_brightness value) { - struct da9052_led *led; + struct da9052_led *led = + container_of(led_cdev, struct da9052_led, cdev); - led = container_of(led_cdev, struct da9052_led, cdev); - led->brightness = value; - schedule_work(&led->work); + return da9052_set_led_brightness(led, value); } static int da9052_configure_leds(struct da9052 *da9052) @@ -133,13 +123,11 @@ static int da9052_led_probe(struct platform_device *pdev) for (i = 0; i < pled->num_leds; i++) { led[i].cdev.name = pled->leds[i].name; - led[i].cdev.brightness_set = da9052_led_set; + led[i].cdev.brightness_set_blocking = da9052_led_set; led[i].cdev.brightness = LED_OFF; led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS; - led[i].brightness = LED_OFF; led[i].led_index = pled->leds[i].flags; led[i].da9052 = dev_get_drvdata(pdev->dev.parent); - INIT_WORK(&led[i].work, da9052_led_work); error = led_classdev_register(pdev->dev.parent, &led[i].cdev); if (error) { @@ -148,7 +136,8 @@ static int 
da9052_led_probe(struct platform_device *pdev) goto err_register; } - error = da9052_set_led_brightness(&led[i]); + error = da9052_set_led_brightness(&led[i], + led[i].cdev.brightness); if (error) { dev_err(&pdev->dev, "Unable to init led %d\n", led[i].led_index); @@ -166,10 +155,8 @@ static int da9052_led_probe(struct platform_device *pdev) return 0; err_register: - for (i = i - 1; i >= 0; i--) { + for (i = i - 1; i >= 0; i--) led_classdev_unregister(&led[i].cdev); - cancel_work_sync(&led[i].work); - } err: return error; } @@ -187,10 +174,8 @@ static int da9052_led_remove(struct platform_device *pdev) pled = pdata->pled; for (i = 0; i < pled->num_leds; i++) { - led[i].brightness = 0; - da9052_set_led_brightness(&led[i]); + da9052_set_led_brightness(&led[i], LED_OFF); led_classdev_unregister(&led[i].cdev); - cancel_work_sync(&led[i].work); } return 0; diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c index 314159610d24..5a5a86d5f1f5 100644 --- a/drivers/leds/leds-dac124s085.c +++ b/drivers/leds/leds-dac124s085.c @@ -13,20 +13,15 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> #include <linux/spi/spi.h> struct dac124s085_led { struct led_classdev ldev; struct spi_device *spi; int id; - int brightness; char name[sizeof("dac124s085-3")]; struct mutex mutex; - struct work_struct work; - spinlock_t lock; }; struct dac124s085 { @@ -38,29 +33,21 @@ struct dac124s085 { #define ALL_WRITE_UPDATE (2 << 12) #define POWER_DOWN_OUTPUT (3 << 12) -static void dac124s085_led_work(struct work_struct *work) +static int dac124s085_set_brightness(struct led_classdev *ldev, + enum led_brightness brightness) { - struct dac124s085_led *led = container_of(work, struct dac124s085_led, - work); + struct dac124s085_led *led = container_of(ldev, struct dac124s085_led, + ldev); u16 word; + int ret; mutex_lock(&led->mutex); word = cpu_to_le16(((led->id) << 14) | REG_WRITE_UPDATE | - (led->brightness & 0xfff)); - spi_write(led->spi, (const u8 *)&word, sizeof(word)); + (brightness & 0xfff)); + ret = spi_write(led->spi, (const u8 *)&word, sizeof(word)); mutex_unlock(&led->mutex); -} - -static void dac124s085_set_brightness(struct led_classdev *ldev, - enum led_brightness brightness) -{ - struct dac124s085_led *led = container_of(ldev, struct dac124s085_led, - ldev); - spin_lock(&led->lock); - led->brightness = brightness; - schedule_work(&led->work); - spin_unlock(&led->lock); + return ret; } static int dac124s085_probe(struct spi_device *spi) @@ -78,16 +65,13 @@ static int dac124s085_probe(struct spi_device *spi) for (i = 0; i < ARRAY_SIZE(dac->leds); i++) { led = dac->leds + i; led->id = i; - led->brightness = LED_OFF; led->spi = spi; snprintf(led->name, sizeof(led->name), "dac124s085-%d", i); - spin_lock_init(&led->lock); - INIT_WORK(&led->work, dac124s085_led_work); mutex_init(&led->mutex); led->ldev.name = led->name; led->ldev.brightness = LED_OFF; led->ldev.max_brightness = 0xfff; - led->ldev.brightness_set = dac124s085_set_brightness; + led->ldev.brightness_set_blocking = dac124s085_set_brightness; ret = led_classdev_register(&spi->dev, &led->ldev); if (ret < 0) goto eledcr; @@ -109,10 +93,8 @@ static int dac124s085_remove(struct spi_device *spi) struct dac124s085 *dac = spi_get_drvdata(spi); int i; - for (i = 0; i < ARRAY_SIZE(dac->leds); i++) { + for (i = 0; i < ARRAY_SIZE(dac->leds); i++) led_classdev_unregister(&dac->leds[i].ldev); - cancel_work_sync(&dac->leds[i].work); - } return 0; } diff --git 
a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 5db4515a4fd7..7bc53280dbfd 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -20,32 +20,16 @@ #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> -#include <linux/workqueue.h> struct gpio_led_data { struct led_classdev cdev; struct gpio_desc *gpiod; - struct work_struct work; - u8 new_level; u8 can_sleep; u8 blinking; int (*platform_gpio_blink_set)(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); }; -static void gpio_led_work(struct work_struct *work) -{ - struct gpio_led_data *led_dat = - container_of(work, struct gpio_led_data, work); - - if (led_dat->blinking) { - led_dat->platform_gpio_blink_set(led_dat->gpiod, - led_dat->new_level, NULL, NULL); - led_dat->blinking = 0; - } else - gpiod_set_value_cansleep(led_dat->gpiod, led_dat->new_level); -} - static void gpio_led_set(struct led_classdev *led_cdev, enum led_brightness value) { @@ -58,23 +42,25 @@ static void gpio_led_set(struct led_classdev *led_cdev, else level = 1; - /* Setting GPIOs with I2C/etc requires a task context, and we don't - * seem to have a reliable way to know if we're already in one; so - * let's just assume the worst. - */ - if (led_dat->can_sleep) { - led_dat->new_level = level; - schedule_work(&led_dat->work); + if (led_dat->blinking) { + led_dat->platform_gpio_blink_set(led_dat->gpiod, level, + NULL, NULL); + led_dat->blinking = 0; } else { - if (led_dat->blinking) { - led_dat->platform_gpio_blink_set(led_dat->gpiod, level, - NULL, NULL); - led_dat->blinking = 0; - } else + if (led_dat->can_sleep) + gpiod_set_value_cansleep(led_dat->gpiod, level); + else gpiod_set_value(led_dat->gpiod, level); } } +static int gpio_led_set_blocking(struct led_classdev *led_cdev, + enum led_brightness value) +{ + gpio_led_set(led_cdev, value); + return 0; +} + static int gpio_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { @@ -125,12 +111,15 @@ static int create_gpio_led(const struct gpio_led *template, led_dat->cdev.name = template->name; led_dat->cdev.default_trigger = template->default_trigger; led_dat->can_sleep = gpiod_cansleep(led_dat->gpiod); + if (!led_dat->can_sleep) + led_dat->cdev.brightness_set = gpio_led_set; + else + led_dat->cdev.brightness_set_blocking = gpio_led_set_blocking; led_dat->blinking = 0; if (blink_set) { led_dat->platform_gpio_blink_set = blink_set; led_dat->cdev.blink_set = gpio_blink_set; } - led_dat->cdev.brightness_set = gpio_led_set; if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP) state = !!gpiod_get_value_cansleep(led_dat->gpiod); else @@ -143,17 +132,9 @@ static int create_gpio_led(const struct gpio_led *template, if (ret < 0) return ret; - INIT_WORK(&led_dat->work, gpio_led_work); - return led_classdev_register(parent, &led_dat->cdev); } -static void delete_gpio_led(struct gpio_led_data *led) -{ - led_classdev_unregister(&led->cdev); - cancel_work_sync(&led->work); -} - struct gpio_leds_priv { int num_leds; struct gpio_led_data leds[]; @@ -233,7 +214,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) err: for (count = priv->num_leds - 1; count >= 0; count--) - delete_gpio_led(&priv->leds[count]); + led_classdev_unregister(&priv->leds[count].cdev); return ERR_PTR(ret); } @@ -265,7 +246,8 @@ static int gpio_led_probe(struct platform_device *pdev) if (ret < 0) { /* On failure: unwind the led creations */ for (i = i - 1; i >= 0; i--) - 
delete_gpio_led(&priv->leds[i]); + led_classdev_unregister( + &priv->leds[i].cdev); return ret; } } @@ -286,7 +268,7 @@ static int gpio_led_remove(struct platform_device *pdev) int i; for (i = 0; i < priv->num_leds; i++) - delete_gpio_led(&priv->leds[i]); + led_classdev_unregister(&priv->leds[i].cdev); return 0; } diff --git a/drivers/leds/leds-ipaq-micro.c b/drivers/leds/leds-ipaq-micro.c index fa262b6b25eb..02f17331379d 100644 --- a/drivers/leds/leds-ipaq-micro.c +++ b/drivers/leds/leds-ipaq-micro.c @@ -20,7 +20,7 @@ #define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */ #define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */ -static void micro_leds_brightness_set(struct led_classdev *led_cdev, +static int micro_leds_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct ipaq_micro *micro = dev_get_drvdata(led_cdev->dev->parent->parent); @@ -50,7 +50,7 @@ static void micro_leds_brightness_set(struct led_classdev *led_cdev, msg.tx_data[2] = 1; msg.tx_data[3] = 0; /* Duty cycle 256 */ } - ipaq_micro_tx_msg_sync(micro, &msg); + return ipaq_micro_tx_msg_sync(micro, &msg); } /* Maximum duty cycle in ms 256/10 sec = 25600 ms */ @@ -102,7 +102,7 @@ static int micro_leds_blink_set(struct led_classdev *led_cdev, static struct led_classdev micro_led = { .name = "led-ipaq-micro", - .brightness_set = micro_leds_brightness_set, + .brightness_set_blocking = micro_leds_brightness_set, .blink_set = micro_leds_blink_set, .flags = LED_CORE_SUSPENDRESUME, }; diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c index feca07be85f5..bf23ba191ad0 100644 --- a/drivers/leds/leds-ktd2692.c +++ b/drivers/leds/leds-ktd2692.c @@ -18,7 +18,6 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <linux/workqueue.h> /* Value related the movie mode */ #define KTD2692_MOVIE_MODE_CURRENT_LEVELS 16 @@ -82,7 +81,6 @@ struct ktd2692_context { /* secures access to the device */ struct mutex lock; struct regulator *regulator; - struct work_struct work_brightness_set; struct gpio_desc *aux_gpio; struct gpio_desc *ctrl_gpio; @@ -158,9 +156,12 @@ static void ktd2692_expresswire_write(struct ktd2692_context *led, u8 value) ktd2692_expresswire_end(led); } -static void ktd2692_brightness_set(struct ktd2692_context *led, - enum led_brightness brightness) +static int ktd2692_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) { + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct ktd2692_context *led = fled_cdev_to_led(fled_cdev); + mutex_lock(&led->lock); if (brightness == LED_OFF) { @@ -174,33 +175,6 @@ static void ktd2692_brightness_set(struct ktd2692_context *led, ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE); mutex_unlock(&led->lock); -} - -static void ktd2692_brightness_set_work(struct work_struct *work) -{ - struct ktd2692_context *led = - container_of(work, struct ktd2692_context, work_brightness_set); - - ktd2692_brightness_set(led, led->torch_brightness); -} - -static void ktd2692_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); - struct ktd2692_context *led = fled_cdev_to_led(fled_cdev); - - led->torch_brightness = brightness; - schedule_work(&led->work_brightness_set); -} - -static int ktd2692_led_brightness_set_sync(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct 
led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); - struct ktd2692_context *led = fled_cdev_to_led(fled_cdev); - - ktd2692_brightness_set(led, brightness); return 0; } @@ -332,21 +306,24 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, &cfg->movie_max_microamp); if (ret) { dev_err(dev, "failed to parse led-max-microamp\n"); - return ret; + goto err_parse_dt; } ret = of_property_read_u32(child_node, "flash-max-microamp", &cfg->flash_max_microamp); if (ret) { dev_err(dev, "failed to parse flash-max-microamp\n"); - return ret; + goto err_parse_dt; } ret = of_property_read_u32(child_node, "flash-max-timeout-us", &cfg->flash_max_timeout); - if (ret) + if (ret) { dev_err(dev, "failed to parse flash-max-timeout-us\n"); + goto err_parse_dt; + } +err_parse_dt: of_node_put(child_node); return ret; } @@ -381,12 +358,10 @@ static int ktd2692_probe(struct platform_device *pdev) fled_cdev->ops = &flash_ops; led_cdev->max_brightness = led_cfg.max_brightness; - led_cdev->brightness_set = ktd2692_led_brightness_set; - led_cdev->brightness_set_sync = ktd2692_led_brightness_set_sync; + led_cdev->brightness_set_blocking = ktd2692_led_brightness_set; led_cdev->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH; mutex_init(&led->lock); - INIT_WORK(&led->work_brightness_set, ktd2692_brightness_set_work); platform_set_drvdata(pdev, led); @@ -408,7 +383,6 @@ static int ktd2692_remove(struct platform_device *pdev) int ret; led_classdev_flash_unregister(&led->fled_cdev); - cancel_work_sync(&led->work_brightness_set); if (led->regulator) { ret = regulator_disable(led->regulator); diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 6e2e02035dd7..196dcb5e6004 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -17,7 +17,6 @@ #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/mfd/lm3533.h> @@ -53,9 +52,6 @@ struct lm3533_led { struct mutex mutex; unsigned long flags; - - struct work_struct work; - u8 new_brightness; }; @@ -123,27 +119,17 @@ out: return ret; } -static void lm3533_led_work(struct work_struct *work) -{ - struct lm3533_led *led = container_of(work, struct lm3533_led, work); - - dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness); - - if (led->new_brightness == 0) - lm3533_led_pattern_enable(led, 0); /* disable blink */ - - lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness); -} - -static void lm3533_led_set(struct led_classdev *cdev, +static int lm3533_led_set(struct led_classdev *cdev, enum led_brightness value) { struct lm3533_led *led = to_lm3533_led(cdev); dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value); - led->new_brightness = value; - schedule_work(&led->work); + if (value == 0) + lm3533_led_pattern_enable(led, 0); /* disable blink */ + + return lm3533_ctrlbank_set_brightness(&led->cb, value); } static enum led_brightness lm3533_led_get(struct led_classdev *cdev) @@ -693,7 +679,7 @@ static int lm3533_led_probe(struct platform_device *pdev) led->lm3533 = lm3533; led->cdev.name = pdata->name; led->cdev.default_trigger = pdata->default_trigger; - led->cdev.brightness_set = lm3533_led_set; + led->cdev.brightness_set_blocking = lm3533_led_set; led->cdev.brightness_get = lm3533_led_get; led->cdev.blink_set = lm3533_led_blink_set; led->cdev.brightness = LED_OFF; @@ -701,7 +687,6 @@ static int lm3533_led_probe(struct platform_device *pdev) led->id = pdev->id; mutex_init(&led->mutex); - INIT_WORK(&led->work, 
lm3533_led_work); /* The class framework makes a callback to get brightness during * registration so use parent device (for error reporting) until @@ -733,7 +718,6 @@ static int lm3533_led_probe(struct platform_device *pdev) err_unregister: led_classdev_unregister(&led->cdev); - flush_work(&led->work); return ret; } @@ -746,7 +730,6 @@ static int lm3533_led_remove(struct platform_device *pdev) lm3533_ctrlbank_disable(&led->cb); led_classdev_unregister(&led->cdev); - flush_work(&led->work); return 0; } @@ -760,7 +743,6 @@ static void lm3533_led_shutdown(struct platform_device *pdev) lm3533_ctrlbank_disable(&led->cb); lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */ - flush_work(&led->work); } static struct platform_driver lm3533_led_driver = { diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c index 48872997d6b4..6cb94f9a2f3f 100644 --- a/drivers/leds/leds-lm355x.c +++ b/drivers/leds/leds-lm355x.c @@ -16,7 +16,6 @@ #include <linux/platform_device.h> #include <linux/fs.h> #include <linux/regmap.h> -#include <linux/workqueue.h> #include <linux/platform_data/leds-lm355x.h> enum lm355x_type { @@ -59,14 +58,6 @@ struct lm355x_chip_data { struct led_classdev cdev_torch; struct led_classdev cdev_indicator; - struct work_struct work_flash; - struct work_struct work_torch; - struct work_struct work_indicator; - - u8 br_flash; - u8 br_torch; - u8 br_indicator; - struct lm355x_platform_data *pdata; struct regmap *regmap; struct mutex lock; @@ -204,7 +195,7 @@ out: } /* chip control */ -static void lm355x_control(struct lm355x_chip_data *chip, +static int lm355x_control(struct lm355x_chip_data *chip, u8 brightness, enum lm355x_mode opmode) { int ret; @@ -301,7 +292,7 @@ static void lm355x_control(struct lm355x_chip_data *chip, case MODE_SHDN: break; default: - return; + return -EINVAL; } /* operation mode control */ ret = regmap_update_bits(chip->regmap, preg[REG_OPMODE].regno, @@ -309,73 +300,55 @@ static void lm355x_control(struct lm355x_chip_data *chip, opmode << preg[REG_OPMODE].shift); if (ret < 0) goto out; - return; + return ret; out: dev_err(chip->dev, "%s:i2c access fail to register\n", __func__); - return; + return ret; } /* torch */ -static void lm355x_deferred_torch_brightness_set(struct work_struct *work) -{ - struct lm355x_chip_data *chip = - container_of(work, struct lm355x_chip_data, work_torch); - mutex_lock(&chip->lock); - lm355x_control(chip, chip->br_torch, MODE_TORCH); - mutex_unlock(&chip->lock); -} - -static void lm355x_torch_brightness_set(struct led_classdev *cdev, +static int lm355x_torch_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct lm355x_chip_data *chip = container_of(cdev, struct lm355x_chip_data, cdev_torch); - - chip->br_torch = brightness; - schedule_work(&chip->work_torch); -} - -/* flash */ -static void lm355x_deferred_strobe_brightness_set(struct work_struct *work) -{ - struct lm355x_chip_data *chip = - container_of(work, struct lm355x_chip_data, work_flash); + int ret; mutex_lock(&chip->lock); - lm355x_control(chip, chip->br_flash, MODE_FLASH); + ret = lm355x_control(chip, brightness, MODE_TORCH); mutex_unlock(&chip->lock); + return ret; } -static void lm355x_strobe_brightness_set(struct led_classdev *cdev, +/* flash */ + +static int lm355x_strobe_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct lm355x_chip_data *chip = container_of(cdev, struct lm355x_chip_data, cdev_flash); - - chip->br_flash = brightness; - schedule_work(&chip->work_flash); -} - -/* indicator */ 
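The leds-gpio hunks earlier keep a non-sleeping brightness_set for GPIOs that can be driven from any context and fall back to a brightness_set_blocking wrapper when the GPIO may sleep; the core then defers only the blocking op to its work item. A condensed sketch of that probe-time selection, with hypothetical driver data and the existing gpiod API:

#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/leds.h>

struct bar_led {			/* hypothetical driver data */
	struct led_classdev cdev;
	struct gpio_desc *gpiod;
};

static void bar_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct bar_led *led = container_of(cdev, struct bar_led, cdev);

	gpiod_set_value(led->gpiod, value != LED_OFF);
}

static int bar_led_set_blocking(struct led_classdev *cdev,
				enum led_brightness value)
{
	struct bar_led *led = container_of(cdev, struct bar_led, cdev);

	gpiod_set_value_cansleep(led->gpiod, value != LED_OFF);
	return 0;
}

/* At probe time, pick the op that matches the GPIO's context requirements */
static void bar_led_init_ops(struct bar_led *led)
{
	if (gpiod_cansleep(led->gpiod))
		led->cdev.brightness_set_blocking = bar_led_set_blocking;
	else
		led->cdev.brightness_set = bar_led_set;
}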
-static void lm355x_deferred_indicator_brightness_set(struct work_struct *work) -{ - struct lm355x_chip_data *chip = - container_of(work, struct lm355x_chip_data, work_indicator); + int ret; mutex_lock(&chip->lock); - lm355x_control(chip, chip->br_indicator, MODE_INDIC); + ret = lm355x_control(chip, brightness, MODE_FLASH); mutex_unlock(&chip->lock); + return ret; } -static void lm355x_indicator_brightness_set(struct led_classdev *cdev, +/* indicator */ + +static int lm355x_indicator_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct lm355x_chip_data *chip = container_of(cdev, struct lm355x_chip_data, cdev_indicator); + int ret; - chip->br_indicator = brightness; - schedule_work(&chip->work_indicator); + mutex_lock(&chip->lock); + ret = lm355x_control(chip, brightness, MODE_INDIC); + mutex_unlock(&chip->lock); + return ret; } /* indicator pattern only for lm3556*/ @@ -479,34 +452,31 @@ static int lm355x_probe(struct i2c_client *client, goto err_out; /* flash */ - INIT_WORK(&chip->work_flash, lm355x_deferred_strobe_brightness_set); chip->cdev_flash.name = "flash"; chip->cdev_flash.max_brightness = 16; - chip->cdev_flash.brightness_set = lm355x_strobe_brightness_set; + chip->cdev_flash.brightness_set_blocking = lm355x_strobe_brightness_set; chip->cdev_flash.default_trigger = "flash"; err = led_classdev_register((struct device *) &client->dev, &chip->cdev_flash); if (err < 0) goto err_out; /* torch */ - INIT_WORK(&chip->work_torch, lm355x_deferred_torch_brightness_set); chip->cdev_torch.name = "torch"; chip->cdev_torch.max_brightness = 8; - chip->cdev_torch.brightness_set = lm355x_torch_brightness_set; + chip->cdev_torch.brightness_set_blocking = lm355x_torch_brightness_set; chip->cdev_torch.default_trigger = "torch"; err = led_classdev_register((struct device *) &client->dev, &chip->cdev_torch); if (err < 0) goto err_create_torch_file; /* indicator */ - INIT_WORK(&chip->work_indicator, - lm355x_deferred_indicator_brightness_set); chip->cdev_indicator.name = "indicator"; if (id->driver_data == CHIP_LM3554) chip->cdev_indicator.max_brightness = 4; else chip->cdev_indicator.max_brightness = 8; - chip->cdev_indicator.brightness_set = lm355x_indicator_brightness_set; + chip->cdev_indicator.brightness_set_blocking = + lm355x_indicator_brightness_set; /* indicator pattern control only for LM3556 */ if (id->driver_data == CHIP_LM3556) chip->cdev_indicator.groups = lm355x_indicator_groups; @@ -534,11 +504,8 @@ static int lm355x_remove(struct i2c_client *client) regmap_write(chip->regmap, preg[REG_OPMODE].regno, 0); led_classdev_unregister(&chip->cdev_indicator); - flush_work(&chip->work_indicator); led_classdev_unregister(&chip->cdev_torch); - flush_work(&chip->work_torch); led_classdev_unregister(&chip->cdev_flash); - flush_work(&chip->work_flash); dev_info(&client->dev, "%s is removed\n", lm355x_name[chip->type]); return 0; diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c index 02ebe342f5af..cada0848db7b 100644 --- a/drivers/leds/leds-lm3642.c +++ b/drivers/leds/leds-lm3642.c @@ -15,7 +15,6 @@ #include <linux/platform_device.h> #include <linux/fs.h> #include <linux/regmap.h> -#include <linux/workqueue.h> #include <linux/platform_data/leds-lm3642.h> #define REG_FILT_TIME (0x0) @@ -73,10 +72,6 @@ struct lm3642_chip_data { struct led_classdev cdev_torch; struct led_classdev cdev_indicator; - struct work_struct work_flash; - struct work_struct work_torch; - struct work_struct work_indicator; - u8 br_flash; u8 br_torch; u8 br_indicator; @@ -209,24 
+204,18 @@ out_strtoint: static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store); -static void lm3642_deferred_torch_brightness_set(struct work_struct *work) -{ - struct lm3642_chip_data *chip = - container_of(work, struct lm3642_chip_data, work_torch); - - mutex_lock(&chip->lock); - lm3642_control(chip, chip->br_torch, MODES_TORCH); - mutex_unlock(&chip->lock); -} - -static void lm3642_torch_brightness_set(struct led_classdev *cdev, +static int lm3642_torch_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct lm3642_chip_data *chip = container_of(cdev, struct lm3642_chip_data, cdev_torch); + int ret; + mutex_lock(&chip->lock); chip->br_torch = brightness; - schedule_work(&chip->work_torch); + ret = lm3642_control(chip, chip->br_torch, MODES_TORCH); + mutex_unlock(&chip->lock); + return ret; } /* flash */ @@ -266,45 +255,33 @@ out_strtoint: static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store); -static void lm3642_deferred_strobe_brightness_set(struct work_struct *work) -{ - struct lm3642_chip_data *chip = - container_of(work, struct lm3642_chip_data, work_flash); - - mutex_lock(&chip->lock); - lm3642_control(chip, chip->br_flash, MODES_FLASH); - mutex_unlock(&chip->lock); -} - -static void lm3642_strobe_brightness_set(struct led_classdev *cdev, +static int lm3642_strobe_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct lm3642_chip_data *chip = container_of(cdev, struct lm3642_chip_data, cdev_flash); - - chip->br_flash = brightness; - schedule_work(&chip->work_flash); -} - -/* indicator */ -static void lm3642_deferred_indicator_brightness_set(struct work_struct *work) -{ - struct lm3642_chip_data *chip = - container_of(work, struct lm3642_chip_data, work_indicator); + int ret; mutex_lock(&chip->lock); - lm3642_control(chip, chip->br_indicator, MODES_INDIC); + chip->br_flash = brightness; + ret = lm3642_control(chip, chip->br_flash, MODES_FLASH); mutex_unlock(&chip->lock); + return ret; } -static void lm3642_indicator_brightness_set(struct led_classdev *cdev, +/* indicator */ +static int lm3642_indicator_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct lm3642_chip_data *chip = container_of(cdev, struct lm3642_chip_data, cdev_indicator); + int ret; + mutex_lock(&chip->lock); chip->br_indicator = brightness; - schedule_work(&chip->work_indicator); + ret = lm3642_control(chip, chip->br_indicator, MODES_INDIC); + mutex_unlock(&chip->lock); + return ret; } static const struct regmap_config lm3642_regmap = { @@ -371,10 +348,9 @@ static int lm3642_probe(struct i2c_client *client, goto err_out; /* flash */ - INIT_WORK(&chip->work_flash, lm3642_deferred_strobe_brightness_set); chip->cdev_flash.name = "flash"; chip->cdev_flash.max_brightness = 16; - chip->cdev_flash.brightness_set = lm3642_strobe_brightness_set; + chip->cdev_flash.brightness_set_blocking = lm3642_strobe_brightness_set; chip->cdev_flash.default_trigger = "flash"; chip->cdev_flash.groups = lm3642_flash_groups, err = led_classdev_register((struct device *) @@ -385,10 +361,9 @@ static int lm3642_probe(struct i2c_client *client, } /* torch */ - INIT_WORK(&chip->work_torch, lm3642_deferred_torch_brightness_set); chip->cdev_torch.name = "torch"; chip->cdev_torch.max_brightness = 8; - chip->cdev_torch.brightness_set = lm3642_torch_brightness_set; + chip->cdev_torch.brightness_set_blocking = lm3642_torch_brightness_set; chip->cdev_torch.default_trigger = "torch"; chip->cdev_torch.groups = lm3642_torch_groups, err = 
led_classdev_register((struct device *) @@ -399,11 +374,10 @@ static int lm3642_probe(struct i2c_client *client, } /* indicator */ - INIT_WORK(&chip->work_indicator, - lm3642_deferred_indicator_brightness_set); chip->cdev_indicator.name = "indicator"; chip->cdev_indicator.max_brightness = 8; - chip->cdev_indicator.brightness_set = lm3642_indicator_brightness_set; + chip->cdev_indicator.brightness_set_blocking = + lm3642_indicator_brightness_set; err = led_classdev_register((struct device *) &client->dev, &chip->cdev_indicator); if (err < 0) { @@ -427,11 +401,8 @@ static int lm3642_remove(struct i2c_client *client) struct lm3642_chip_data *chip = i2c_get_clientdata(client); led_classdev_unregister(&chip->cdev_indicator); - flush_work(&chip->work_indicator); led_classdev_unregister(&chip->cdev_torch); - flush_work(&chip->work_torch); led_classdev_unregister(&chip->cdev_flash); - flush_work(&chip->work_flash); regmap_write(chip->regmap, REG_ENABLE, 0); return 0; } diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c index 53144fb96167..6c758aea1bbd 100644 --- a/drivers/leds/leds-lp3944.c +++ b/drivers/leds/leds-lp3944.c @@ -31,7 +31,6 @@ #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> -#include <linux/workqueue.h> #include <linux/leds-lp3944.h> /* Read Only Registers */ @@ -68,10 +67,8 @@ struct lp3944_led_data { u8 id; enum lp3944_type type; - enum lp3944_status status; struct led_classdev ldev; struct i2c_client *client; - struct work_struct work; }; struct lp3944_data { @@ -275,13 +272,12 @@ static int lp3944_led_set_blink(struct led_classdev *led_cdev, dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", __func__); - led->status = LP3944_LED_STATUS_DIM0; - schedule_work(&led->work); + lp3944_led_set(led, LP3944_LED_STATUS_DIM0); return 0; } -static void lp3944_led_set_brightness(struct led_classdev *led_cdev, +static int lp3944_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { struct lp3944_led_data *led = ldev_to_led(led_cdev); @@ -289,16 +285,7 @@ static void lp3944_led_set_brightness(struct led_classdev *led_cdev, dev_dbg(&led->client->dev, "%s: %s, %d\n", __func__, led_cdev->name, brightness); - led->status = !!brightness; - schedule_work(&led->work); -} - -static void lp3944_led_work(struct work_struct *work) -{ - struct lp3944_led_data *led; - - led = container_of(work, struct lp3944_led_data, work); - lp3944_led_set(led, led->status); + return lp3944_led_set(led, !!brightness); } static int lp3944_configure(struct i2c_client *client, @@ -318,14 +305,13 @@ static int lp3944_configure(struct i2c_client *client, case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led->type = pled->type; - led->status = pled->status; led->ldev.name = pled->name; led->ldev.max_brightness = 1; - led->ldev.brightness_set = lp3944_led_set_brightness; + led->ldev.brightness_set_blocking = + lp3944_led_set_brightness; led->ldev.blink_set = lp3944_led_set_blink; led->ldev.flags = LED_CORE_SUSPENDRESUME; - INIT_WORK(&led->work, lp3944_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, @@ -336,14 +322,14 @@ static int lp3944_configure(struct i2c_client *client, /* to expose the default value to userspace */ led->ldev.brightness = - (enum led_brightness) led->status; + (enum led_brightness) pled->status; /* Set the default led status */ - err = lp3944_led_set(led, led->status); + err = lp3944_led_set(led, pled->status); if (err < 0) { dev_err(&client->dev, "%s 
couldn't set STATUS %d\n", - led->ldev.name, led->status); + led->ldev.name, pled->status); goto exit; } break; @@ -364,7 +350,6 @@ exit: case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: @@ -424,7 +409,6 @@ static int lp3944_remove(struct i2c_client *client) case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 63a92542c8cb..549b315ca8fe 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -362,16 +362,17 @@ static int lp5521_run_selftest(struct lp55xx_chip *chip, char *buf) return 0; } -static void lp5521_led_brightness_work(struct work_struct *work) +static int lp5521_led_brightness(struct lp55xx_led *led) { - struct lp55xx_led *led = container_of(work, struct lp55xx_led, - brightness_work); struct lp55xx_chip *chip = led->chip; + int ret; mutex_lock(&chip->lock); - lp55xx_write(chip, LP5521_REG_LED_PWM_BASE + led->chan_nr, + ret = lp55xx_write(chip, LP5521_REG_LED_PWM_BASE + led->chan_nr, led->brightness); mutex_unlock(&chip->lock); + + return ret; } static ssize_t show_engine_mode(struct device *dev, @@ -501,7 +502,7 @@ static struct lp55xx_device_config lp5521_cfg = { }, .max_channel = LP5521_MAX_LEDS, .post_init_device = lp5521_post_init_device, - .brightness_work_fn = lp5521_led_brightness_work, + .brightness_fn = lp5521_led_brightness, .set_led_current = lp5521_set_led_current, .firmware_cb = lp5521_firmware_loaded, .run_engine = lp5521_run_engine, diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 1d0187f42941..c5b30f06218a 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -802,16 +802,16 @@ leave: return ret; } -static void lp5523_led_brightness_work(struct work_struct *work) +static int lp5523_led_brightness(struct lp55xx_led *led) { - struct lp55xx_led *led = container_of(work, struct lp55xx_led, - brightness_work); struct lp55xx_chip *chip = led->chip; + int ret; mutex_lock(&chip->lock); - lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr, + ret = lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr, led->brightness); mutex_unlock(&chip->lock); + return ret; } static LP55XX_DEV_ATTR_RW(engine1_mode, show_engine1_mode, store_engine1_mode); @@ -867,7 +867,7 @@ static struct lp55xx_device_config lp5523_cfg = { }, .max_channel = LP5523_MAX_LEDS, .post_init_device = lp5523_post_init_device, - .brightness_work_fn = lp5523_led_brightness_work, + .brightness_fn = lp5523_led_brightness, .set_led_current = lp5523_set_led_current, .firmware_cb = lp5523_firmware_loaded, .run_engine = lp5523_run_engine, diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index 0360c59dbdc9..b75333803a63 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -311,10 +311,8 @@ static int lp5562_post_init_device(struct lp55xx_chip *chip) return 0; } -static void lp5562_led_brightness_work(struct work_struct *work) +static int lp5562_led_brightness(struct lp55xx_led *led) { - struct lp55xx_led *led = container_of(work, struct lp55xx_led, - brightness_work); struct lp55xx_chip *chip = led->chip; u8 addr[] = { LP5562_REG_R_PWM, @@ -322,10 +320,13 @@ static void lp5562_led_brightness_work(struct work_struct *work) LP5562_REG_B_PWM, LP5562_REG_W_PWM, }; + int 
ret; mutex_lock(&chip->lock); - lp55xx_write(chip, addr[led->chan_nr], led->brightness); + ret = lp55xx_write(chip, addr[led->chan_nr], led->brightness); mutex_unlock(&chip->lock); + + return ret; } static void lp5562_write_program_memory(struct lp55xx_chip *chip, @@ -503,7 +504,7 @@ static struct lp55xx_device_config lp5562_cfg = { }, .post_init_device = lp5562_post_init_device, .set_led_current = lp5562_set_led_current, - .brightness_work_fn = lp5562_led_brightness_work, + .brightness_fn = lp5562_led_brightness, .run_engine = lp5562_run_engine, .firmware_cb = lp5562_firmware_loaded, .dev_attr_group = &lp5562_group, diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 59b76833f0d3..5377f22ff994 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -134,13 +134,14 @@ static struct attribute *lp55xx_led_attrs[] = { }; ATTRIBUTE_GROUPS(lp55xx_led); -static void lp55xx_set_brightness(struct led_classdev *cdev, +static int lp55xx_set_brightness(struct led_classdev *cdev, enum led_brightness brightness) { struct lp55xx_led *led = cdev_to_lp55xx_led(cdev); + struct lp55xx_device_config *cfg = led->chip->cfg; led->brightness = (u8)brightness; - schedule_work(&led->brightness_work); + return cfg->brightness_fn(led); } static int lp55xx_init_led(struct lp55xx_led *led, @@ -172,7 +173,7 @@ static int lp55xx_init_led(struct lp55xx_led *led, return -EINVAL; } - led->cdev.brightness_set = lp55xx_set_brightness; + led->cdev.brightness_set_blocking = lp55xx_set_brightness; led->cdev.groups = lp55xx_led_groups; if (pdata->led_config[chan].name) { @@ -464,7 +465,7 @@ int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip) int ret; int i; - if (!cfg->brightness_work_fn) { + if (!cfg->brightness_fn) { dev_err(&chip->cl->dev, "empty brightness configuration\n"); return -EINVAL; } @@ -481,8 +482,6 @@ int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip) if (ret) goto err_init_led; - INIT_WORK(&each->brightness_work, cfg->brightness_work_fn); - chip->num_leds++; each->chip = chip; @@ -507,7 +506,6 @@ void lp55xx_unregister_leds(struct lp55xx_led *led, struct lp55xx_chip *chip) for (i = 0; i < chip->num_leds; i++) { each = led + i; led_classdev_unregister(&each->cdev); - flush_work(&each->brightness_work); } } EXPORT_SYMBOL_GPL(lp55xx_unregister_leds); diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h index c7f1e6155001..abf1fb5da37d 100644 --- a/drivers/leds/leds-lp55xx-common.h +++ b/drivers/leds/leds-lp55xx-common.h @@ -95,7 +95,7 @@ struct lp55xx_reg { * @enable : Chip specific enable command * @max_channel : Maximum number of channels * @post_init_device : Chip specific initialization code - * @brightness_work_fn : Brightness work function + * @brightness_fn : Brightness function * @set_led_current : LED current set function * @firmware_cb : Call function when the firmware is loaded * @run_engine : Run internal engine for pattern @@ -110,7 +110,7 @@ struct lp55xx_device_config { int (*post_init_device) (struct lp55xx_chip *chip); /* access brightness register */ - void (*brightness_work_fn)(struct work_struct *work); + int (*brightness_fn)(struct lp55xx_led *led); /* current setting function */ void (*set_led_current) (struct lp55xx_led *led, u8 led_current); @@ -164,7 +164,6 @@ struct lp55xx_chip { * @cdev : LED class device * @led_current : Current setting at each led channel * @max_current : Maximun current at each led channel - * @brightness_work : Workqueue 
for brightness control * @brightness : Brightness value * @chip : The lp55xx chip data */ @@ -173,7 +172,6 @@ struct lp55xx_led { struct led_classdev cdev; u8 led_current; u8 max_current; - struct work_struct brightness_work; u8 brightness; struct lp55xx_chip *chip; }; diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c index 3f54f6f2b821..3f9675bd214a 100644 --- a/drivers/leds/leds-lp8501.c +++ b/drivers/leds/leds-lp8501.c @@ -272,16 +272,17 @@ static void lp8501_firmware_loaded(struct lp55xx_chip *chip) lp8501_update_program_memory(chip, fw->data, fw->size); } -static void lp8501_led_brightness_work(struct work_struct *work) +static int lp8501_led_brightness(struct lp55xx_led *led) { - struct lp55xx_led *led = container_of(work, struct lp55xx_led, - brightness_work); struct lp55xx_chip *chip = led->chip; + int ret; mutex_lock(&chip->lock); - lp55xx_write(chip, LP8501_REG_LED_PWM_BASE + led->chan_nr, + ret = lp55xx_write(chip, LP8501_REG_LED_PWM_BASE + led->chan_nr, led->brightness); mutex_unlock(&chip->lock); + + return ret; } /* Chip specific configurations */ @@ -296,7 +297,7 @@ static struct lp55xx_device_config lp8501_cfg = { }, .max_channel = LP8501_MAX_LEDS, .post_init_device = lp8501_post_init_device, - .brightness_work_fn = lp8501_led_brightness_work, + .brightness_fn = lp8501_led_brightness, .set_led_current = lp8501_set_led_current, .firmware_cb = lp8501_firmware_loaded, .run_engine = lp8501_run_engine, diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c index 3409f03c1fa8..0eee38fc0565 100644 --- a/drivers/leds/leds-lp8788.c +++ b/drivers/leds/leds-lp8788.c @@ -26,10 +26,8 @@ struct lp8788_led { struct lp8788 *lp; struct mutex lock; - struct work_struct work; struct led_classdev led_dev; enum lp8788_isink_number isink_num; - enum led_brightness brightness; int on; }; @@ -76,24 +74,29 @@ static int lp8788_led_init_device(struct lp8788_led *led, return lp8788_update_bits(led->lp, addr, mask, val); } -static void lp8788_led_enable(struct lp8788_led *led, +static int lp8788_led_enable(struct lp8788_led *led, enum lp8788_isink_number num, int on) { + int ret; + u8 mask = 1 << num; u8 val = on << num; - if (lp8788_update_bits(led->lp, LP8788_ISINK_CTRL, mask, val)) - return; + ret = lp8788_update_bits(led->lp, LP8788_ISINK_CTRL, mask, val); + if (ret == 0) + led->on = on; - led->on = on; + return ret; } -static void lp8788_led_work(struct work_struct *work) +static int lp8788_brightness_set(struct led_classdev *led_cdev, + enum led_brightness val) { - struct lp8788_led *led = container_of(work, struct lp8788_led, work); + struct lp8788_led *led = + container_of(led_cdev, struct lp8788_led, led_dev); + enum lp8788_isink_number num = led->isink_num; - int enable; - u8 val = led->brightness; + int enable, ret; mutex_lock(&led->lock); @@ -101,28 +104,21 @@ static void lp8788_led_work(struct work_struct *work) case LP8788_ISINK_1: case LP8788_ISINK_2: case LP8788_ISINK_3: - lp8788_write_byte(led->lp, lp8788_pwm_addr[num], val); + ret = lp8788_write_byte(led->lp, lp8788_pwm_addr[num], val); + if (ret < 0) + goto unlock; break; default: mutex_unlock(&led->lock); - return; + return -EINVAL; } enable = (val > 0) ? 
1 : 0; if (enable != led->on) - lp8788_led_enable(led, num, enable); - + ret = lp8788_led_enable(led, num, enable); +unlock: mutex_unlock(&led->lock); -} - -static void lp8788_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brt_val) -{ - struct lp8788_led *led = - container_of(led_cdev, struct lp8788_led, led_dev); - - led->brightness = brt_val; - schedule_work(&led->work); + return ret; } static int lp8788_led_probe(struct platform_device *pdev) @@ -139,7 +135,7 @@ static int lp8788_led_probe(struct platform_device *pdev) led->lp = lp; led->led_dev.max_brightness = MAX_BRIGHTNESS; - led->led_dev.brightness_set = lp8788_brightness_set; + led->led_dev.brightness_set_blocking = lp8788_brightness_set; led_pdata = lp->pdata ? lp->pdata->led_pdata : NULL; @@ -149,7 +145,6 @@ static int lp8788_led_probe(struct platform_device *pdev) led->led_dev.name = led_pdata->name; mutex_init(&led->lock); - INIT_WORK(&led->work, lp8788_led_work); platform_set_drvdata(pdev, led); @@ -173,7 +168,6 @@ static int lp8788_led_remove(struct platform_device *pdev) struct lp8788_led *led = platform_get_drvdata(pdev); led_classdev_unregister(&led->led_dev); - flush_work(&led->work); return 0; } diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c index 79f084354e67..3e70775a2d54 100644 --- a/drivers/leds/leds-lp8860.c +++ b/drivers/leds/leds-lp8860.c @@ -91,26 +91,22 @@ /** * struct lp8860_led - * @lock - Lock for reading/writing the device - * @work - Work item used to off load the brightness register writes * @client - Pointer to the I2C client * @led_dev - led class device pointer * @regmap - Devices register map * @eeprom_regmap - EEPROM register map * @enable_gpio - VDDIO/EN gpio to enable communication interface * @regulator - LED supply regulator pointer - * @brightness - Current brightness value requested * @label - LED label **/ struct lp8860_led { struct mutex lock; - struct work_struct work; struct i2c_client *client; struct led_classdev led_dev; struct regmap *regmap; struct regmap *eeprom_regmap; struct gpio_desc *enable_gpio; struct regulator *regulator; - enum led_brightness brightness; const char *label; }; @@ -212,11 +208,13 @@ out: return ret; } -static void lp8860_led_brightness_work(struct work_struct *work) +static int lp8860_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brt_val) { - struct lp8860_led *led = container_of(work, struct lp8860_led, work); + struct lp8860_led *led = + container_of(led_cdev, struct lp8860_led, led_dev); + int disp_brightness = brt_val * 255; int ret; - int disp_brightness = led->brightness * 255; mutex_lock(&led->lock); @@ -241,16 +239,7 @@ static void lp8860_led_brightness_work(struct work_struct *work) } out: mutex_unlock(&led->lock); -} - -static void lp8860_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brt_val) -{ - struct lp8860_led *led = - container_of(led_cdev, struct lp8860_led, led_dev); - - led->brightness = brt_val; - schedule_work(&led->work); + return ret; } static int lp8860_init(struct lp8860_led *led) @@ -406,10 +395,9 @@ static int lp8860_probe(struct i2c_client *client, led->client = client; led->led_dev.name = led->label; led->led_dev.max_brightness = LED_FULL; - led->led_dev.brightness_set = lp8860_brightness_set; + led->led_dev.brightness_set_blocking = lp8860_brightness_set; mutex_init(&led->lock); - INIT_WORK(&led->work, lp8860_led_brightness_work); i2c_set_clientdata(client, led); @@ -448,7 +436,6 @@ static int lp8860_remove(struct i2c_client *client) int ret; 
led_classdev_unregister(&led->led_dev); - cancel_work_sync(&led->work); if (led->enable_gpio) gpiod_direction_output(led->enable_gpio, 0); diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c index 9f41124765cc..a7ff510cbdd0 100644 --- a/drivers/leds/leds-lt3593.c +++ b/drivers/leds/leds-lt3593.c @@ -19,7 +19,6 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> -#include <linux/workqueue.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/slab.h> @@ -28,15 +27,14 @@ struct lt3593_led_data { struct led_classdev cdev; unsigned gpio; - struct work_struct work; - u8 new_level; }; -static void lt3593_led_work(struct work_struct *work) +static int lt3593_led_set(struct led_classdev *led_cdev, + enum led_brightness value) { - int pulses; struct lt3593_led_data *led_dat = - container_of(work, struct lt3593_led_data, work); + container_of(led_cdev, struct lt3593_led_data, cdev); + int pulses; /* * The LT3593 resets its internal current level register to the maximum @@ -47,18 +45,18 @@ static void lt3593_led_work(struct work_struct *work) * applied is to the output driver. */ - if (led_dat->new_level == 0) { + if (value == 0) { gpio_set_value_cansleep(led_dat->gpio, 0); - return; + return 0; } - pulses = 32 - (led_dat->new_level * 32) / 255; + pulses = 32 - (value * 32) / 255; if (pulses == 0) { gpio_set_value_cansleep(led_dat->gpio, 0); mdelay(1); gpio_set_value_cansleep(led_dat->gpio, 1); - return; + return 0; } gpio_set_value_cansleep(led_dat->gpio, 1); @@ -69,16 +67,8 @@ static void lt3593_led_work(struct work_struct *work) gpio_set_value_cansleep(led_dat->gpio, 1); udelay(1); } -} -static void lt3593_led_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct lt3593_led_data *led_dat = - container_of(led_cdev, struct lt3593_led_data, cdev); - - led_dat->new_level = value; - schedule_work(&led_dat->work); + return 0; } static int create_lt3593_led(const struct gpio_led *template, @@ -97,7 +87,7 @@ static int create_lt3593_led(const struct gpio_led *template, led_dat->cdev.default_trigger = template->default_trigger; led_dat->gpio = template->gpio; - led_dat->cdev.brightness_set = lt3593_led_set; + led_dat->cdev.brightness_set_blocking = lt3593_led_set; state = (template->default_state == LEDS_GPIO_DEFSTATE_ON); led_dat->cdev.brightness = state ? 
LED_FULL : LED_OFF; @@ -111,8 +101,6 @@ static int create_lt3593_led(const struct gpio_led *template, if (ret < 0) return ret; - INIT_WORK(&led_dat->work, lt3593_led_work); - ret = led_classdev_register(parent, &led_dat->cdev); if (ret < 0) return ret; @@ -129,7 +117,6 @@ static void delete_lt3593_led(struct lt3593_led_data *led) return; led_classdev_unregister(&led->cdev); - cancel_work_sync(&led->work); } static int lt3593_led_probe(struct platform_device *pdev) diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c index afbb1409b2e2..1eb58ef6aefe 100644 --- a/drivers/leds/leds-max77693.c +++ b/drivers/leds/leds-max77693.c @@ -20,7 +20,6 @@ #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <media/v4l2-flash-led-class.h> #define MODE_OFF 0 @@ -62,8 +61,6 @@ struct max77693_sub_led { int fled_id; /* corresponding LED Flash class device */ struct led_classdev_flash fled_cdev; - /* assures led-triggers compatibility */ - struct work_struct work_brightness_set; /* V4L2 Flash device */ struct v4l2_flash *v4l2_flash; @@ -463,10 +460,14 @@ static int max77693_setup(struct max77693_led_device *led, return max77693_set_mode_reg(led, MODE_OFF); } -static int __max77693_led_brightness_set(struct max77693_led_device *led, - int fled_id, enum led_brightness value) +/* LED subsystem callbacks */ +static int max77693_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) { - int ret; + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + int fled_id = sub_led->fled_id, ret; mutex_lock(&led->lock); @@ -494,43 +495,8 @@ static int __max77693_led_brightness_set(struct max77693_led_device *led, ret); unlock: mutex_unlock(&led->lock); - return ret; -} - -static void max77693_led_brightness_set_work( - struct work_struct *work) -{ - struct max77693_sub_led *sub_led = - container_of(work, struct max77693_sub_led, - work_brightness_set); - struct max77693_led_device *led = sub_led_to_led(sub_led); - - __max77693_led_brightness_set(led, sub_led->fled_id, - sub_led->torch_brightness); -} - -/* LED subsystem callbacks */ - -static int max77693_led_brightness_set_sync( - struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); - struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); - struct max77693_led_device *led = sub_led_to_led(sub_led); - - return __max77693_led_brightness_set(led, sub_led->fled_id, value); -} -static void max77693_led_brightness_set( - struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); - struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); - - sub_led->torch_brightness = value; - schedule_work(&sub_led->work_brightness_set); + return ret; } static int max77693_led_flash_brightness_set( @@ -682,6 +648,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led, if (sub_nodes[fled_id]) { dev_err(dev, "Conflicting \"led-sources\" DT properties\n"); + of_node_put(child_node); return -EINVAL; } @@ -931,16 +898,13 @@ static void max77693_init_fled_cdev(struct max77693_sub_led *sub_led, led_cdev->name = led_cfg->label[fled_id]; - led_cdev->brightness_set = max77693_led_brightness_set; - led_cdev->brightness_set_sync = max77693_led_brightness_set_sync; + 
led_cdev->brightness_set_blocking = max77693_led_brightness_set; led_cdev->max_brightness = (led->iout_joint ? led_cfg->iout_torch_max[FLED1] + led_cfg->iout_torch_max[FLED2] : led_cfg->iout_torch_max[fled_id]) / TORCH_IOUT_STEP; led_cdev->flags |= LED_DEV_CAP_FLASH; - INIT_WORK(&sub_led->work_brightness_set, - max77693_led_brightness_set_work); max77693_init_flash_settings(sub_led, led_cfg); @@ -1062,13 +1026,11 @@ static int max77693_led_remove(struct platform_device *pdev) if (led->iout_joint || max77693_fled_used(led, FLED1)) { v4l2_flash_release(sub_leds[FLED1].v4l2_flash); led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev); - cancel_work_sync(&sub_leds[FLED1].work_brightness_set); } if (!led->iout_joint && max77693_fled_used(led, FLED2)) { v4l2_flash_release(sub_leds[FLED2].v4l2_flash); led_classdev_flash_unregister(&sub_leds[FLED2].fled_cdev); - cancel_work_sync(&sub_leds[FLED2].work_brightness_set); } mutex_destroy(&led->lock); diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c index c592aa5662bb..01b459069358 100644 --- a/drivers/leds/leds-max8997.c +++ b/drivers/leds/leds-max8997.c @@ -13,7 +13,6 @@ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/leds.h> #include <linux/mfd/max8997.h> #include <linux/mfd/max8997-private.h> diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c index e2b847fe22a1..a2e4c1792e17 100644 --- a/drivers/leds/leds-mc13783.c +++ b/drivers/leds/leds-mc13783.c @@ -20,7 +20,6 @@ #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/of.h> -#include <linux/workqueue.h> #include <linux/mfd/mc13xxx.h> struct mc13xxx_led_devtype { @@ -32,8 +31,6 @@ struct mc13xxx_led_devtype { struct mc13xxx_led { struct led_classdev cdev; - struct work_struct work; - enum led_brightness new_brightness; int id; struct mc13xxx_leds *leds; }; @@ -55,9 +52,11 @@ static unsigned int mc13xxx_max_brightness(int id) return 0x3f; } -static void mc13xxx_led_work(struct work_struct *work) +static int mc13xxx_led_set(struct led_classdev *led_cdev, + enum led_brightness value) { - struct mc13xxx_led *led = container_of(work, struct mc13xxx_led, work); + struct mc13xxx_led *led = + container_of(led_cdev, struct mc13xxx_led, cdev); struct mc13xxx_leds *leds = led->leds; unsigned int reg, bank, off, shift; @@ -105,19 +104,9 @@ static void mc13xxx_led_work(struct work_struct *work) BUG(); } - mc13xxx_reg_rmw(leds->master, leds->devtype->ledctrl_base + reg, + return mc13xxx_reg_rmw(leds->master, leds->devtype->ledctrl_base + reg, mc13xxx_max_brightness(led->id) << shift, - led->new_brightness << shift); -} - -static void mc13xxx_led_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct mc13xxx_led *led = - container_of(led_cdev, struct mc13xxx_led, cdev); - - led->new_brightness = value; - schedule_work(&led->work); + value << shift); } #ifdef CONFIG_OF @@ -257,11 +246,9 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev) leds->led[i].cdev.name = name; leds->led[i].cdev.default_trigger = trig; leds->led[i].cdev.flags = LED_CORE_SUSPENDRESUME; - leds->led[i].cdev.brightness_set = mc13xxx_led_set; + leds->led[i].cdev.brightness_set_blocking = mc13xxx_led_set; leds->led[i].cdev.max_brightness = mc13xxx_max_brightness(id); - INIT_WORK(&leds->led[i].work, mc13xxx_led_work); - ret = led_classdev_register(dev->parent, &leds->led[i].cdev); if (ret) { dev_err(dev, "Failed to register LED %i\n", id); @@ -270,10 +257,8 @@ static int 
__init mc13xxx_led_probe(struct platform_device *pdev) } if (ret) - while (--i >= 0) { + while (--i >= 0) led_classdev_unregister(&leds->led[i].cdev); - cancel_work_sync(&leds->led[i].work); - } return ret; } @@ -283,10 +268,8 @@ static int mc13xxx_led_remove(struct platform_device *pdev) struct mc13xxx_leds *leds = platform_get_drvdata(pdev); int i; - for (i = 0; i < leds->num_leds; i++) { + for (i = 0; i < leds->num_leds; i++) led_classdev_unregister(&leds->led[i].cdev); - cancel_work_sync(&leds->led[i].work); - } return 0; } diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index a95a61220169..506b75b190e7 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -45,24 +45,12 @@ struct ns2_led_data { unsigned cmd; unsigned slow; bool can_sleep; - int mode_index; unsigned char sata; /* True when SATA mode active. */ rwlock_t rw_lock; /* Lock GPIOs. */ - struct work_struct work; int num_modes; struct ns2_led_modval *modval; }; -static void ns2_led_work(struct work_struct *work) -{ - struct ns2_led_data *led_dat = - container_of(work, struct ns2_led_data, work); - int i = led_dat->mode_index; - - gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level); - gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level); -} - static int ns2_led_get_mode(struct ns2_led_data *led_dat, enum ns2_led_modes *mode) { @@ -112,8 +100,8 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat, goto exit_unlock; } - led_dat->mode_index = i; - schedule_work(&led_dat->work); + gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level); + gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level); exit_unlock: write_unlock_irqrestore(&led_dat->rw_lock, flags); @@ -136,6 +124,13 @@ static void ns2_led_set(struct led_classdev *led_cdev, ns2_led_set_mode(led_dat, mode); } +static int ns2_led_set_blocking(struct led_classdev *led_cdev, + enum led_brightness value) +{ + ns2_led_set(led_cdev, value); + return 0; +} + static ssize_t ns2_led_sata_store(struct device *dev, struct device_attribute *attr, const char *buff, size_t count) @@ -219,13 +214,16 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, led_dat->cdev.name = template->name; led_dat->cdev.default_trigger = template->default_trigger; led_dat->cdev.blink_set = NULL; - led_dat->cdev.brightness_set = ns2_led_set; led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; led_dat->cdev.groups = ns2_led_groups; led_dat->cmd = template->cmd; led_dat->slow = template->slow; led_dat->can_sleep = gpio_cansleep(led_dat->cmd) | gpio_cansleep(led_dat->slow); + if (led_dat->can_sleep) + led_dat->cdev.brightness_set_blocking = ns2_led_set_blocking; + else + led_dat->cdev.brightness_set = ns2_led_set; led_dat->modval = template->modval; led_dat->num_modes = template->num_modes; @@ -238,8 +236,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, led_dat->cdev.brightness = (mode == NS_V2_LED_OFF) ? 
LED_OFF : LED_FULL; - INIT_WORK(&led_dat->work, ns2_led_work); - ret = led_classdev_register(&pdev->dev, &led_dat->cdev); if (ret < 0) return ret; @@ -250,7 +246,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, static void delete_ns2_led(struct ns2_led_data *led_dat) { led_classdev_unregister(&led_dat->cdev); - cancel_work_sync(&led_dat->work); } #ifdef CONFIG_OF_GPIO diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 5a6363d161a2..17c63ec9fb9e 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -158,7 +158,7 @@ static void pca9532_setled(struct pca9532_led *led) mutex_unlock(&data->update_lock); } -static void pca9532_set_brightness(struct led_classdev *led_cdev, +static int pca9532_set_brightness(struct led_classdev *led_cdev, enum led_brightness value) { int err = 0; @@ -172,9 +172,12 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev, led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ err = pca9532_calcpwm(led->client, 0, 0, value); if (err) - return; /* XXX: led api doesn't allow error code? */ + return err; } - schedule_work(&led->work); + if (led->state == PCA9532_PWM0) + pca9532_setpwm(led->client, 0); + pca9532_setled(led); + return err; } static int pca9532_set_blink(struct led_classdev *led_cdev, @@ -198,7 +201,10 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); if (err) return err; - schedule_work(&led->work); + if (led->state == PCA9532_PWM0) + pca9532_setpwm(led->client, 0); + pca9532_setled(led); + return 0; } @@ -233,15 +239,6 @@ static void pca9532_input_work(struct work_struct *work) mutex_unlock(&data->update_lock); } -static void pca9532_led_work(struct work_struct *work) -{ - struct pca9532_led *led; - led = container_of(work, struct pca9532_led, work); - if (led->state == PCA9532_PWM0) - pca9532_setpwm(led->client, 0); - pca9532_setled(led); -} - #ifdef CONFIG_LEDS_PCA9532_GPIO static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset) { @@ -307,7 +304,6 @@ static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs) break; case PCA9532_TYPE_LED: led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); break; case PCA9532_TYPE_N2100_BEEP: if (data->idev != NULL) { @@ -359,9 +355,9 @@ static int pca9532_configure(struct i2c_client *client, led->name = pled->name; led->ldev.name = led->name; led->ldev.brightness = LED_OFF; - led->ldev.brightness_set = pca9532_set_brightness; + led->ldev.brightness_set_blocking = + pca9532_set_brightness; led->ldev.blink_set = pca9532_set_blink; - INIT_WORK(&led->work, pca9532_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index b775e1efecd3..840401ae9a4e 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -47,7 +47,6 @@ #include <linux/leds.h> #include <linux/err.h> #include <linux/i2c.h> -#include <linux/workqueue.h> #include <linux/slab.h> /* LED select registers determine the source that drives LED outputs */ @@ -110,8 +109,6 @@ struct pca955x { struct pca955x_led { struct pca955x *pca955x; - struct work_struct work; - enum led_brightness brightness; struct led_classdev led_cdev; int led_num; /* 0 .. 
15 potentially */ char name[32]; @@ -193,7 +190,8 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n) pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n); } -static void pca955x_led_work(struct work_struct *work) +static int pca955x_led_set(struct led_classdev *led_cdev, + enum led_brightness value) { struct pca955x_led *pca955x_led; struct pca955x *pca955x; @@ -201,7 +199,7 @@ static void pca955x_led_work(struct work_struct *work) int chip_ls; /* which LSx to use (0-3 potentially) */ int ls_led; /* which set of bits within LSx to use (0-3) */ - pca955x_led = container_of(work, struct pca955x_led, work); + pca955x_led = container_of(led_cdev, struct pca955x_led, led_cdev); pca955x = pca955x_led->pca955x; chip_ls = pca955x_led->led_num / 4; @@ -211,7 +209,7 @@ static void pca955x_led_work(struct work_struct *work) ls = pca955x_read_ls(pca955x->client, chip_ls); - switch (pca955x_led->brightness) { + switch (value) { case LED_FULL: ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON); break; @@ -230,7 +228,7 @@ static void pca955x_led_work(struct work_struct *work) * just turning off for all other values. */ pca955x_write_pwm(pca955x->client, 1, - 255 - pca955x_led->brightness); + 255 - value); ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1); break; } @@ -238,21 +236,8 @@ static void pca955x_led_work(struct work_struct *work) pca955x_write_ls(pca955x->client, chip_ls, ls); mutex_unlock(&pca955x->lock); -} - -static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value) -{ - struct pca955x_led *pca955x; - - pca955x = container_of(led_cdev, struct pca955x_led, led_cdev); - - pca955x->brightness = value; - /* - * Must use workqueue for the actual I/O since I2C operations - * can sleep. - */ - schedule_work(&pca955x->work); + return 0; } static int pca955x_probe(struct i2c_client *client, @@ -328,9 +313,7 @@ static int pca955x_probe(struct i2c_client *client, } pca955x_led->led_cdev.name = pca955x_led->name; - pca955x_led->led_cdev.brightness_set = pca955x_led_set; - - INIT_WORK(&pca955x_led->work, pca955x_led_work); + pca955x_led->led_cdev.brightness_set_blocking = pca955x_led_set; err = led_classdev_register(&client->dev, &pca955x_led->led_cdev); @@ -355,10 +338,8 @@ static int pca955x_probe(struct i2c_client *client, return 0; exit: - while (i--) { + while (i--) led_classdev_unregister(&pca955x->leds[i].led_cdev); - cancel_work_sync(&pca955x->leds[i].work); - } return err; } @@ -368,10 +349,8 @@ static int pca955x_remove(struct i2c_client *client) struct pca955x *pca955x = i2c_get_clientdata(client); int i; - for (i = 0; i < pca955x->chipdef->bits; i++) { + for (i = 0; i < pca955x->chipdef->bits; i++) led_classdev_unregister(&pca955x->leds[i].led_cdev); - cancel_work_sync(&pca955x->leds[i].work); - } return 0; } diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c index 41f269fe0920..407eba11e187 100644 --- a/drivers/leds/leds-pca963x.c +++ b/drivers/leds/leds-pca963x.c @@ -32,7 +32,6 @@ #include <linux/leds.h> #include <linux/err.h> #include <linux/i2c.h> -#include <linux/workqueue.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/platform_data/leds-pca963x.h> @@ -96,11 +95,6 @@ static const struct i2c_device_id pca963x_id[] = { }; MODULE_DEVICE_TABLE(i2c, pca963x_id); -enum pca963x_cmd { - BRIGHTNESS_SET, - BLINK_SET, -}; - struct pca963x_led; struct pca963x { @@ -112,47 +106,52 @@ struct pca963x { struct pca963x_led { struct pca963x *chip; - struct work_struct work; - enum led_brightness brightness; struct 
led_classdev led_cdev; int led_num; /* 0 .. 15 potentially */ - enum pca963x_cmd cmd; char name[32]; u8 gdc; u8 gfrq; }; -static void pca963x_brightness_work(struct pca963x_led *pca963x) +static int pca963x_brightness(struct pca963x_led *pca963x, + enum led_brightness brightness) { u8 ledout_addr = pca963x->chip->chipdef->ledout_base + (pca963x->led_num / 4); u8 ledout; int shift = 2 * (pca963x->led_num % 4); u8 mask = 0x3 << shift; + int ret; mutex_lock(&pca963x->chip->mutex); ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr); - switch (pca963x->brightness) { + switch (brightness) { case LED_FULL: - i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr, + ret = i2c_smbus_write_byte_data(pca963x->chip->client, + ledout_addr, (ledout & ~mask) | (PCA963X_LED_ON << shift)); break; case LED_OFF: - i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr, - ledout & ~mask); + ret = i2c_smbus_write_byte_data(pca963x->chip->client, + ledout_addr, ledout & ~mask); break; default: - i2c_smbus_write_byte_data(pca963x->chip->client, + ret = i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_PWM_BASE + pca963x->led_num, - pca963x->brightness); - i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr, + brightness); + if (ret < 0) + goto unlock; + ret = i2c_smbus_write_byte_data(pca963x->chip->client, + ledout_addr, (ledout & ~mask) | (PCA963X_LED_PWM << shift)); break; } +unlock: mutex_unlock(&pca963x->chip->mutex); + return ret; } -static void pca963x_blink_work(struct pca963x_led *pca963x) +static void pca963x_blink(struct pca963x_led *pca963x) { u8 ledout_addr = pca963x->chip->chipdef->ledout_base + (pca963x->led_num / 4); @@ -180,36 +179,14 @@ static void pca963x_blink_work(struct pca963x_led *pca963x) mutex_unlock(&pca963x->chip->mutex); } -static void pca963x_work(struct work_struct *work) -{ - struct pca963x_led *pca963x = container_of(work, - struct pca963x_led, work); - - switch (pca963x->cmd) { - case BRIGHTNESS_SET: - pca963x_brightness_work(pca963x); - break; - case BLINK_SET: - pca963x_blink_work(pca963x); - break; - } -} - -static void pca963x_led_set(struct led_classdev *led_cdev, +static int pca963x_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct pca963x_led *pca963x; pca963x = container_of(led_cdev, struct pca963x_led, led_cdev); - pca963x->cmd = BRIGHTNESS_SET; - pca963x->brightness = value; - - /* - * Must use workqueue for the actual I/O since I2C operations - * can sleep. - */ - schedule_work(&pca963x->work); + return pca963x_brightness(pca963x, value); } static int pca963x_blink_set(struct led_classdev *led_cdev, @@ -254,15 +231,10 @@ static int pca963x_blink_set(struct led_classdev *led_cdev, */ gfrq = (period * 24 / 1000) - 1; - pca963x->cmd = BLINK_SET; pca963x->gdc = gdc; pca963x->gfrq = gfrq; - /* - * Must use workqueue for the actual I/O since I2C operations - * can sleep. 
- */ - schedule_work(&pca963x->work); + pca963x_blink(pca963x); *delay_on = time_on; *delay_off = time_off; @@ -409,13 +381,11 @@ static int pca963x_probe(struct i2c_client *client, client->addr, i); pca963x[i].led_cdev.name = pca963x[i].name; - pca963x[i].led_cdev.brightness_set = pca963x_led_set; + pca963x[i].led_cdev.brightness_set_blocking = pca963x_led_set; if (pdata && pdata->blink_type == PCA963X_HW_BLINK) pca963x[i].led_cdev.blink_set = pca963x_blink_set; - INIT_WORK(&pca963x[i].work, pca963x_work); - err = led_classdev_register(&client->dev, &pca963x[i].led_cdev); if (err < 0) goto exit; @@ -435,10 +405,8 @@ static int pca963x_probe(struct i2c_client *client, return 0; exit: - while (i--) { + while (i--) led_classdev_unregister(&pca963x[i].led_cdev); - cancel_work_sync(&pca963x[i].work); - } return err; } @@ -448,10 +416,8 @@ static int pca963x_remove(struct i2c_client *client) struct pca963x *pca963x = i2c_get_clientdata(client); int i; - for (i = 0; i < pca963x->chipdef->n_leds; i++) { + for (i = 0; i < pca963x->chipdef->n_leds; i++) led_classdev_unregister(&pca963x->leds[i].led_cdev); - cancel_work_sync(&pca963x->leds[i].work); - } return 0; } diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c index 1e75e1fe9b72..dfb8bd390125 100644 --- a/drivers/leds/leds-powernv.c +++ b/drivers/leds/leds-powernv.c @@ -77,7 +77,7 @@ static int powernv_get_led_type(const char *led_type_desc) * This function is called from work queue task context when ever it gets * scheduled. This function can sleep at opal_async_wait_response call. */ -static void powernv_led_set(struct powernv_led_data *powernv_led, +static int powernv_led_set(struct powernv_led_data *powernv_led, enum led_brightness value) { int rc, token; @@ -99,7 +99,7 @@ static void powernv_led_set(struct powernv_led_data *powernv_led, if (token != -ERESTARTSYS) dev_err(dev, "%s: Couldn't get OPAL async token\n", __func__); - return; + return token; } rc = opal_leds_set_ind(token, powernv_led->loc_code, @@ -125,6 +125,7 @@ static void powernv_led_set(struct powernv_led_data *powernv_led, out_token: opal_async_release_token(token); + return rc; } /* @@ -173,20 +174,23 @@ static enum led_brightness powernv_led_get(struct powernv_led_data *powernv_led) * LED classdev 'brightness_get' function. This schedules work * to update LED state. 
*/ -static void powernv_brightness_set(struct led_classdev *led_cdev, +static int powernv_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct powernv_led_data *powernv_led = container_of(led_cdev, struct powernv_led_data, cdev); struct powernv_led_common *powernv_led_common = powernv_led->common; + int rc; /* Do not modify LED in unload path */ if (powernv_led_common->led_disabled) - return; + return 0; mutex_lock(&powernv_led_common->lock); - powernv_led_set(powernv_led, value); + rc = powernv_led_set(powernv_led, value); mutex_unlock(&powernv_led_common->lock); + + return rc; } /* LED classdev 'brightness_get' function */ @@ -227,7 +231,7 @@ static int powernv_led_create(struct device *dev, return -ENOMEM; } - powernv_led->cdev.brightness_set = powernv_brightness_set; + powernv_led->cdev.brightness_set_blocking = powernv_brightness_set; powernv_led->cdev.brightness_get = powernv_brightness_get; powernv_led->cdev.brightness = LED_OFF; powernv_led->cdev.max_brightness = LED_FULL; @@ -256,8 +260,6 @@ static int powernv_led_classdev(struct platform_device *pdev, for_each_child_of_node(led_node, np) { p = of_find_property(np, "led-types", NULL); - if (!p) - continue; while ((cur = of_prop_next_string(p, cur)) != NULL) { powernv_led = devm_kzalloc(dev, sizeof(*powernv_led), diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 1d07e3e83d29..4783bacb2e9d 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -22,12 +22,10 @@ #include <linux/pwm.h> #include <linux/leds_pwm.h> #include <linux/slab.h> -#include <linux/workqueue.h> struct led_pwm_data { struct led_classdev cdev; struct pwm_device *pwm; - struct work_struct work; unsigned int active_low; unsigned int period; int duty; @@ -51,14 +49,6 @@ static void __led_pwm_set(struct led_pwm_data *led_dat) pwm_enable(led_dat->pwm); } -static void led_pwm_work(struct work_struct *work) -{ - struct led_pwm_data *led_dat = - container_of(work, struct led_pwm_data, work); - - __led_pwm_set(led_dat); -} - static void led_pwm_set(struct led_classdev *led_cdev, enum led_brightness brightness) { @@ -75,10 +65,14 @@ static void led_pwm_set(struct led_classdev *led_cdev, led_dat->duty = duty; - if (led_dat->can_sleep) - schedule_work(&led_dat->work); - else - __led_pwm_set(led_dat); + __led_pwm_set(led_dat); +} + +static int led_pwm_set_blocking(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + led_pwm_set(led_cdev, brightness); + return 0; } static inline size_t sizeof_pwm_leds_priv(int num_leds) @@ -89,11 +83,8 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds) static void led_pwm_cleanup(struct led_pwm_priv *priv) { - while (priv->num_leds--) { + while (priv->num_leds--) led_classdev_unregister(&priv->leds[priv->num_leds].cdev); - if (priv->leds[priv->num_leds].can_sleep) - cancel_work_sync(&priv->leds[priv->num_leds].work); - } } static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, @@ -105,7 +96,6 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, led_data->active_low = led->active_low; led_data->cdev.name = led->name; led_data->cdev.default_trigger = led->default_trigger; - led_data->cdev.brightness_set = led_pwm_set; led_data->cdev.brightness = LED_OFF; led_data->cdev.max_brightness = led->max_brightness; led_data->cdev.flags = LED_CORE_SUSPENDRESUME; @@ -122,8 +112,10 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, } led_data->can_sleep = pwm_can_sleep(led_data->pwm); - if (led_data->can_sleep) - 
INIT_WORK(&led_data->work, led_pwm_work); + if (!led_data->can_sleep) + led_data->cdev.brightness_set = led_pwm_set; + else + led_data->cdev.brightness_set_blocking = led_pwm_set_blocking; led_data->period = pwm_get_period(led_data->pwm); if (!led_data->period && (led->pwm_period_ns > 0)) @@ -132,6 +124,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, ret = led_classdev_register(dev, &led_data->cdev); if (ret == 0) { priv->num_leds++; + led_pwm_set(&led_data->cdev, led_data->cdev.brightness); } else { dev_err(dev, "failed to register PWM led for %s: %d\n", led->name, ret); @@ -236,6 +229,6 @@ static struct platform_driver led_pwm_driver = { module_platform_driver(led_pwm_driver); MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>"); -MODULE_DESCRIPTION("PWM LED driver for PXA"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("generic PWM LED driver"); +MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:leds-pwm"); diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c index ffc21397a675..acf77ca47558 100644 --- a/drivers/leds/leds-regulator.c +++ b/drivers/leds/leds-regulator.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/leds.h> #include <linux/leds-regulator.h> #include <linux/platform_device.h> @@ -25,10 +24,8 @@ struct regulator_led { struct led_classdev cdev; - enum led_brightness value; int enabled; struct mutex mutex; - struct work_struct work; struct regulator *vcc; }; @@ -94,22 +91,24 @@ static void regulator_led_disable(struct regulator_led *led) led->enabled = 0; } -static void regulator_led_set_value(struct regulator_led *led) +static int regulator_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) { + struct regulator_led *led = to_regulator_led(led_cdev); int voltage; - int ret; + int ret = 0; mutex_lock(&led->mutex); - if (led->value == LED_OFF) { + if (value == LED_OFF) { regulator_led_disable(led); goto out; } if (led->cdev.max_brightness > 1) { - voltage = led_regulator_get_voltage(led->vcc, led->value); + voltage = led_regulator_get_voltage(led->vcc, value); dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n", - led->value, voltage); + value, voltage); ret = regulator_set_voltage(led->vcc, voltage, voltage); if (ret != 0) @@ -121,23 +120,7 @@ static void regulator_led_set_value(struct regulator_led *led) out: mutex_unlock(&led->mutex); -} - -static void led_work(struct work_struct *work) -{ - struct regulator_led *led; - - led = container_of(work, struct regulator_led, work); - regulator_led_set_value(led); -} - -static void regulator_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct regulator_led *led = to_regulator_led(led_cdev); - - led->value = value; - schedule_work(&led->work); + return ret; } static int regulator_led_probe(struct platform_device *pdev) @@ -169,9 +152,8 @@ static int regulator_led_probe(struct platform_device *pdev) pdata->brightness); return -EINVAL; } - led->value = pdata->brightness; - led->cdev.brightness_set = regulator_led_brightness_set; + led->cdev.brightness_set_blocking = regulator_led_brightness_set; led->cdev.name = pdata->name; led->cdev.flags |= LED_CORE_SUSPENDRESUME; led->vcc = vcc; @@ -181,21 +163,18 @@ static int regulator_led_probe(struct platform_device *pdev) led->enabled = 1; mutex_init(&led->mutex); - INIT_WORK(&led->work, led_work); platform_set_drvdata(pdev, led); ret = led_classdev_register(&pdev->dev, &led->cdev); - if (ret < 
0) { - cancel_work_sync(&led->work); + if (ret < 0) return ret; - } /* to expose the default value to userspace */ - led->cdev.brightness = led->value; + led->cdev.brightness = pdata->brightness; /* Set the default led status */ - regulator_led_set_value(led); + regulator_led_brightness_set(&led->cdev, led->cdev.brightness); return 0; } @@ -205,7 +184,6 @@ static int regulator_led_remove(struct platform_device *pdev) struct regulator_led *led = platform_get_drvdata(pdev); led_classdev_unregister(&led->cdev); - cancel_work_sync(&led->work); regulator_led_disable(led); return 0; } diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c index c2553c54f2cf..7c09db8bd4e8 100644 --- a/drivers/leds/leds-sunfire.c +++ b/drivers/leds/leds-sunfire.c @@ -234,28 +234,19 @@ static struct platform_driver sunfire_fhc_led_driver = { }, }; +static struct platform_driver * const drivers[] = { + &sunfire_clockboard_led_driver, + &sunfire_fhc_led_driver, +}; + static int __init sunfire_leds_init(void) { - int err = platform_driver_register(&sunfire_clockboard_led_driver); - - if (err) { - pr_err("Could not register clock board LED driver\n"); - return err; - } - - err = platform_driver_register(&sunfire_fhc_led_driver); - if (err) { - pr_err("Could not register FHC LED driver\n"); - platform_driver_unregister(&sunfire_clockboard_led_driver); - } - - return err; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit sunfire_leds_exit(void) { - platform_driver_unregister(&sunfire_clockboard_led_driver); - platform_driver_unregister(&sunfire_fhc_led_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(sunfire_leds_init); diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c index b88900d721e4..3be40f74f12a 100644 --- a/drivers/leds/leds-syscon.c +++ b/drivers/leds/leds-syscon.c @@ -20,7 +20,7 @@ * MA 02111-1307 USA */ #include <linux/io.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/platform_device.h> @@ -139,29 +139,17 @@ static int syscon_led_probe(struct platform_device *pdev) return 0; } -static int syscon_led_remove(struct platform_device *pdev) -{ - struct syscon_led *sled = platform_get_drvdata(pdev); - - led_classdev_unregister(&sled->cdev); - /* Turn it off */ - regmap_update_bits(sled->map, sled->offset, sled->mask, 0); - return 0; -} - static const struct of_device_id of_syscon_leds_match[] = { { .compatible = "register-bit-led", }, {}, }; -MODULE_DEVICE_TABLE(of, of_syscon_leds_match); - static struct platform_driver syscon_led_driver = { .probe = syscon_led_probe, - .remove = syscon_led_remove, .driver = { .name = "leds-syscon", .of_match_table = of_syscon_leds_match, + .suppress_bind_attrs = true, }, }; -module_platform_driver(syscon_led_driver); +builtin_platform_driver(syscon_led_driver); diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c index b806eca83d27..304531644938 100644 --- a/drivers/leds/leds-tlc591xx.c +++ b/drivers/leds/leds-tlc591xx.c @@ -14,7 +14,6 @@ #include <linux/of_device.h> #include <linux/regmap.h> #include <linux/slab.h> -#include <linux/workqueue.h> #define TLC591XX_MAX_LEDS 16 @@ -42,13 +41,11 @@ #define LEDOUT_MASK 0x3 #define ldev_to_led(c) container_of(c, struct tlc591xx_led, ldev) -#define work_to_led(work) container_of(work, struct tlc591xx_led, work) struct tlc591xx_led { bool active; unsigned int led_no; struct led_classdev ldev; - struct work_struct work; struct 
tlc591xx_priv *priv; }; @@ -110,12 +107,12 @@ tlc591xx_set_pwm(struct tlc591xx_priv *priv, struct tlc591xx_led *led, return regmap_write(priv->regmap, pwm, brightness); } -static void -tlc591xx_led_work(struct work_struct *work) +static int +tlc591xx_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) { - struct tlc591xx_led *led = work_to_led(work); + struct tlc591xx_led *led = ldev_to_led(led_cdev); struct tlc591xx_priv *priv = led->priv; - enum led_brightness brightness = led->ldev.brightness; int err; switch (brightness) { @@ -131,18 +128,7 @@ tlc591xx_led_work(struct work_struct *work) err = tlc591xx_set_pwm(priv, led, brightness); } - if (err) - dev_err(led->ldev.dev, "Failed setting brightness\n"); -} - -static void -tlc591xx_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct tlc591xx_led *led = ldev_to_led(led_cdev); - - led->ldev.brightness = brightness; - schedule_work(&led->work); + return err; } static void @@ -151,10 +137,8 @@ tlc591xx_destroy_devices(struct tlc591xx_priv *priv, unsigned int j) int i = j; while (--i >= 0) { - if (priv->leds[i].active) { + if (priv->leds[i].active) led_classdev_unregister(&priv->leds[i].ldev); - cancel_work_sync(&priv->leds[i].work); - } } } @@ -175,9 +159,8 @@ tlc591xx_configure(struct device *dev, led->priv = priv; led->led_no = i; - led->ldev.brightness_set = tlc591xx_brightness_set; + led->ldev.brightness_set_blocking = tlc591xx_brightness_set; led->ldev.max_brightness = LED_FULL; - INIT_WORK(&led->work, tlc591xx_led_work); err = led_classdev_register(dev, &led->ldev); if (err < 0) { dev_err(dev, "couldn't register LED %s\n", diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index 56027ef7c7e8..64a22263e7fc 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -23,7 +23,6 @@ struct wm831x_status { struct led_classdev cdev; struct wm831x *wm831x; - struct work_struct work; struct mutex mutex; spinlock_t value_lock; @@ -40,10 +39,8 @@ struct wm831x_status { #define to_wm831x_status(led_cdev) \ container_of(led_cdev, struct wm831x_status, cdev) -static void wm831x_status_work(struct work_struct *work) +static void wm831x_status_set(struct wm831x_status *led) { - struct wm831x_status *led = container_of(work, struct wm831x_status, - work); unsigned long flags; mutex_lock(&led->mutex); @@ -70,8 +67,8 @@ static void wm831x_status_work(struct work_struct *work) mutex_unlock(&led->mutex); } -static void wm831x_status_set(struct led_classdev *led_cdev, - enum led_brightness value) +static int wm831x_status_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) { struct wm831x_status *led = to_wm831x_status(led_cdev); unsigned long flags; @@ -80,8 +77,10 @@ static void wm831x_status_set(struct led_classdev *led_cdev, led->brightness = value; if (value == LED_OFF) led->blink = 0; - schedule_work(&led->work); spin_unlock_irqrestore(&led->value_lock, flags); + wm831x_status_set(led); + + return 0; } static int wm831x_status_blink_set(struct led_classdev *led_cdev, @@ -147,11 +146,8 @@ static int wm831x_status_blink_set(struct led_classdev *led_cdev, else led->blink = 0; - /* Always update; if we fail turn off blinking since we expect - * a software fallback. 
*/ - schedule_work(&led->work); - spin_unlock_irqrestore(&led->value_lock, flags); + wm831x_status_set(led); return ret; } @@ -206,11 +202,9 @@ static ssize_t wm831x_status_src_store(struct device *dev, for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) { if (!strcmp(name, led_src_texts[i])) { mutex_lock(&led->mutex); - led->src = i; - schedule_work(&led->work); - mutex_unlock(&led->mutex); + wm831x_status_set(led); } } @@ -262,7 +256,6 @@ static int wm831x_status_probe(struct platform_device *pdev) pdata.name = dev_name(&pdev->dev); mutex_init(&drvdata->mutex); - INIT_WORK(&drvdata->work, wm831x_status_work); spin_lock_init(&drvdata->value_lock); /* We cache the configuration register and read startup values @@ -287,7 +280,7 @@ static int wm831x_status_probe(struct platform_device *pdev) drvdata->cdev.name = pdata.name; drvdata->cdev.default_trigger = pdata.default_trigger; - drvdata->cdev.brightness_set = wm831x_status_set; + drvdata->cdev.brightness_set_blocking = wm831x_status_brightness_set; drvdata->cdev.blink_set = wm831x_status_blink_set; drvdata->cdev.groups = wm831x_status_groups; diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c index 0d121835673f..e1e4e9d0b8b1 100644 --- a/drivers/leds/leds-wm8350.c +++ b/drivers/leds/leds-wm8350.c @@ -89,40 +89,42 @@ static const int isink_cur[] = { #define to_wm8350_led(led_cdev) \ container_of(led_cdev, struct wm8350_led, cdev) -static void wm8350_led_enable(struct wm8350_led *led) +static int wm8350_led_enable(struct wm8350_led *led) { - int ret; + int ret = 0; if (led->enabled) - return; + return ret; ret = regulator_enable(led->isink); if (ret != 0) { dev_err(led->cdev.dev, "Failed to enable ISINK: %d\n", ret); - return; + return ret; } ret = regulator_enable(led->dcdc); if (ret != 0) { dev_err(led->cdev.dev, "Failed to enable DCDC: %d\n", ret); regulator_disable(led->isink); - return; + return ret; } led->enabled = 1; + + return ret; } -static void wm8350_led_disable(struct wm8350_led *led) +static int wm8350_led_disable(struct wm8350_led *led) { - int ret; + int ret = 0; if (!led->enabled) - return; + return ret; ret = regulator_disable(led->dcdc); if (ret != 0) { dev_err(led->cdev.dev, "Failed to disable DCDC: %d\n", ret); - return; + return ret; } ret = regulator_disable(led->isink); @@ -132,27 +134,29 @@ static void wm8350_led_disable(struct wm8350_led *led) if (ret != 0) dev_err(led->cdev.dev, "Failed to reenable DCDC: %d\n", ret); - return; + return ret; } led->enabled = 0; + + return ret; } -static void led_work(struct work_struct *work) +static int wm8350_led_set(struct led_classdev *led_cdev, + enum led_brightness value) { - struct wm8350_led *led = container_of(work, struct wm8350_led, work); + struct wm8350_led *led = to_wm8350_led(led_cdev); + unsigned long flags; int ret; int uA; - unsigned long flags; - mutex_lock(&led->mutex); + led->value = value; spin_lock_irqsave(&led->value_lock, flags); if (led->value == LED_OFF) { spin_unlock_irqrestore(&led->value_lock, flags); - wm8350_led_disable(led); - goto out; + return wm8350_led_disable(led); } /* This scales linearly into the index of valid current @@ -166,36 +170,21 @@ static void led_work(struct work_struct *work) ret = regulator_set_current_limit(led->isink, isink_cur[uA], isink_cur[uA]); - if (ret != 0) + if (ret != 0) { dev_err(led->cdev.dev, "Failed to set %duA: %d\n", isink_cur[uA], ret); + return ret; + } - wm8350_led_enable(led); - -out: - mutex_unlock(&led->mutex); -} - -static void wm8350_led_set(struct led_classdev *led_cdev, - enum led_brightness 
value) -{ - struct wm8350_led *led = to_wm8350_led(led_cdev); - unsigned long flags; - - spin_lock_irqsave(&led->value_lock, flags); - led->value = value; - schedule_work(&led->work); - spin_unlock_irqrestore(&led->value_lock, flags); + return wm8350_led_enable(led); } static void wm8350_led_shutdown(struct platform_device *pdev) { struct wm8350_led *led = platform_get_drvdata(pdev); - mutex_lock(&led->mutex); led->value = LED_OFF; wm8350_led_disable(led); - mutex_unlock(&led->mutex); } static int wm8350_led_probe(struct platform_device *pdev) @@ -232,7 +221,7 @@ static int wm8350_led_probe(struct platform_device *pdev) if (led == NULL) return -ENOMEM; - led->cdev.brightness_set = wm8350_led_set; + led->cdev.brightness_set_blocking = wm8350_led_set; led->cdev.default_trigger = pdata->default_trigger; led->cdev.name = pdata->name; led->cdev.flags |= LED_CORE_SUSPENDRESUME; @@ -251,8 +240,6 @@ static int wm8350_led_probe(struct platform_device *pdev) pdata->max_uA); spin_lock_init(&led->value_lock); - mutex_init(&led->mutex); - INIT_WORK(&led->work, led_work); led->value = LED_OFF; platform_set_drvdata(pdev, led); @@ -264,7 +251,6 @@ static int wm8350_led_remove(struct platform_device *pdev) struct wm8350_led *led = platform_get_drvdata(pdev); led_classdev_unregister(&led->cdev); - flush_work(&led->work); wm8350_led_disable(led); return 0; } diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h index 4238fbc31d35..db3f20da7221 100644 --- a/drivers/leds/leds.h +++ b/drivers/leds/leds.h @@ -16,29 +16,6 @@ #include <linux/rwsem.h> #include <linux/leds.h> -static inline void led_set_brightness_async(struct led_classdev *led_cdev, - enum led_brightness value) -{ - value = min(value, led_cdev->max_brightness); - led_cdev->brightness = value; - - if (!(led_cdev->flags & LED_SUSPENDED)) - led_cdev->brightness_set(led_cdev, value); -} - -static inline int led_set_brightness_sync(struct led_classdev *led_cdev, - enum led_brightness value) -{ - int ret = 0; - - led_cdev->brightness = min(value, led_cdev->max_brightness); - - if (!(led_cdev->flags & LED_SUSPENDED)) - ret = led_cdev->brightness_set_sync(led_cdev, - led_cdev->brightness); - return ret; -} - static inline int led_get_brightness(struct led_classdev *led_cdev) { return led_cdev->brightness; @@ -46,6 +23,10 @@ static inline int led_get_brightness(struct led_classdev *led_cdev) void led_init_core(struct led_classdev *led_cdev); void led_stop_software_blink(struct led_classdev *led_cdev); +void led_set_brightness_nopm(struct led_classdev *led_cdev, + enum led_brightness value); +void led_set_brightness_nosleep(struct led_classdev *led_cdev, + enum led_brightness value); extern struct rw_semaphore leds_list_lock; extern struct list_head leds_list; diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c index 59eca17d9661..1ca1f1608f76 100644 --- a/drivers/leds/trigger/ledtrig-backlight.c +++ b/drivers/leds/trigger/ledtrig-backlight.c @@ -51,9 +51,9 @@ static int fb_notifier_callback(struct notifier_block *p, if ((n->old_status == UNBLANK) ^ n->invert) { n->brightness = led->brightness; - led_set_brightness_async(led, LED_OFF); + led_set_brightness_nosleep(led, LED_OFF); } else { - led_set_brightness_async(led, n->brightness); + led_set_brightness_nosleep(led, n->brightness); } n->old_status = new_status; @@ -89,9 +89,9 @@ static ssize_t bl_trig_invert_store(struct device *dev, /* After inverting, we need to update the LED. 
*/ if ((n->old_status == BLANK) ^ n->invert) - led_set_brightness_async(led, LED_OFF); + led_set_brightness_nosleep(led, LED_OFF); else - led_set_brightness_async(led, n->brightness); + led_set_brightness_nosleep(led, n->brightness); return num; } diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index aec0f02b6b3e..938467fb82be 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c @@ -19,7 +19,6 @@ * */ -#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> @@ -140,27 +139,4 @@ static int __init ledtrig_cpu_init(void) return 0; } -module_init(ledtrig_cpu_init); - -static void __exit ledtrig_cpu_exit(void) -{ - int cpu; - - unregister_cpu_notifier(&ledtrig_cpu_nb); - - for_each_possible_cpu(cpu) { - struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); - - led_trigger_unregister_simple(trig->_trig); - trig->_trig = NULL; - memset(trig->name, 0, MAX_NAME_LEN); - } - - unregister_syscore_ops(&ledtrig_cpu_syscore_ops); -} -module_exit(ledtrig_cpu_exit); - -MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); -MODULE_AUTHOR("Bryan Wu <bryan.wu@canonical.com>"); -MODULE_DESCRIPTION("CPU LED trigger"); -MODULE_LICENSE("GPL"); +device_initcall(ledtrig_cpu_init); diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c index 6f38f883aaf1..ff455cb46680 100644 --- a/drivers/leds/trigger/ledtrig-default-on.c +++ b/drivers/leds/trigger/ledtrig-default-on.c @@ -19,7 +19,7 @@ static void defon_trig_activate(struct led_classdev *led_cdev) { - led_set_brightness_async(led_cdev, led_cdev->max_brightness); + led_set_brightness_nosleep(led_cdev, led_cdev->max_brightness); } static struct led_trigger defon_led_trigger = { diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c index 4cc7040746c6..51288a45fbcb 100644 --- a/drivers/leds/trigger/ledtrig-gpio.c +++ b/drivers/leds/trigger/ledtrig-gpio.c @@ -54,12 +54,12 @@ static void gpio_trig_work(struct work_struct *work) if (tmp) { if (gpio_data->desired_brightness) - led_set_brightness_async(gpio_data->led, + led_set_brightness_nosleep(gpio_data->led, gpio_data->desired_brightness); else - led_set_brightness_async(gpio_data->led, LED_FULL); + led_set_brightness_nosleep(gpio_data->led, LED_FULL); } else { - led_set_brightness_async(gpio_data->led, LED_OFF); + led_set_brightness_nosleep(gpio_data->led, LED_OFF); } } diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index 8622ce651ae2..410c39c62dc7 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c @@ -38,7 +38,7 @@ static void led_heartbeat_function(unsigned long data) unsigned long delay = 0; if (unlikely(panic_heartbeats)) { - led_set_brightness(led_cdev, LED_OFF); + led_set_brightness_nosleep(led_cdev, LED_OFF); return; } @@ -81,7 +81,7 @@ static void led_heartbeat_function(unsigned long data) break; } - led_set_brightness_async(led_cdev, brightness); + led_set_brightness_nosleep(led_cdev, brightness); mod_timer(&heartbeat_data->timer, jiffies + delay); } diff --git a/drivers/leds/trigger/ledtrig-ide-disk.c b/drivers/leds/trigger/ledtrig-ide-disk.c index 2cd7c0cf5924..c02a3ac3cd2b 100644 --- a/drivers/leds/trigger/ledtrig-ide-disk.c +++ b/drivers/leds/trigger/ledtrig-ide-disk.c @@ -11,7 +11,6 @@ * */ -#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/leds.h> @@ -33,15 +32,4 @@ 
static int __init ledtrig_ide_init(void) led_trigger_register_simple("ide-disk", &ledtrig_ide); return 0; } - -static void __exit ledtrig_ide_exit(void) -{ - led_trigger_unregister_simple(ledtrig_ide); -} - -module_init(ledtrig_ide_init); -module_exit(ledtrig_ide_exit); - -MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); -MODULE_DESCRIPTION("LED IDE Disk Activity Trigger"); -MODULE_LICENSE("GPL"); +device_initcall(ledtrig_ide_init); diff --git a/drivers/leds/trigger/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c index fbd02cdc3ad7..b8ea9f0f1e19 100644 --- a/drivers/leds/trigger/ledtrig-oneshot.c +++ b/drivers/leds/trigger/ledtrig-oneshot.c @@ -63,9 +63,9 @@ static ssize_t led_invert_store(struct device *dev, oneshot_data->invert = !!state; if (oneshot_data->invert) - led_set_brightness_async(led_cdev, LED_FULL); + led_set_brightness_nosleep(led_cdev, LED_FULL); else - led_set_brightness_async(led_cdev, LED_OFF); + led_set_brightness_nosleep(led_cdev, LED_OFF); return size; } @@ -201,4 +201,4 @@ module_exit(oneshot_trig_exit); MODULE_AUTHOR("Fabio Baltieri <fabio.baltieri@gmail.com>"); MODULE_DESCRIPTION("One-shot LED trigger"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c index 3c34de404d18..7e6011bd3646 100644 --- a/drivers/leds/trigger/ledtrig-transient.c +++ b/drivers/leds/trigger/ledtrig-transient.c @@ -41,7 +41,7 @@ static void transient_timer_function(unsigned long data) struct transient_trig_data *transient_data = led_cdev->trigger_data; transient_data->activate = 0; - led_set_brightness_async(led_cdev, transient_data->restore_state); + led_set_brightness_nosleep(led_cdev, transient_data->restore_state); } static ssize_t transient_activate_show(struct device *dev, @@ -72,7 +72,7 @@ static ssize_t transient_activate_store(struct device *dev, if (state == 0 && transient_data->activate == 1) { del_timer(&transient_data->timer); transient_data->activate = state; - led_set_brightness_async(led_cdev, + led_set_brightness_nosleep(led_cdev, transient_data->restore_state); return size; } @@ -81,11 +81,11 @@ static ssize_t transient_activate_store(struct device *dev, if (state == 1 && transient_data->activate == 0 && transient_data->duration != 0) { transient_data->activate = state; - led_set_brightness_async(led_cdev, transient_data->state); + led_set_brightness_nosleep(led_cdev, transient_data->state); transient_data->restore_state = (transient_data->state == LED_FULL) ? 
LED_OFF : LED_FULL; mod_timer(&transient_data->timer, - jiffies + transient_data->duration); + jiffies + msecs_to_jiffies(transient_data->duration)); } /* state == 0 && transient_data->activate == 0 @@ -204,7 +204,7 @@ static void transient_trig_deactivate(struct led_classdev *led_cdev) if (led_cdev->activated) { del_timer_sync(&transient_data->timer); - led_set_brightness_async(led_cdev, + led_set_brightness_nosleep(led_cdev, transient_data->restore_state); device_remove_file(led_cdev->dev, &dev_attr_activate); device_remove_file(led_cdev->dev, &dev_attr_duration); diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index f434e89e1c7a..a54b339951a3 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -75,7 +75,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, struct nvm_block *blk; int i; - lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; + lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun]; for (i = 0; i < nr_blocks; i++) { if (blks[i] == 0) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 7913fdcfc849..0a2e7273db9e 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -240,6 +240,15 @@ config DM_BUFIO as a cache, holding recently-read blocks in memory and performing delayed writes. +config DM_DEBUG_BLOCK_STACK_TRACING + bool "Keep stack trace of persistent data block lock holders" + depends on STACKTRACE_SUPPORT && DM_BUFIO + select STACKTRACE + ---help--- + Enable this for messages that may help debug problems with the + block manager locking used by thin provisioning and caching. + + If unsure, say N. config DM_BIO_PRISON tristate depends on BLK_DEV_DM @@ -458,6 +467,18 @@ config DM_VERITY If unsure, say N. +config DM_VERITY_FEC + bool "Verity forward error correction support" + depends on DM_VERITY + select REED_SOLOMON + select REED_SOLOMON_DEC8 + ---help--- + Add forward error correction support to dm-verity. This option + makes it possible to use pre-generated error correction data to + recover from corrupted blocks. + + If unsure, say N. + config DM_SWITCH tristate "Switch target support (EXPERIMENTAL)" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f34979cd141a..62a65764e8e0 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -16,6 +16,7 @@ dm-cache-mq-y += dm-cache-policy-mq.o dm-cache-smq-y += dm-cache-policy-smq.o dm-cache-cleaner-y += dm-cache-policy-cleaner.o dm-era-y += dm-era-target.o +dm-verity-y += dm-verity-target.o md-mod-y += md.o bitmap.o raid456-y += raid5.o raid5-cache.o @@ -63,3 +64,7 @@ obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o ifeq ($(CONFIG_DM_UEVENT),y) dm-mod-objs += dm-uevent.o endif + +ifeq ($(CONFIG_DM_VERITY_FEC),y) +dm-verity-objs += dm-verity-fec.o +endif diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index db3ae4c2b223..dde6172f3f10 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -230,7 +230,7 @@ void bch_bio_map(struct bio *bio, void *base) BUG_ON(!bio->bi_iter.bi_size); BUG_ON(bio->bi_vcnt); - bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; + bv->bv_offset = base ? 
offset_in_page(base) : 0; goto start; for (; size; bio->bi_vcnt++, bv++) { diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 2dd33085b331..cd77216beff1 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -16,6 +16,7 @@ #include <linux/shrinker.h> #include <linux/module.h> #include <linux/rbtree.h> +#include <linux/stacktrace.h> #define DM_MSG_PREFIX "bufio" @@ -149,6 +150,11 @@ struct dm_buffer { struct list_head write_list; struct bio bio; struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS]; +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING +#define MAX_STACK 10 + struct stack_trace stack_trace; + unsigned long stack_entries[MAX_STACK]; +#endif }; /*----------------------------------------------------------------*/ @@ -253,6 +259,17 @@ static LIST_HEAD(dm_bufio_all_clients); */ static DEFINE_MUTEX(dm_bufio_clients_lock); +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING +static void buffer_record_stack(struct dm_buffer *b) +{ + b->stack_trace.nr_entries = 0; + b->stack_trace.max_entries = MAX_STACK; + b->stack_trace.entries = b->stack_entries; + b->stack_trace.skip = 2; + save_stack_trace(&b->stack_trace); +} +#endif + /*---------------------------------------------------------------- * A red/black tree acts as an index for all the buffers. *--------------------------------------------------------------*/ @@ -454,6 +471,9 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) adjust_total_allocated(b->data_mode, (long)c->block_size); +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING + memset(&b->stack_trace, 0, sizeof(b->stack_trace)); +#endif return b; } @@ -630,7 +650,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, do { if (!bio_add_page(&b->bio, virt_to_page(ptr), len < PAGE_SIZE ? len : PAGE_SIZE, - virt_to_phys(ptr) & (PAGE_SIZE - 1))) { + offset_in_page(ptr))) { BUG_ON(b->c->block_size <= PAGE_SIZE); use_dmio(b, rw, block, end_io); return; @@ -1063,12 +1083,16 @@ static void *new_read(struct dm_bufio_client *c, sector_t block, dm_bufio_lock(c); b = __bufio_new(c, block, nf, &need_submit, &write_list); +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING + if (b && b->hold_count == 1) + buffer_record_stack(b); +#endif dm_bufio_unlock(c); __flush_write_list(&write_list); if (!b) - return b; + return NULL; if (need_submit) submit_io(b, READ, b->block, read_endio); @@ -1462,6 +1486,7 @@ static void drop_buffers(struct dm_bufio_client *c) { struct dm_buffer *b; int i; + bool warned = false; BUG_ON(dm_bufio_in_request()); @@ -1476,9 +1501,21 @@ static void drop_buffers(struct dm_bufio_client *c) __free_buffer_wake(b); for (i = 0; i < LIST_SIZE; i++) - list_for_each_entry(b, &c->lru[i], lru_list) + list_for_each_entry(b, &c->lru[i], lru_list) { + WARN_ON(!warned); + warned = true; DMERR("leaked buffer %llx, hold count %u, list %d", (unsigned long long)b->block, b->hold_count, i); +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING + print_stack_trace(&b->stack_trace, 1); + b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */ +#endif + } + +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING + while ((b = __get_unclaimed_buffer(c))) + __free_buffer_wake(b); +#endif for (i = 0; i < LIST_SIZE; i++) BUG_ON(!list_empty(&c->lru[i])); @@ -1891,8 +1928,7 @@ static void __exit dm_bufio_exit(void) bug = 1; } - if (bug) - BUG(); + BUG_ON(bug); } module_init(dm_bufio_init) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 2fd4c8296144..5780accffa30 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ 
-118,14 +118,12 @@ static void iot_io_end(struct io_tracker *iot, sector_t len) */ struct dm_hook_info { bio_end_io_t *bi_end_io; - void *bi_private; }; static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, bio_end_io_t *bi_end_io, void *bi_private) { h->bi_end_io = bio->bi_end_io; - h->bi_private = bio->bi_private; bio->bi_end_io = bi_end_io; bio->bi_private = bi_private; @@ -134,7 +132,6 @@ static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) { bio->bi_end_io = h->bi_end_io; - bio->bi_private = h->bi_private; } /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index fae34e7a0b1e..12b5216c2cfe 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -69,7 +69,7 @@ struct dm_exception_store_type { * Update the metadata with this exception. */ void (*commit_exception) (struct dm_exception_store *store, - struct dm_exception *e, + struct dm_exception *e, int valid, void (*callback) (void *, int success), void *callback_context); diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 81c5e1a1f363..06d426eb5a30 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -246,7 +246,7 @@ static void vm_dp_init(struct dpages *dp, void *data) { dp->get_page = vm_get_page; dp->next_page = vm_next_page; - dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); + dp->context_u = offset_in_page(data); dp->context_ptr = data; } @@ -271,7 +271,7 @@ static void km_dp_init(struct dpages *dp, void *data) { dp->get_page = km_get_page; dp->next_page = km_next_page; - dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); + dp->context_u = offset_in_page(data); dp->context_ptr = data; } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3164b8bce294..4d3909393f2c 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, } static void persistent_commit_exception(struct dm_exception_store *store, - struct dm_exception *e, + struct dm_exception *e, int valid, void (*callback) (void *, int success), void *callback_context) { @@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store, struct core_exception ce; struct commit_callback *cb; + if (!valid) + ps->valid = 0; + ce.old_chunk = e->old_chunk; ce.new_chunk = e->new_chunk; write_exception(ps, ps->current_committed++, &ce); diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index 9b7c8c8049d6..4d50a12cf00c 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store, } static void transient_commit_exception(struct dm_exception_store *store, - struct dm_exception *e, + struct dm_exception *e, int valid, void (*callback) (void *, int success), void *callback_context) { /* Just succeed */ - callback(callback_context, 1); + callback(callback_context, valid); } static void transient_usage(struct dm_exception_store *store, diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c06b74e91cd6..3766386080a4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -207,7 +207,6 @@ struct dm_snap_pending_exception { */ struct bio *full_bio; bio_end_io_t *full_bio_end_io; - void *full_bio_private; }; /* @@ -1438,8 
+1437,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) dm_table_event(s->ti->table); } -static void pending_complete(struct dm_snap_pending_exception *pe, int success) +static void pending_complete(void *context, int success) { + struct dm_snap_pending_exception *pe = context; struct dm_exception *e; struct dm_snapshot *s = pe->snap; struct bio *origin_bios = NULL; @@ -1485,10 +1485,8 @@ out: snapshot_bios = bio_list_get(&pe->snapshot_bios); origin_bios = bio_list_get(&pe->origin_bios); full_bio = pe->full_bio; - if (full_bio) { + if (full_bio) full_bio->bi_end_io = pe->full_bio_end_io; - full_bio->bi_private = pe->full_bio_private; - } increment_pending_exceptions_done_count(); up_write(&s->lock); @@ -1509,24 +1507,13 @@ out: free_pending_exception(pe); } -static void commit_callback(void *context, int success) -{ - struct dm_snap_pending_exception *pe = context; - - pending_complete(pe, success); -} - static void complete_exception(struct dm_snap_pending_exception *pe) { struct dm_snapshot *s = pe->snap; - if (unlikely(pe->copy_error)) - pending_complete(pe, 0); - - else - /* Update the metadata if we are persistent */ - s->store->type->commit_exception(s->store, &pe->e, - commit_callback, pe); + /* Update the metadata if we are persistent */ + s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, + pending_complete, pe); } /* @@ -1605,7 +1592,6 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, pe->full_bio = bio; pe->full_bio_end_io = bio->bi_end_io; - pe->full_bio_private = bio->bi_private; callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, copy_callback, pe); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index c219a053c7f6..f962d6453afd 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1395,8 +1395,21 @@ static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time) return td->snapshotted_time > time; } -int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block, - int can_issue_io, struct dm_thin_lookup_result *result) +static void unpack_lookup_result(struct dm_thin_device *td, __le64 value, + struct dm_thin_lookup_result *result) +{ + uint64_t block_time = 0; + dm_block_t exception_block; + uint32_t exception_time; + + block_time = le64_to_cpu(value); + unpack_block_time(block_time, &exception_block, &exception_time); + result->block = exception_block; + result->shared = __snapshotted_since(td, exception_time); +} + +static int __find_block(struct dm_thin_device *td, dm_block_t block, + int can_issue_io, struct dm_thin_lookup_result *result) { int r; __le64 value; @@ -1404,39 +1417,56 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block, dm_block_t keys[2] = { td->id, block }; struct dm_btree_info *info; - down_read(&pmd->root_lock); - if (pmd->fail_io) { - up_read(&pmd->root_lock); - return -EINVAL; - } - if (can_issue_io) { info = &pmd->info; } else info = &pmd->nb_info; r = dm_btree_lookup(info, pmd->root, keys, &value); - if (!r) { - uint64_t block_time = 0; - dm_block_t exception_block; - uint32_t exception_time; - - block_time = le64_to_cpu(value); - unpack_block_time(block_time, &exception_block, - &exception_time); - result->block = exception_block; - result->shared = __snapshotted_since(td, exception_time); + if (!r) + unpack_lookup_result(td, value, result); + + return r; +} + +int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block, + int can_issue_io, struct dm_thin_lookup_result *result) +{ + int r; 
+ struct dm_pool_metadata *pmd = td->pmd; + + down_read(&pmd->root_lock); + if (pmd->fail_io) { + up_read(&pmd->root_lock); + return -EINVAL; } + r = __find_block(td, block, can_issue_io, result); + up_read(&pmd->root_lock); return r; } -/* FIXME: write a more efficient one in btree */ -int dm_thin_find_mapped_range(struct dm_thin_device *td, - dm_block_t begin, dm_block_t end, - dm_block_t *thin_begin, dm_block_t *thin_end, - dm_block_t *pool_begin, bool *maybe_shared) +static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block, + dm_block_t *vblock, + struct dm_thin_lookup_result *result) +{ + int r; + __le64 value; + struct dm_pool_metadata *pmd = td->pmd; + dm_block_t keys[2] = { td->id, block }; + + r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value); + if (!r) + unpack_lookup_result(td, value, result); + + return r; +} + +static int __find_mapped_range(struct dm_thin_device *td, + dm_block_t begin, dm_block_t end, + dm_block_t *thin_begin, dm_block_t *thin_end, + dm_block_t *pool_begin, bool *maybe_shared) { int r; dm_block_t pool_end; @@ -1445,21 +1475,11 @@ int dm_thin_find_mapped_range(struct dm_thin_device *td, if (end < begin) return -ENODATA; - /* - * Find first mapped block. - */ - while (begin < end) { - r = dm_thin_find_block(td, begin, true, &lookup); - if (r) { - if (r != -ENODATA) - return r; - } else - break; - - begin++; - } + r = __find_next_mapped_block(td, begin, &begin, &lookup); + if (r) + return r; - if (begin == end) + if (begin >= end) return -ENODATA; *thin_begin = begin; @@ -1469,7 +1489,7 @@ int dm_thin_find_mapped_range(struct dm_thin_device *td, begin++; pool_end = *pool_begin + 1; while (begin != end) { - r = dm_thin_find_block(td, begin, true, &lookup); + r = __find_block(td, begin, true, &lookup); if (r) { if (r == -ENODATA) break; @@ -1489,6 +1509,24 @@ int dm_thin_find_mapped_range(struct dm_thin_device *td, return 0; } +int dm_thin_find_mapped_range(struct dm_thin_device *td, + dm_block_t begin, dm_block_t end, + dm_block_t *thin_begin, dm_block_t *thin_end, + dm_block_t *pool_begin, bool *maybe_shared) +{ + int r = -EINVAL; + struct dm_pool_metadata *pmd = td->pmd; + + down_read(&pmd->root_lock); + if (!pmd->fail_io) { + r = __find_mapped_range(td, begin, end, thin_begin, thin_end, + pool_begin, maybe_shared); + } + up_read(&pmd->root_lock); + + return r; +} + static int __insert(struct dm_thin_device *td, dm_block_t block, dm_block_t data_block) { diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 63903a5a5d9e..72d91f477683 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti) struct pool_c *pt = ti->private; struct pool *pool = pt->pool; - cancel_delayed_work(&pool->waker); - cancel_delayed_work(&pool->no_space_timeout); + cancel_delayed_work_sync(&pool->waker); + cancel_delayed_work_sync(&pool->no_space_timeout); flush_workqueue(pool->wq); (void) commit(pool); } @@ -3886,7 +3886,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 16, 0}, + .version = {1, 17, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -4260,7 +4260,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type thin_target = { .name = "thin", - .version = {1, 16, 0}, + .version = {1, 17, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git 
a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c new file mode 100644 index 000000000000..1cc10c4de701 --- /dev/null +++ b/drivers/md/dm-verity-fec.c @@ -0,0 +1,818 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * Author: Sami Tolvanen <samitolvanen@google.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include "dm-verity-fec.h" +#include <linux/math64.h> + +#define DM_MSG_PREFIX "verity-fec" + +/* + * If error correction has been configured, returns true. + */ +bool verity_fec_is_enabled(struct dm_verity *v) +{ + return v->fec && v->fec->dev; +} + +/* + * Return a pointer to dm_verity_fec_io after dm_verity_io and its variable + * length fields. + */ +static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io) +{ + return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io); +} + +/* + * Return an interleaved offset for a byte in RS block. + */ +static inline u64 fec_interleave(struct dm_verity *v, u64 offset) +{ + u32 mod; + + mod = do_div(offset, v->fec->rsn); + return offset + mod * (v->fec->rounds << v->data_dev_block_bits); +} + +/* + * Decode an RS block using Reed-Solomon. + */ +static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio, + u8 *data, u8 *fec, int neras) +{ + int i; + uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN]; + + for (i = 0; i < v->fec->roots; i++) + par[i] = fec[i]; + + return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras, + fio->erasures, 0, NULL); +} + +/* + * Read error-correcting codes for the requested RS block. Returns a pointer + * to the data block. Caller is responsible for releasing buf. + */ +static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index, + unsigned *offset, struct dm_buffer **buf) +{ + u64 position, block; + u8 *res; + + position = (index + rsb) * v->fec->roots; + block = position >> v->data_dev_block_bits; + *offset = (unsigned)(position - (block << v->data_dev_block_bits)); + + res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf); + if (unlikely(IS_ERR(res))) { + DMERR("%s: FEC %llu: parity read failed (block %llu): %ld", + v->data_dev->name, (unsigned long long)rsb, + (unsigned long long)(v->fec->start + block), + PTR_ERR(res)); + *buf = NULL; + } + + return res; +} + +/* Loop over each preallocated buffer slot. */ +#define fec_for_each_prealloc_buffer(__i) \ + for (__i = 0; __i < DM_VERITY_FEC_BUF_PREALLOC; __i++) + +/* Loop over each extra buffer slot. */ +#define fec_for_each_extra_buffer(io, __i) \ + for (__i = DM_VERITY_FEC_BUF_PREALLOC; __i < DM_VERITY_FEC_BUF_MAX; __i++) + +/* Loop over each allocated buffer. */ +#define fec_for_each_buffer(io, __i) \ + for (__i = 0; __i < (io)->nbufs; __i++) + +/* Loop over each RS block in each allocated buffer. */ +#define fec_for_each_buffer_rs_block(io, __i, __j) \ + fec_for_each_buffer(io, __i) \ + for (__j = 0; __j < 1 << DM_VERITY_FEC_BUF_RS_BITS; __j++) + +/* + * Return a pointer to the current RS block when called inside + * fec_for_each_buffer_rs_block. + */ +static inline u8 *fec_buffer_rs_block(struct dm_verity *v, + struct dm_verity_fec_io *fio, + unsigned i, unsigned j) +{ + return &fio->bufs[i][j * v->fec->rsn]; +} + +/* + * Return an index to the current RS block when called inside + * fec_for_each_buffer_rs_block. 
+ */ +static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j) +{ + return (i << DM_VERITY_FEC_BUF_RS_BITS) + j; +} + +/* + * Decode all RS blocks from buffers and copy corrected bytes into fio->output + * starting from block_offset. + */ +static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, + u64 rsb, int byte_index, unsigned block_offset, + int neras) +{ + int r, corrected = 0, res; + struct dm_buffer *buf; + unsigned n, i, offset; + u8 *par, *block; + + par = fec_read_parity(v, rsb, block_offset, &offset, &buf); + if (IS_ERR(par)) + return PTR_ERR(par); + + /* + * Decode the RS blocks we have in bufs. Each RS block results in + * one corrected target byte and consumes fec->roots parity bytes. + */ + fec_for_each_buffer_rs_block(fio, n, i) { + block = fec_buffer_rs_block(v, fio, n, i); + res = fec_decode_rs8(v, fio, block, &par[offset], neras); + if (res < 0) { + dm_bufio_release(buf); + + r = res; + goto error; + } + + corrected += res; + fio->output[block_offset] = block[byte_index]; + + block_offset++; + if (block_offset >= 1 << v->data_dev_block_bits) + goto done; + + /* read the next block when we run out of parity bytes */ + offset += v->fec->roots; + if (offset >= 1 << v->data_dev_block_bits) { + dm_bufio_release(buf); + + par = fec_read_parity(v, rsb, block_offset, &offset, &buf); + if (unlikely(IS_ERR(par))) + return PTR_ERR(par); + } + } +done: + r = corrected; +error: + if (r < 0 && neras) + DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", + v->data_dev->name, (unsigned long long)rsb, r); + else if (r > 0) + DMWARN_LIMIT("%s: FEC %llu: corrected %d errors", + v->data_dev->name, (unsigned long long)rsb, r); + + return r; +} + +/* + * Locate data block erasures using verity hashes. + */ +static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io, + u8 *want_digest, u8 *data) +{ + if (unlikely(verity_hash(v, verity_io_hash_desc(v, io), + data, 1 << v->data_dev_block_bits, + verity_io_real_digest(v, io)))) + return 0; + + return memcmp(verity_io_real_digest(v, io), want_digest, + v->digest_size) != 0; +} + +/* + * Read data blocks that are part of the RS block and deinterleave as much as + * fits into buffers. Check for erasure locations if @neras is non-NULL. 
+ */ +static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io, + u64 rsb, u64 target, unsigned block_offset, + int *neras) +{ + bool is_zero; + int i, j, target_index = -1; + struct dm_buffer *buf; + struct dm_bufio_client *bufio; + struct dm_verity_fec_io *fio = fec_io(io); + u64 block, ileaved; + u8 *bbuf, *rs_block; + u8 want_digest[v->digest_size]; + unsigned n, k; + + if (neras) + *neras = 0; + + /* + * read each of the rsn data blocks that are part of the RS block, and + * interleave contents to available bufs + */ + for (i = 0; i < v->fec->rsn; i++) { + ileaved = fec_interleave(v, rsb * v->fec->rsn + i); + + /* + * target is the data block we want to correct, target_index is + * the index of this block within the rsn RS blocks + */ + if (ileaved == target) + target_index = i; + + block = ileaved >> v->data_dev_block_bits; + bufio = v->fec->data_bufio; + + if (block >= v->data_blocks) { + block -= v->data_blocks; + + /* + * blocks outside the area were assumed to contain + * zeros when encoding data was generated + */ + if (unlikely(block >= v->fec->hash_blocks)) + continue; + + block += v->hash_start; + bufio = v->bufio; + } + + bbuf = dm_bufio_read(bufio, block, &buf); + if (unlikely(IS_ERR(bbuf))) { + DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld", + v->data_dev->name, + (unsigned long long)rsb, + (unsigned long long)block, PTR_ERR(bbuf)); + + /* assume the block is corrupted */ + if (neras && *neras <= v->fec->roots) + fio->erasures[(*neras)++] = i; + + continue; + } + + /* locate erasures if the block is on the data device */ + if (bufio == v->fec->data_bufio && + verity_hash_for_block(v, io, block, want_digest, + &is_zero) == 0) { + /* skip known zero blocks entirely */ + if (is_zero) + continue; + + /* + * skip if we have already found the theoretical + * maximum number (i.e. fec->roots) of erasures + */ + if (neras && *neras <= v->fec->roots && + fec_is_erasure(v, io, want_digest, bbuf)) + fio->erasures[(*neras)++] = i; + } + + /* + * deinterleave and copy the bytes that fit into bufs, + * starting from block_offset + */ + fec_for_each_buffer_rs_block(fio, n, j) { + k = fec_buffer_rs_index(n, j) + block_offset; + + if (k >= 1 << v->data_dev_block_bits) + goto done; + + rs_block = fec_buffer_rs_block(v, fio, n, j); + rs_block[i] = bbuf[k]; + } +done: + dm_bufio_release(buf); + } + + return target_index; +} + +/* + * Allocate RS control structure and FEC buffers from preallocated mempools, + * and attempt to allocate as many extra buffers as available. 
+ */ +static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) +{ + unsigned n; + + if (!fio->rs) { + fio->rs = mempool_alloc(v->fec->rs_pool, 0); + if (unlikely(!fio->rs)) { + DMERR("failed to allocate RS"); + return -ENOMEM; + } + } + + fec_for_each_prealloc_buffer(n) { + if (fio->bufs[n]) + continue; + + fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO); + if (unlikely(!fio->bufs[n])) { + DMERR("failed to allocate FEC buffer"); + return -ENOMEM; + } + } + + /* try to allocate the maximum number of buffers */ + fec_for_each_extra_buffer(fio, n) { + if (fio->bufs[n]) + continue; + + fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO); + /* we can manage with even one buffer if necessary */ + if (unlikely(!fio->bufs[n])) + break; + } + fio->nbufs = n; + + if (!fio->output) { + fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); + + if (!fio->output) { + DMERR("failed to allocate FEC page"); + return -ENOMEM; + } + } + + return 0; +} + +/* + * Initialize buffers and clear erasures. fec_read_bufs() assumes buffers are + * zeroed before deinterleaving. + */ +static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) +{ + unsigned n; + + fec_for_each_buffer(fio, n) + memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS); + + memset(fio->erasures, 0, sizeof(fio->erasures)); +} + +/* + * Decode all RS blocks in a single data block and return the target block + * (indicated by @offset) in fio->output. If @use_erasures is non-zero, uses + * hashes to locate erasures. + */ +static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, + struct dm_verity_fec_io *fio, u64 rsb, u64 offset, + bool use_erasures) +{ + int r, neras = 0; + unsigned pos; + + r = fec_alloc_bufs(v, fio); + if (unlikely(r < 0)) + return r; + + for (pos = 0; pos < 1 << v->data_dev_block_bits; ) { + fec_init_bufs(v, fio); + + r = fec_read_bufs(v, io, rsb, offset, pos, + use_erasures ? &neras : NULL); + if (unlikely(r < 0)) + return r; + + r = fec_decode_bufs(v, fio, rsb, r, pos, neras); + if (r < 0) + return r; + + pos += fio->nbufs << DM_VERITY_FEC_BUF_RS_BITS; + } + + /* Always re-validate the corrected block against the expected hash */ + r = verity_hash(v, verity_io_hash_desc(v, io), fio->output, + 1 << v->data_dev_block_bits, + verity_io_real_digest(v, io)); + if (unlikely(r < 0)) + return r; + + if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io), + v->digest_size)) { + DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)", + v->data_dev->name, (unsigned long long)rsb, neras); + return -EILSEQ; + } + + return 0; +} + +static int fec_bv_copy(struct dm_verity *v, struct dm_verity_io *io, u8 *data, + size_t len) +{ + struct dm_verity_fec_io *fio = fec_io(io); + + memcpy(data, &fio->output[fio->output_pos], len); + fio->output_pos += len; + + return 0; +} + +/* + * Correct errors in a block. Copies corrected block to dest if non-NULL, + * otherwise to a bio_vec starting from iter. + */ +int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, + enum verity_block_type type, sector_t block, u8 *dest, + struct bvec_iter *iter) +{ + int r; + struct dm_verity_fec_io *fio = fec_io(io); + u64 offset, res, rsb; + + if (!verity_fec_is_enabled(v)) + return -EOPNOTSUPP; + + if (type == DM_VERITY_BLOCK_TYPE_METADATA) + block += v->data_blocks; + + /* + * For RS(M, N), the continuous FEC data is divided into blocks of N + * bytes. 
Since block size may not be divisible by N, the last block + * is zero padded when decoding. + * + * Each byte of the block is covered by a different RS(M, N) code, + * and each code is interleaved over N blocks to make it less likely + * that bursty corruption will leave us in unrecoverable state. + */ + + offset = block << v->data_dev_block_bits; + + res = offset; + div64_u64(res, v->fec->rounds << v->data_dev_block_bits); + + /* + * The base RS block we can feed to the interleaver to find out all + * blocks required for decoding. + */ + rsb = offset - res * (v->fec->rounds << v->data_dev_block_bits); + + /* + * Locating erasures is slow, so attempt to recover the block without + * them first. Do a second attempt with erasures if the corruption is + * bad enough. + */ + r = fec_decode_rsb(v, io, fio, rsb, offset, false); + if (r < 0) { + r = fec_decode_rsb(v, io, fio, rsb, offset, true); + if (r < 0) + return r; + } + + if (dest) + memcpy(dest, fio->output, 1 << v->data_dev_block_bits); + else if (iter) { + fio->output_pos = 0; + r = verity_for_bv_block(v, io, iter, fec_bv_copy); + } + + return r; +} + +/* + * Clean up per-bio data. + */ +void verity_fec_finish_io(struct dm_verity_io *io) +{ + unsigned n; + struct dm_verity_fec *f = io->v->fec; + struct dm_verity_fec_io *fio = fec_io(io); + + if (!verity_fec_is_enabled(io->v)) + return; + + mempool_free(fio->rs, f->rs_pool); + + fec_for_each_prealloc_buffer(n) + mempool_free(fio->bufs[n], f->prealloc_pool); + + fec_for_each_extra_buffer(fio, n) + mempool_free(fio->bufs[n], f->extra_pool); + + mempool_free(fio->output, f->output_pool); +} + +/* + * Initialize per-bio data. + */ +void verity_fec_init_io(struct dm_verity_io *io) +{ + struct dm_verity_fec_io *fio = fec_io(io); + + if (!verity_fec_is_enabled(io->v)) + return; + + fio->rs = NULL; + memset(fio->bufs, 0, sizeof(fio->bufs)); + fio->nbufs = 0; + fio->output = NULL; +} + +/* + * Append feature arguments and values to the status table. 
+ */ +unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz, + char *result, unsigned maxlen) +{ + if (!verity_fec_is_enabled(v)) + return sz; + + DMEMIT(" " DM_VERITY_OPT_FEC_DEV " %s " + DM_VERITY_OPT_FEC_BLOCKS " %llu " + DM_VERITY_OPT_FEC_START " %llu " + DM_VERITY_OPT_FEC_ROOTS " %d", + v->fec->dev->name, + (unsigned long long)v->fec->blocks, + (unsigned long long)v->fec->start, + v->fec->roots); + + return sz; +} + +void verity_fec_dtr(struct dm_verity *v) +{ + struct dm_verity_fec *f = v->fec; + + if (!verity_fec_is_enabled(v)) + goto out; + + mempool_destroy(f->rs_pool); + mempool_destroy(f->prealloc_pool); + mempool_destroy(f->extra_pool); + kmem_cache_destroy(f->cache); + + if (f->data_bufio) + dm_bufio_client_destroy(f->data_bufio); + if (f->bufio) + dm_bufio_client_destroy(f->bufio); + + if (f->dev) + dm_put_device(v->ti, f->dev); +out: + kfree(f); + v->fec = NULL; +} + +static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data) +{ + struct dm_verity *v = (struct dm_verity *)pool_data; + + return init_rs(8, 0x11d, 0, 1, v->fec->roots); +} + +static void fec_rs_free(void *element, void *pool_data) +{ + struct rs_control *rs = (struct rs_control *)element; + + if (rs) + free_rs(rs); +} + +bool verity_is_fec_opt_arg(const char *arg_name) +{ + return (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV) || + !strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS) || + !strcasecmp(arg_name, DM_VERITY_OPT_FEC_START) || + !strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS)); +} + +int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, + unsigned *argc, const char *arg_name) +{ + int r; + struct dm_target *ti = v->ti; + const char *arg_value; + unsigned long long num_ll; + unsigned char num_c; + char dummy; + + if (!*argc) { + ti->error = "FEC feature arguments require a value"; + return -EINVAL; + } + + arg_value = dm_shift_arg(as); + (*argc)--; + + if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) { + r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev); + if (r) { + ti->error = "FEC device lookup failed"; + return r; + } + + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS)) { + if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 || + ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) + >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) { + ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS; + return -EINVAL; + } + v->fec->blocks = num_ll; + + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START)) { + if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 || + ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >> + (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) { + ti->error = "Invalid " DM_VERITY_OPT_FEC_START; + return -EINVAL; + } + v->fec->start = num_ll; + + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS)) { + if (sscanf(arg_value, "%hhu%c", &num_c, &dummy) != 1 || !num_c || + num_c < (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MAX_RSN) || + num_c > (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN)) { + ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS; + return -EINVAL; + } + v->fec->roots = num_c; + + } else { + ti->error = "Unrecognized verity FEC feature request"; + return -EINVAL; + } + + return 0; +} + +/* + * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr. 
+ */ +int verity_fec_ctr_alloc(struct dm_verity *v) +{ + struct dm_verity_fec *f; + + f = kzalloc(sizeof(struct dm_verity_fec), GFP_KERNEL); + if (!f) { + v->ti->error = "Cannot allocate FEC structure"; + return -ENOMEM; + } + v->fec = f; + + return 0; +} + +/* + * Validate arguments and preallocate memory. Must be called after arguments + * have been parsed using verity_fec_parse_opt_args. + */ +int verity_fec_ctr(struct dm_verity *v) +{ + struct dm_verity_fec *f = v->fec; + struct dm_target *ti = v->ti; + u64 hash_blocks; + + if (!verity_fec_is_enabled(v)) { + verity_fec_dtr(v); + return 0; + } + + /* + * FEC is computed over data blocks, possible metadata, and + * hash blocks. In other words, FEC covers total of fec_blocks + * blocks consisting of the following: + * + * data blocks | hash blocks | metadata (optional) + * + * We allow metadata after hash blocks to support a use case + * where all data is stored on the same device and FEC covers + * the entire area. + * + * If metadata is included, we require it to be available on the + * hash device after the hash blocks. + */ + + hash_blocks = v->hash_blocks - v->hash_start; + + /* + * Require matching block sizes for data and hash devices for + * simplicity. + */ + if (v->data_dev_block_bits != v->hash_dev_block_bits) { + ti->error = "Block sizes must match to use FEC"; + return -EINVAL; + } + + if (!f->roots) { + ti->error = "Missing " DM_VERITY_OPT_FEC_ROOTS; + return -EINVAL; + } + f->rsn = DM_VERITY_FEC_RSM - f->roots; + + if (!f->blocks) { + ti->error = "Missing " DM_VERITY_OPT_FEC_BLOCKS; + return -EINVAL; + } + + f->rounds = f->blocks; + if (sector_div(f->rounds, f->rsn)) + f->rounds++; + + /* + * Due to optional metadata, f->blocks can be larger than + * data_blocks and hash_blocks combined. + */ + if (f->blocks < v->data_blocks + hash_blocks || !f->rounds) { + ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS; + return -EINVAL; + } + + /* + * Metadata is accessed through the hash device, so we require + * it to be large enough. 
+ */ + f->hash_blocks = f->blocks - v->data_blocks; + if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) { + ti->error = "Hash device is too small for " + DM_VERITY_OPT_FEC_BLOCKS; + return -E2BIG; + } + + f->bufio = dm_bufio_client_create(f->dev->bdev, + 1 << v->data_dev_block_bits, + 1, 0, NULL, NULL); + if (IS_ERR(f->bufio)) { + ti->error = "Cannot initialize FEC bufio client"; + return PTR_ERR(f->bufio); + } + + if (dm_bufio_get_device_size(f->bufio) < + ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) { + ti->error = "FEC device is too small"; + return -E2BIG; + } + + f->data_bufio = dm_bufio_client_create(v->data_dev->bdev, + 1 << v->data_dev_block_bits, + 1, 0, NULL, NULL); + if (IS_ERR(f->data_bufio)) { + ti->error = "Cannot initialize FEC data bufio client"; + return PTR_ERR(f->data_bufio); + } + + if (dm_bufio_get_device_size(f->data_bufio) < v->data_blocks) { + ti->error = "Data device is too small"; + return -E2BIG; + } + + /* Preallocate an rs_control structure for each worker thread */ + f->rs_pool = mempool_create(num_online_cpus(), fec_rs_alloc, + fec_rs_free, (void *) v); + if (!f->rs_pool) { + ti->error = "Cannot allocate RS pool"; + return -ENOMEM; + } + + f->cache = kmem_cache_create("dm_verity_fec_buffers", + f->rsn << DM_VERITY_FEC_BUF_RS_BITS, + 0, 0, NULL); + if (!f->cache) { + ti->error = "Cannot create FEC buffer cache"; + return -ENOMEM; + } + + /* Preallocate DM_VERITY_FEC_BUF_PREALLOC buffers for each thread */ + f->prealloc_pool = mempool_create_slab_pool(num_online_cpus() * + DM_VERITY_FEC_BUF_PREALLOC, + f->cache); + if (!f->prealloc_pool) { + ti->error = "Cannot allocate FEC buffer prealloc pool"; + return -ENOMEM; + } + + f->extra_pool = mempool_create_slab_pool(0, f->cache); + if (!f->extra_pool) { + ti->error = "Cannot allocate FEC buffer extra pool"; + return -ENOMEM; + } + + /* Preallocate an output buffer for each thread */ + f->output_pool = mempool_create_kmalloc_pool(num_online_cpus(), + 1 << v->data_dev_block_bits); + if (!f->output_pool) { + ti->error = "Cannot allocate FEC output pool"; + return -ENOMEM; + } + + /* Reserve space for our per-bio data */ + ti->per_bio_data_size += sizeof(struct dm_verity_fec_io); + + return 0; +} diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h new file mode 100644 index 000000000000..7fa0298b995e --- /dev/null +++ b/drivers/md/dm-verity-fec.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * Author: Sami Tolvanen <samitolvanen@google.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#ifndef DM_VERITY_FEC_H +#define DM_VERITY_FEC_H + +#include "dm-verity.h" +#include <linux/rslib.h> + +/* Reed-Solomon(M, N) parameters */ +#define DM_VERITY_FEC_RSM 255 +#define DM_VERITY_FEC_MAX_RSN 253 +#define DM_VERITY_FEC_MIN_RSN 231 /* ~10% space overhead */ + +/* buffers for deinterleaving and decoding */ +#define DM_VERITY_FEC_BUF_PREALLOC 1 /* buffers to preallocate */ +#define DM_VERITY_FEC_BUF_RS_BITS 4 /* 1 << RS blocks per buffer */ +/* we need buffers for at most 1 << block size RS blocks */ +#define DM_VERITY_FEC_BUF_MAX \ + (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) + +#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" +#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" +#define DM_VERITY_OPT_FEC_START "fec_start" +#define DM_VERITY_OPT_FEC_ROOTS "fec_roots" + +/* configuration */ +struct dm_verity_fec { + struct dm_dev *dev; /* parity data device */ + struct dm_bufio_client *data_bufio; /* for data dev access */ + struct dm_bufio_client *bufio; /* for parity data access */ + sector_t start; /* parity data start in blocks */ + sector_t blocks; /* number of blocks covered */ + sector_t rounds; /* number of interleaving rounds */ + sector_t hash_blocks; /* blocks covered after v->hash_start */ + unsigned char roots; /* number of parity bytes, M-N of RS(M, N) */ + unsigned char rsn; /* N of RS(M, N) */ + mempool_t *rs_pool; /* mempool for fio->rs */ + mempool_t *prealloc_pool; /* mempool for preallocated buffers */ + mempool_t *extra_pool; /* mempool for extra buffers */ + mempool_t *output_pool; /* mempool for output */ + struct kmem_cache *cache; /* cache for buffers */ +}; + +/* per-bio data */ +struct dm_verity_fec_io { + struct rs_control *rs; /* Reed-Solomon state */ + int erasures[DM_VERITY_FEC_MAX_RSN]; /* erasures for decode_rs8 */ + u8 *bufs[DM_VERITY_FEC_BUF_MAX]; /* bufs for deinterleaving */ + unsigned nbufs; /* number of buffers allocated */ + u8 *output; /* buffer for corrected output */ + size_t output_pos; +}; + +#ifdef CONFIG_DM_VERITY_FEC + +/* each feature parameter requires a value */ +#define DM_VERITY_OPTS_FEC 8 + +extern bool verity_fec_is_enabled(struct dm_verity *v); + +extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, + enum verity_block_type type, sector_t block, + u8 *dest, struct bvec_iter *iter); + +extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz, + char *result, unsigned maxlen); + +extern void verity_fec_finish_io(struct dm_verity_io *io); +extern void verity_fec_init_io(struct dm_verity_io *io); + +extern bool verity_is_fec_opt_arg(const char *arg_name); +extern int verity_fec_parse_opt_args(struct dm_arg_set *as, + struct dm_verity *v, unsigned *argc, + const char *arg_name); + +extern void verity_fec_dtr(struct dm_verity *v); + +extern int verity_fec_ctr_alloc(struct dm_verity *v); +extern int verity_fec_ctr(struct dm_verity *v); + +#else /* !CONFIG_DM_VERITY_FEC */ + +#define DM_VERITY_OPTS_FEC 0 + +static inline bool verity_fec_is_enabled(struct dm_verity *v) +{ + return false; +} + +static inline int verity_fec_decode(struct dm_verity *v, + struct dm_verity_io *io, + enum verity_block_type type, + sector_t block, u8 *dest, + struct bvec_iter *iter) +{ + return -EOPNOTSUPP; +} + +static inline unsigned verity_fec_status_table(struct dm_verity *v, + unsigned sz, char *result, + unsigned maxlen) +{ + return sz; +} + +static inline void verity_fec_finish_io(struct dm_verity_io *io) +{ +} + +static inline void verity_fec_init_io(struct dm_verity_io *io) +{ +} + +static inline bool 
verity_is_fec_opt_arg(const char *arg_name) +{ + return false; +} + +static inline int verity_fec_parse_opt_args(struct dm_arg_set *as, + struct dm_verity *v, + unsigned *argc, + const char *arg_name) +{ + return -EINVAL; +} + +static inline void verity_fec_dtr(struct dm_verity *v) +{ +} + +static inline int verity_fec_ctr_alloc(struct dm_verity *v) +{ + return 0; +} + +static inline int verity_fec_ctr(struct dm_verity *v) +{ + return 0; +} + +#endif /* CONFIG_DM_VERITY_FEC */ + +#endif /* DM_VERITY_FEC_H */ diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity-target.c index ccf41886ebcf..5c5d30cb6ec5 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity-target.c @@ -14,12 +14,11 @@ * access behavior. */ -#include "dm-bufio.h" +#include "dm-verity.h" +#include "dm-verity-fec.h" #include <linux/module.h> -#include <linux/device-mapper.h> #include <linux/reboot.h> -#include <crypto/hash.h> #define DM_MSG_PREFIX "verity" @@ -28,83 +27,18 @@ #define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144 -#define DM_VERITY_MAX_LEVELS 63 #define DM_VERITY_MAX_CORRUPTED_ERRS 100 #define DM_VERITY_OPT_LOGGING "ignore_corruption" #define DM_VERITY_OPT_RESTART "restart_on_corruption" +#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" + +#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC) static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE; module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR); -enum verity_mode { - DM_VERITY_MODE_EIO, - DM_VERITY_MODE_LOGGING, - DM_VERITY_MODE_RESTART -}; - -enum verity_block_type { - DM_VERITY_BLOCK_TYPE_DATA, - DM_VERITY_BLOCK_TYPE_METADATA -}; - -struct dm_verity { - struct dm_dev *data_dev; - struct dm_dev *hash_dev; - struct dm_target *ti; - struct dm_bufio_client *bufio; - char *alg_name; - struct crypto_shash *tfm; - u8 *root_digest; /* digest of the root block */ - u8 *salt; /* salt: its size is salt_size */ - unsigned salt_size; - sector_t data_start; /* data offset in 512-byte sectors */ - sector_t hash_start; /* hash start in blocks */ - sector_t data_blocks; /* the number of data blocks */ - sector_t hash_blocks; /* the number of hash blocks */ - unsigned char data_dev_block_bits; /* log2(data blocksize) */ - unsigned char hash_dev_block_bits; /* log2(hash blocksize) */ - unsigned char hash_per_block_bits; /* log2(hashes in hash block) */ - unsigned char levels; /* the number of tree levels */ - unsigned char version; - unsigned digest_size; /* digest size for the current hash algorithm */ - unsigned shash_descsize;/* the size of temporary space for crypto */ - int hash_failed; /* set to 1 if hash of any block failed */ - enum verity_mode mode; /* mode for handling verification errors */ - unsigned corrupted_errs;/* Number of errors for corrupted blocks */ - - struct workqueue_struct *verify_wq; - - /* starting blocks for each tree level. 0 is the lowest level. */ - sector_t hash_level_block[DM_VERITY_MAX_LEVELS]; -}; - -struct dm_verity_io { - struct dm_verity *v; - - /* original values of bio->bi_end_io and bio->bi_private */ - bio_end_io_t *orig_bi_end_io; - void *orig_bi_private; - - sector_t block; - unsigned n_blocks; - - struct bvec_iter iter; - - struct work_struct work; - - /* - * Three variably-size fields follow this struct: - * - * u8 hash_desc[v->shash_descsize]; - * u8 real_digest[v->digest_size]; - * u8 want_digest[v->digest_size]; - * - * To access them use: io_hash_desc(), io_real_digest() and io_want_digest(). 
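For orientation, the RS(M, N) parameters defined in dm-verity-fec.h above fix M at 255 symbols per codeword, so the number of parity bytes is roots = 255 - N, and the allowed range of roots (2..24) corresponds to N between 253 and 231, the latter matching the "~10% space overhead" comment. A minimal user-space sketch of that arithmetic follows; nothing below is part of the patch and the helper name is invented for illustration.

#include <stdio.h>

/* Constants mirroring dm-verity-fec.h above. */
#define FEC_RSM      255   /* symbols per RS codeword (data + parity) */
#define FEC_MAX_RSN  253   /* fewest parity bytes: 255 - 253 = 2 roots  */
#define FEC_MIN_RSN  231   /* most parity bytes:   255 - 231 = 24 roots */

/* Invented helper: parity overhead of RS(255, N) as a percentage. */
static double fec_overhead_percent(int roots)
{
	int rsn = FEC_RSM - roots;   /* N of RS(M, N) */

	return 100.0 * roots / rsn;
}

int main(void)
{
	int roots;

	for (roots = FEC_RSM - FEC_MAX_RSN; roots <= FEC_RSM - FEC_MIN_RSN; roots++)
		printf("roots=%2d -> RS(255, %3d), ~%4.1f%% parity overhead\n",
		       roots, FEC_RSM - roots, fec_overhead_percent(roots));

	return 0;
}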
- */ -}; - struct dm_verity_prefetch_work { struct work_struct work; struct dm_verity *v; @@ -112,21 +46,6 @@ struct dm_verity_prefetch_work { unsigned n_blocks; }; -static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) -{ - return (struct shash_desc *)(io + 1); -} - -static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io) -{ - return (u8 *)(io + 1) + v->shash_descsize; -} - -static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io) -{ - return (u8 *)(io + 1) + v->shash_descsize + v->digest_size; -} - /* * Auxiliary structure appended to each dm-bufio buffer. If the value * hash_verified is nonzero, hash of the block has been verified. @@ -173,6 +92,84 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, return block >> (level * v->hash_per_block_bits); } +/* + * Wrapper for crypto_shash_init, which handles verity salting. + */ +static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc) +{ + int r; + + desc->tfm = v->tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + r = crypto_shash_init(desc); + + if (unlikely(r < 0)) { + DMERR("crypto_shash_init failed: %d", r); + return r; + } + + if (likely(v->version >= 1)) { + r = crypto_shash_update(desc, v->salt, v->salt_size); + + if (unlikely(r < 0)) { + DMERR("crypto_shash_update failed: %d", r); + return r; + } + } + + return 0; +} + +static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc, + const u8 *data, size_t len) +{ + int r = crypto_shash_update(desc, data, len); + + if (unlikely(r < 0)) + DMERR("crypto_shash_update failed: %d", r); + + return r; +} + +static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc, + u8 *digest) +{ + int r; + + if (unlikely(!v->version)) { + r = crypto_shash_update(desc, v->salt, v->salt_size); + + if (r < 0) { + DMERR("crypto_shash_update failed: %d", r); + return r; + } + } + + r = crypto_shash_final(desc, digest); + + if (unlikely(r < 0)) + DMERR("crypto_shash_final failed: %d", r); + + return r; +} + +int verity_hash(struct dm_verity *v, struct shash_desc *desc, + const u8 *data, size_t len, u8 *digest) +{ + int r; + + r = verity_hash_init(v, desc); + if (unlikely(r < 0)) + return r; + + r = verity_hash_update(v, desc, data, len); + if (unlikely(r < 0)) + return r; + + return verity_hash_final(v, desc, digest); +} + static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level, sector_t *hash_block, unsigned *offset) { @@ -246,17 +243,17 @@ out: * Verify hash of a metadata block pertaining to the specified data block * ("block" argument) at a specified level ("level" argument). * - * On successful return, io_want_digest(v, io) contains the hash value for - * a lower tree level or for the data block (if we're at the lowest leve). + * On successful return, verity_io_want_digest(v, io) contains the hash value + * for a lower tree level or for the data block (if we're at the lowest level). * * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned. * If "skip_unverified" is false, unverified buffer is hashed and verified - * against current value of io_want_digest(v, io). + * against current value of verity_io_want_digest(v, io). 
*/ -static int verity_verify_level(struct dm_verity_io *io, sector_t block, - int level, bool skip_unverified) +static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, + sector_t block, int level, bool skip_unverified, + u8 *want_digest) { - struct dm_verity *v = io->v; struct dm_buffer *buf; struct buffer_aux *aux; u8 *data; @@ -273,72 +270,128 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block, aux = dm_bufio_get_aux_data(buf); if (!aux->hash_verified) { - struct shash_desc *desc; - u8 *result; - if (skip_unverified) { r = 1; goto release_ret_r; } - desc = io_hash_desc(v, io); - desc->tfm = v->tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - r = crypto_shash_init(desc); - if (r < 0) { - DMERR("crypto_shash_init failed: %d", r); + r = verity_hash(v, verity_io_hash_desc(v, io), + data, 1 << v->hash_dev_block_bits, + verity_io_real_digest(v, io)); + if (unlikely(r < 0)) goto release_ret_r; - } - - if (likely(v->version >= 1)) { - r = crypto_shash_update(desc, v->salt, v->salt_size); - if (r < 0) { - DMERR("crypto_shash_update failed: %d", r); - goto release_ret_r; - } - } - r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits); - if (r < 0) { - DMERR("crypto_shash_update failed: %d", r); + if (likely(memcmp(verity_io_real_digest(v, io), want_digest, + v->digest_size) == 0)) + aux->hash_verified = 1; + else if (verity_fec_decode(v, io, + DM_VERITY_BLOCK_TYPE_METADATA, + hash_block, data, NULL) == 0) + aux->hash_verified = 1; + else if (verity_handle_err(v, + DM_VERITY_BLOCK_TYPE_METADATA, + hash_block)) { + r = -EIO; goto release_ret_r; } + } - if (!v->version) { - r = crypto_shash_update(desc, v->salt, v->salt_size); - if (r < 0) { - DMERR("crypto_shash_update failed: %d", r); - goto release_ret_r; - } - } + data += offset; + memcpy(want_digest, data, v->digest_size); + r = 0; - result = io_real_digest(v, io); - r = crypto_shash_final(desc, result); - if (r < 0) { - DMERR("crypto_shash_final failed: %d", r); - goto release_ret_r; - } - if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) { - if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA, - hash_block)) { - r = -EIO; - goto release_ret_r; - } - } else - aux->hash_verified = 1; +release_ret_r: + dm_bufio_release(buf); + return r; +} + +/* + * Find a hash for a given block, write it to digest and verify the integrity + * of the hash tree if necessary. + */ +int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, + sector_t block, u8 *digest, bool *is_zero) +{ + int r = 0, i; + + if (likely(v->levels)) { + /* + * First, we try to get the requested hash for + * the current block. If the hash block itself is + * verified, zero is returned. If it isn't, this + * function returns 1 and we fall back to whole + * chain verification. + */ + r = verity_verify_level(v, io, block, 0, true, digest); + if (likely(r <= 0)) + goto out; } - data += offset; + memcpy(digest, v->root_digest, v->digest_size); - memcpy(io_want_digest(v, io), data, v->digest_size); + for (i = v->levels - 1; i >= 0; i--) { + r = verity_verify_level(v, io, block, i, false, digest); + if (unlikely(r)) + goto out; + } +out: + if (!r && v->zero_digest) + *is_zero = !memcmp(v->zero_digest, digest, v->digest_size); + else + *is_zero = false; + + return r; +} + +/* + * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec + * starting from iter. 
+ */ +int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io, + struct bvec_iter *iter, + int (*process)(struct dm_verity *v, + struct dm_verity_io *io, u8 *data, + size_t len)) +{ + unsigned todo = 1 << v->data_dev_block_bits; + struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); + + do { + int r; + u8 *page; + unsigned len; + struct bio_vec bv = bio_iter_iovec(bio, *iter); + + page = kmap_atomic(bv.bv_page); + len = bv.bv_len; + + if (likely(len >= todo)) + len = todo; + + r = process(v, io, page + bv.bv_offset, len); + kunmap_atomic(page); + + if (r < 0) + return r; + + bio_advance_iter(bio, iter, len); + todo -= len; + } while (todo); - dm_bufio_release(buf); return 0; +} -release_ret_r: - dm_bufio_release(buf); +static int verity_bv_hash_update(struct dm_verity *v, struct dm_verity_io *io, + u8 *data, size_t len) +{ + return verity_hash_update(v, verity_io_hash_desc(v, io), data, len); +} - return r; +static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io, + u8 *data, size_t len) +{ + memset(data, 0, len); + return 0; } /* @@ -346,99 +399,56 @@ release_ret_r: */ static int verity_verify_io(struct dm_verity_io *io) { + bool is_zero; struct dm_verity *v = io->v; - struct bio *bio = dm_bio_from_per_bio_data(io, - v->ti->per_bio_data_size); + struct bvec_iter start; unsigned b; - int i; for (b = 0; b < io->n_blocks; b++) { - struct shash_desc *desc; - u8 *result; int r; - unsigned todo; + struct shash_desc *desc = verity_io_hash_desc(v, io); + + r = verity_hash_for_block(v, io, io->block + b, + verity_io_want_digest(v, io), + &is_zero); + if (unlikely(r < 0)) + return r; - if (likely(v->levels)) { + if (is_zero) { /* - * First, we try to get the requested hash for - * the current block. If the hash block itself is - * verified, zero is returned. If it isn't, this - * function returns 0 and we fall back to whole - * chain verification. + * If we expect a zero block, don't validate, just + * return zeros. 
*/ - int r = verity_verify_level(io, io->block + b, 0, true); - if (likely(!r)) - goto test_block_hash; - if (r < 0) + r = verity_for_bv_block(v, io, &io->iter, + verity_bv_zero); + if (unlikely(r < 0)) return r; - } - memcpy(io_want_digest(v, io), v->root_digest, v->digest_size); - - for (i = v->levels - 1; i >= 0; i--) { - int r = verity_verify_level(io, io->block + b, i, false); - if (unlikely(r)) - return r; + continue; } -test_block_hash: - desc = io_hash_desc(v, io); - desc->tfm = v->tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - r = crypto_shash_init(desc); - if (r < 0) { - DMERR("crypto_shash_init failed: %d", r); + r = verity_hash_init(v, desc); + if (unlikely(r < 0)) return r; - } - - if (likely(v->version >= 1)) { - r = crypto_shash_update(desc, v->salt, v->salt_size); - if (r < 0) { - DMERR("crypto_shash_update failed: %d", r); - return r; - } - } - todo = 1 << v->data_dev_block_bits; - do { - u8 *page; - unsigned len; - struct bio_vec bv = bio_iter_iovec(bio, io->iter); - - page = kmap_atomic(bv.bv_page); - len = bv.bv_len; - if (likely(len >= todo)) - len = todo; - r = crypto_shash_update(desc, page + bv.bv_offset, len); - kunmap_atomic(page); - - if (r < 0) { - DMERR("crypto_shash_update failed: %d", r); - return r; - } - - bio_advance_iter(bio, &io->iter, len); - todo -= len; - } while (todo); - if (!v->version) { - r = crypto_shash_update(desc, v->salt, v->salt_size); - if (r < 0) { - DMERR("crypto_shash_update failed: %d", r); - return r; - } - } + start = io->iter; + r = verity_for_bv_block(v, io, &io->iter, verity_bv_hash_update); + if (unlikely(r < 0)) + return r; - result = io_real_digest(v, io); - r = crypto_shash_final(desc, result); - if (r < 0) { - DMERR("crypto_shash_final failed: %d", r); + r = verity_hash_final(v, desc, verity_io_real_digest(v, io)); + if (unlikely(r < 0)) return r; - } - if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) { - if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, - io->block + b)) - return -EIO; - } + + if (likely(memcmp(verity_io_real_digest(v, io), + verity_io_want_digest(v, io), v->digest_size) == 0)) + continue; + else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, + io->block + b, NULL, &start) == 0) + continue; + else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, + io->block + b)) + return -EIO; } return 0; @@ -453,9 +463,10 @@ static void verity_finish_io(struct dm_verity_io *io, int error) struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); bio->bi_end_io = io->orig_bi_end_io; - bio->bi_private = io->orig_bi_private; bio->bi_error = error; + verity_fec_finish_io(io); + bio_endio(bio); } @@ -470,7 +481,7 @@ static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; - if (bio->bi_error) { + if (bio->bi_error && !verity_fec_is_enabled(io->v)) { verity_finish_io(io, bio->bi_error); return; } @@ -566,7 +577,6 @@ static int verity_map(struct dm_target *ti, struct bio *bio) io = dm_per_bio_data(bio, ti->per_bio_data_size); io->v = v; io->orig_bi_end_io = bio->bi_end_io; - io->orig_bi_private = bio->bi_private; io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; @@ -574,6 +584,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio) bio->bi_private = io; io->iter = bio->bi_iter; + verity_fec_init_io(io); + verity_submit_prefetch(v, io); generic_make_request(bio); @@ -588,6 +600,7 @@ static void verity_status(struct dm_target *ti, status_type_t type, 
unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; + unsigned args = 0; unsigned sz = 0; unsigned x; @@ -614,8 +627,17 @@ static void verity_status(struct dm_target *ti, status_type_t type, else for (x = 0; x < v->salt_size; x++) DMEMIT("%02x", v->salt[x]); + if (v->mode != DM_VERITY_MODE_EIO) + args++; + if (verity_fec_is_enabled(v)) + args += DM_VERITY_OPTS_FEC; + if (v->zero_digest) + args++; + if (!args) + return; + DMEMIT(" %u", args); if (v->mode != DM_VERITY_MODE_EIO) { - DMEMIT(" 1 "); + DMEMIT(" "); switch (v->mode) { case DM_VERITY_MODE_LOGGING: DMEMIT(DM_VERITY_OPT_LOGGING); @@ -627,6 +649,9 @@ static void verity_status(struct dm_target *ti, status_type_t type, BUG(); } } + if (v->zero_digest) + DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES); + sz = verity_fec_status_table(v, sz, result, maxlen); break; } } @@ -677,6 +702,7 @@ static void verity_dtr(struct dm_target *ti) kfree(v->salt); kfree(v->root_digest); + kfree(v->zero_digest); if (v->tfm) crypto_free_shash(v->tfm); @@ -689,9 +715,94 @@ static void verity_dtr(struct dm_target *ti) if (v->data_dev) dm_put_device(ti, v->data_dev); + verity_fec_dtr(v); + kfree(v); } +static int verity_alloc_zero_digest(struct dm_verity *v) +{ + int r = -ENOMEM; + struct shash_desc *desc; + u8 *zero_data; + + v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL); + + if (!v->zero_digest) + return r; + + desc = kmalloc(v->shash_descsize, GFP_KERNEL); + + if (!desc) + return r; /* verity_dtr will free zero_digest */ + + zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL); + + if (!zero_data) + goto out; + + r = verity_hash(v, desc, zero_data, 1 << v->data_dev_block_bits, + v->zero_digest); + +out: + kfree(desc); + kfree(zero_data); + + return r; +} + +static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) +{ + int r; + unsigned argc; + struct dm_target *ti = v->ti; + const char *arg_name; + + static struct dm_arg _args[] = { + {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"}, + }; + + r = dm_read_arg_group(_args, as, &argc, &ti->error); + if (r) + return -EINVAL; + + if (!argc) + return 0; + + do { + arg_name = dm_shift_arg(as); + argc--; + + if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) { + v->mode = DM_VERITY_MODE_LOGGING; + continue; + + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) { + v->mode = DM_VERITY_MODE_RESTART; + continue; + + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) { + r = verity_alloc_zero_digest(v); + if (r) { + ti->error = "Cannot allocate zero digest"; + return r; + } + continue; + + } else if (verity_is_fec_opt_arg(arg_name)) { + r = verity_fec_parse_opt_args(as, v, &argc, arg_name); + if (r) + return r; + continue; + } + + ti->error = "Unrecognized verity feature request"; + return -EINVAL; + } while (argc && !r); + + return r; +} + /* * Target parameters: * <version> The current format is version 1. 
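The option parser above, together with DM_VERITY_OPTS_FEC being 8 (four FEC options, each followed by a value), implies that the optional-argument count in a verity table line covers both names and values. Purely as an illustration, not taken from this patch, a table line handed to dmsetup might then look roughly like this, with every angle-bracketed field being a placeholder:

dmsetup create vroot --readonly --table \
  "0 <data_sectors> verity 1 <data_dev> <hash_dev> 4096 4096 \
   <num_data_blocks> <hash_start_block> sha256 <root_digest> <salt> \
   9 ignore_zero_blocks use_fec_from_device <fec_dev> \
   fec_roots 2 fec_blocks <num_fec_blocks> fec_start 0"

Here the leading 9 counts ignore_zero_blocks plus the four FEC name/value pairs; ignore_corruption or restart_on_corruption could be added the same way, bumping the count accordingly.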
@@ -710,18 +821,13 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) { struct dm_verity *v; struct dm_arg_set as; - const char *opt_string; - unsigned int num, opt_params; + unsigned int num; unsigned long long num_ll; int r; int i; sector_t hash_position; char dummy; - static struct dm_arg _args[] = { - {0, 1, "Invalid number of feature args"}, - }; - v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL); if (!v) { ti->error = "Cannot allocate verity structure"; @@ -730,6 +836,10 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->private = v; v->ti = ti; + r = verity_fec_ctr_alloc(v); + if (r) + goto bad; + if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) { ti->error = "Device must be readonly"; r = -EINVAL; @@ -866,29 +976,9 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) as.argc = argc; as.argv = argv; - r = dm_read_arg_group(_args, &as, &opt_params, &ti->error); - if (r) + r = verity_parse_opt_args(&as, v); + if (r < 0) goto bad; - - while (opt_params) { - opt_params--; - opt_string = dm_shift_arg(&as); - if (!opt_string) { - ti->error = "Not enough feature arguments"; - r = -EINVAL; - goto bad; - } - - if (!strcasecmp(opt_string, DM_VERITY_OPT_LOGGING)) - v->mode = DM_VERITY_MODE_LOGGING; - else if (!strcasecmp(opt_string, DM_VERITY_OPT_RESTART)) - v->mode = DM_VERITY_MODE_RESTART; - else { - ti->error = "Invalid feature arguments"; - r = -EINVAL; - goto bad; - } - } } v->hash_per_block_bits = @@ -938,8 +1028,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } - ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io)); - /* WQ_UNBOUND greatly improves performance when running on ramdisk */ v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus()); if (!v->verify_wq) { @@ -948,6 +1036,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + ti->per_bio_data_size = sizeof(struct dm_verity_io) + + v->shash_descsize + v->digest_size * 2; + + r = verity_fec_ctr(v); + if (r) + goto bad; + + ti->per_bio_data_size = roundup(ti->per_bio_data_size, + __alignof__(struct dm_verity_io)); + return 0; bad: @@ -958,7 +1056,7 @@ bad: static struct target_type verity_target = { .name = "verity", - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h new file mode 100644 index 000000000000..fb419f422d73 --- /dev/null +++ b/drivers/md/dm-verity.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2012 Red Hat, Inc. + * Copyright (C) 2015 Google, Inc. + * + * Author: Mikulas Patocka <mpatocka@redhat.com> + * + * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors + * + * This file is released under the GPLv2. 
+ */ + +#ifndef DM_VERITY_H +#define DM_VERITY_H + +#include "dm-bufio.h" +#include <linux/device-mapper.h> +#include <crypto/hash.h> + +#define DM_VERITY_MAX_LEVELS 63 + +enum verity_mode { + DM_VERITY_MODE_EIO, + DM_VERITY_MODE_LOGGING, + DM_VERITY_MODE_RESTART +}; + +enum verity_block_type { + DM_VERITY_BLOCK_TYPE_DATA, + DM_VERITY_BLOCK_TYPE_METADATA +}; + +struct dm_verity_fec; + +struct dm_verity { + struct dm_dev *data_dev; + struct dm_dev *hash_dev; + struct dm_target *ti; + struct dm_bufio_client *bufio; + char *alg_name; + struct crypto_shash *tfm; + u8 *root_digest; /* digest of the root block */ + u8 *salt; /* salt: its size is salt_size */ + u8 *zero_digest; /* digest for a zero block */ + unsigned salt_size; + sector_t data_start; /* data offset in 512-byte sectors */ + sector_t hash_start; /* hash start in blocks */ + sector_t data_blocks; /* the number of data blocks */ + sector_t hash_blocks; /* the number of hash blocks */ + unsigned char data_dev_block_bits; /* log2(data blocksize) */ + unsigned char hash_dev_block_bits; /* log2(hash blocksize) */ + unsigned char hash_per_block_bits; /* log2(hashes in hash block) */ + unsigned char levels; /* the number of tree levels */ + unsigned char version; + unsigned digest_size; /* digest size for the current hash algorithm */ + unsigned shash_descsize;/* the size of temporary space for crypto */ + int hash_failed; /* set to 1 if hash of any block failed */ + enum verity_mode mode; /* mode for handling verification errors */ + unsigned corrupted_errs;/* Number of errors for corrupted blocks */ + + struct workqueue_struct *verify_wq; + + /* starting blocks for each tree level. 0 is the lowest level. */ + sector_t hash_level_block[DM_VERITY_MAX_LEVELS]; + + struct dm_verity_fec *fec; /* forward error correction */ +}; + +struct dm_verity_io { + struct dm_verity *v; + + /* original value of bio->bi_end_io */ + bio_end_io_t *orig_bi_end_io; + + sector_t block; + unsigned n_blocks; + + struct bvec_iter iter; + + struct work_struct work; + + /* + * Three variably-size fields follow this struct: + * + * u8 hash_desc[v->shash_descsize]; + * u8 real_digest[v->digest_size]; + * u8 want_digest[v->digest_size]; + * + * To access them use: verity_io_hash_desc(), verity_io_real_digest() + * and verity_io_want_digest(). 
+ */ +}; + +static inline struct shash_desc *verity_io_hash_desc(struct dm_verity *v, + struct dm_verity_io *io) +{ + return (struct shash_desc *)(io + 1); +} + +static inline u8 *verity_io_real_digest(struct dm_verity *v, + struct dm_verity_io *io) +{ + return (u8 *)(io + 1) + v->shash_descsize; +} + +static inline u8 *verity_io_want_digest(struct dm_verity *v, + struct dm_verity_io *io) +{ + return (u8 *)(io + 1) + v->shash_descsize + v->digest_size; +} + +static inline u8 *verity_io_digest_end(struct dm_verity *v, + struct dm_verity_io *io) +{ + return verity_io_want_digest(v, io) + v->digest_size; +} + +extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io, + struct bvec_iter *iter, + int (*process)(struct dm_verity *v, + struct dm_verity_io *io, + u8 *data, size_t len)); + +extern int verity_hash(struct dm_verity *v, struct shash_desc *desc, + const u8 *data, size_t len, u8 *digest); + +extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, + sector_t block, u8 *digest, bool *is_zero); + +#endif /* DM_VERITY_H */ diff --git a/drivers/md/md.c b/drivers/md/md.c index 807095f4c793..61aacab424cf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) */ void mddev_suspend(struct mddev *mddev) { - BUG_ON(mddev->suspended); - mddev->suspended = 1; + if (mddev->suspended++) + return; synchronize_rcu(); wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); mddev->pers->quiesce(mddev, 1); @@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend); void mddev_resume(struct mddev *mddev) { - mddev->suspended = 0; + if (--mddev->suspended) + return; wake_up(&mddev->sb_wait); mddev->pers->quiesce(mddev, 0); @@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) rdev->journal_tail = le64_to_cpu(sb->journal_tail); if (mddev->recovery_cp == MaxSector) set_bit(MD_JOURNAL_CLEAN, &mddev->flags); - rdev->raid_disk = mddev->raid_disks; + rdev->raid_disk = 0; break; default: rdev->saved_raid_disk = role; @@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) /* Activating a spare .. or possibly reactivating * if we ever get bitmaps working here. */ + int err; if (rdev->raid_disk != -1) return -EBUSY; @@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) rdev->saved_raid_disk = -1; clear_bit(In_sync, &rdev->flags); clear_bit(Bitmap_sync, &rdev->flags); - remove_and_add_spares(rdev->mddev, rdev); - if (rdev->raid_disk == -1) - return -EBUSY; + err = rdev->mddev->pers-> + hot_add_disk(rdev->mddev, rdev); + if (err) { + rdev->raid_disk = -1; + return err; + } else + sysfs_notify_dirent_safe(rdev->sysfs_state); + if (sysfs_link_rdev(rdev->mddev, rdev)) + /* failure here is OK */; /* don't wakeup anyone, leave that to userspace. 
*/ } else { if (slot >= rdev->mddev->raid_disks && @@ -4318,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) } mddev_unlock(mddev); } - } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; else if (cmd_match(page, "resync")) clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); @@ -4332,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len) return -EINVAL; err = mddev_lock(mddev); if (!err) { - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - err = mddev->pers->start_reshape(mddev); + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + err = -EBUSY; + else { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + err = mddev->pers->start_reshape(mddev); + } mddev_unlock(mddev); } if (err) diff --git a/drivers/md/md.h b/drivers/md/md.h index 2bea51edfab7..ca0b643fe3c1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev) static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; - if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { + if (!test_bit(Replacement, &rdev->flags) && + !test_bit(Journal, &rdev->flags) && + mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); } else @@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; - if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { + if (!test_bit(Replacement, &rdev->flags) && + !test_bit(Journal, &rdev->flags) && + mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); sysfs_remove_link(&mddev->kobj, nm); } diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index 78c74bb71ba4..a53cbc928af1 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig @@ -7,12 +7,3 @@ config DM_PERSISTENT_DATA Library providing immutable on-disk data structure support for device-mapper targets such as the thin provisioning target. -config DM_DEBUG_BLOCK_STACK_TRACING - bool "Keep stack trace of persistent data block lock holders" - depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA - select STACKTRACE - ---help--- - Enable this for messages that may help debug problems with the - block manager locking used by thin provisioning and caching. - - If unsure, say N. 
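The mddev_suspend()/mddev_resume() hunk above replaces the old BUG_ON(mddev->suspended) with a nesting count, so suspend calls may now nest and only the final matching resume restarts I/O. A small stand-alone sketch of that counting pattern (illustrative only; the md-specific locking, synchronize_rcu() and wait_event() details are deliberately left out):

#include <assert.h>
#include <stdio.h>

struct example_dev {
	int suspended;	/* nesting depth, 0 == running */
};

/* First caller does the real quiesce; nested callers just bump the count. */
static void example_suspend(struct example_dev *d)
{
	if (d->suspended++)
		return;
	printf("quiescing device\n");
}

/* Only the matching outermost resume restarts the device. */
static void example_resume(struct example_dev *d)
{
	assert(d->suspended > 0);
	if (--d->suspended)
		return;
	printf("resuming device\n");
}

int main(void)
{
	struct example_dev d = { 0 };

	example_suspend(&d);	/* quiesces */
	example_suspend(&d);	/* nested, no-op */
	example_resume(&d);	/* still suspended */
	example_resume(&d);	/* resumes */
	return 0;
}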
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index f2393ba838eb..1e33dd51c21f 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -97,10 +97,6 @@ static void __del_holder(struct block_lock *lock, struct task_struct *task) static int __check_holder(struct block_lock *lock) { unsigned i; -#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - static struct stack_trace t; - static stack_entries entries; -#endif for (i = 0; i < MAX_HOLDERS; i++) { if (lock->holders[i] == current) { @@ -110,12 +106,7 @@ static int __check_holder(struct block_lock *lock) print_stack_trace(lock->traces + i, 4); DMERR("subsequent acquisition attempted here:"); - t.nr_entries = 0; - t.max_entries = MAX_STACK; - t.entries = entries; - t.skip = 3; - save_stack_trace(&t); - print_stack_trace(&t, 4); + dump_stack(); #endif return -EINVAL; } diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index b1ced58eb5e1..ea3d3b656fd0 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -754,12 +754,19 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root, return 0; } +static bool need_insert(struct btree_node *node, uint64_t *keys, + unsigned level, unsigned index) +{ + return ((index >= le32_to_cpu(node->header.nr_entries)) || + (le64_to_cpu(node->keys[index]) != keys[level])); +} + static int insert(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, void *value, dm_block_t *new_root, int *inserted) __dm_written_to_disk(value) { - int r, need_insert; + int r; unsigned level, index = -1, last_level = info->levels - 1; dm_block_t block = root; struct shadow_spine spine; @@ -775,10 +782,8 @@ static int insert(struct dm_btree_info *info, dm_block_t root, goto bad; n = dm_block_data(shadow_current(&spine)); - need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) || - (le64_to_cpu(n->keys[index]) != keys[level])); - if (need_insert) { + if (need_insert(n, keys, level, index)) { dm_block_t new_tree; __le64 new_le; @@ -805,10 +810,8 @@ static int insert(struct dm_btree_info *info, dm_block_t root, goto bad; n = dm_block_data(shadow_current(&spine)); - need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) || - (le64_to_cpu(n->keys[index]) != keys[level])); - if (need_insert) { + if (need_insert(n, keys, level, index)) { if (inserted) *inserted = 1; diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index fca6dbcf9a47..7e44005595c1 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result) static int brb_pop(struct bop_ring_buffer *brb) { - struct block_op *bop; - if (brb_empty(brb)) return -ENODATA; - bop = brb->bops + brb->begin; brb->begin = brb_next(brb, brb->begin); return 0; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 41d70bc9ba2f..84e597e1c489 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) first = i; fbio = r10_bio->devs[i].bio; + fbio->bi_iter.bi_size = r10_bio->sectors << 9; + fbio->bi_iter.bi_idx = 0; vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); /* now find blocks with errors */ @@ -1989,7 +1991,7 @@ static void 
sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) bio_reset(tbio); tbio->bi_vcnt = vcnt; - tbio->bi_iter.bi_size = r10_bio->sectors << 9; + tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; tbio->bi_rw = WRITE; tbio->bi_private = r10_bio; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 3ef3d6c6bbf8..9264ea73b3be 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -9,7 +9,7 @@ menuconfig MEDIA_SUPPORT If you want to use Webcams, Video grabber devices and/or TV devices enable this option and other options below. Additional info and docs are available on the web at - <http://linuxtv.org> + <https://linuxtv.org> if MEDIA_SUPPORT @@ -51,7 +51,7 @@ config MEDIA_RADIO_SUPPORT Enable AM/FM radio support. Additional info and docs are available on the web at - <http://linuxtv.org> + <https://linuxtv.org> Say Y when you have a board with radio support. diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c index c07b9db51b05..5e4afa0131e6 100644 --- a/drivers/media/common/cx2341x.c +++ b/drivers/media/common/cx2341x.c @@ -27,7 +27,7 @@ #include <linux/videodev2.h> #include <media/tuner.h> -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #include <media/v4l2-common.h> MODULE_DESCRIPTION("cx23415/6/8 driver"); diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c index 1ff9f5323bc3..9f7c5b0a6b45 100644 --- a/drivers/media/common/saa7146/saa7146_core.c +++ b/drivers/media/common/saa7146/saa7146_core.c @@ -20,7 +20,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <media/saa7146.h> +#include <media/drv-intf/saa7146.h> #include <linux/module.h> static int saa7146_num; diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index df1e8c975cd8..930d2c94d5d3 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -1,6 +1,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #include <linux/module.h> /****************************************************************************/ diff --git a/drivers/media/common/saa7146/saa7146_hlp.c b/drivers/media/common/saa7146/saa7146_hlp.c index 3dc6a838ca6f..6ebcbc6450f5 100644 --- a/drivers/media/common/saa7146/saa7146_hlp.c +++ b/drivers/media/common/saa7146/saa7146_hlp.c @@ -2,7 +2,7 @@ #include <linux/kernel.h> #include <linux/export.h> -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format) { diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c index 22027198129d..239a2db35068 100644 --- a/drivers/media/common/saa7146/saa7146_i2c.c +++ b/drivers/media/common/saa7146/saa7146_i2c.c @@ -1,6 +1,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> static u32 saa7146_i2c_func(struct i2c_adapter *adapter) { diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c index 2da995758778..49237518d65f 100644 --- a/drivers/media/common/saa7146/saa7146_vbi.c +++ b/drivers/media/common/saa7146/saa7146_vbi.c @@ -1,4 +1,4 @@ -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> static int vbi_pixel_to_capture = 720 * 2; diff --git 
a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c index 30779498c173..d5837be3e8cf 100644 --- a/drivers/media/common/saa7146/saa7146_video.c +++ b/drivers/media/common/saa7146/saa7146_video.c @@ -1,6 +1,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #include <media/v4l2-event.h> #include <media/v4l2-ctrls.h> #include <linux/module.h> diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h index fc8b7925c532..d9abd96ef48b 100644 --- a/drivers/media/common/siano/smsir.h +++ b/drivers/media/common/siano/smsir.h @@ -30,8 +30,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>. #include <linux/input.h> #include <media/rc-core.h> -#define IR_DEFAULT_TIMEOUT 100 - struct smscore_device_t; struct ir_t { diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h index ccc1f43cb9a9..6d3b95b8939d 100644 --- a/drivers/media/dvb-core/demux.h +++ b/drivers/media/dvb-core/demux.h @@ -32,6 +32,49 @@ #include <linux/time.h> #include <linux/dvb/dmx.h> +/** + * DOC: Digital TV Demux + * + * The Kernel Digital TV Demux kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * demux layer. It is only of interest for Digital TV device driver writers. + * The header file for this kABI is named demux.h and located in + * drivers/media/dvb-core. + * + * The demux kABI should be implemented for each demux in the system. It is + * used to select the TS source of a demux and to manage the demux resources. + * When the demux client allocates a resource via the demux kABI, it receives + * a pointer to the kABI of that resource. + * + * Each demux receives its TS input from a DVB front-end or from memory, as + * set via this demux kABI. In a system with more than one front-end, the kABI + * can be used to select one of the DVB front-ends as a TS source for a demux, + * unless this is fixed in the HW platform. + * + * The demux kABI only controls front-ends regarding their connections with + * demuxes; the kABI used to set the other front-end parameters, such as + * tuning, is defined via the Digital TV Frontend kABI. + * + * The functions that implement the abstract interface demux should be defined + * static or module private and registered to the Demux core for external + * access. It is not necessary to implement every function in the struct + * &dmx_demux. For example, a demux interface might support Section filtering, + * but not PES filtering. The kABI client is expected to check the value of any + * function pointer before calling the function: the value of NULL means + * that the function is not available. + * + * Whenever the functions of the demux API modify shared data, the + * possibilities of lost update and race condition problems should be + * addressed, e.g. by protecting parts of code with mutexes. + * + * Note that functions called from a bottom half context must not sleep. + * Even a simple memory allocation without using %GFP_ATOMIC can result in a + * kernel thread being put to sleep if swapping is needed. For example, the + * Linux Kernel calls the functions of a network device interface from a + * bottom half context. Thus, if a demux kABI function is called from network + * device code, the function must not sleep.
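The "NULL means not implemented" rule stated in the DOC block above is what demux clients are expected to code against. A hedged sketch of such a check from the client side follows; the allocate_section_feed op and its argument list are recalled from dvb-core's struct dmx_demux rather than shown in this hunk, so treat them as an assumption:

#include <linux/errno.h>
#include "demux.h"	/* dvb-core header this DOC block documents */

/*
 * Illustrative only: ask a demux for a section feed, backing off cleanly
 * when the driver left the op NULL because it offers no section filtering.
 */
static int example_get_section_feed(struct dmx_demux *demux,
				    struct dmx_section_feed **feed,
				    dmx_section_cb callback)
{
	if (!demux->allocate_section_feed)
		return -EOPNOTSUPP;

	return demux->allocate_section_feed(demux, feed, callback);
}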
+ */ + /* * Common definitions */ @@ -187,8 +230,28 @@ struct dmx_section_feed { int (*stop_filtering)(struct dmx_section_feed *feed); }; -/* - * Callback functions +/** + * DOC: Demux Callback + * + * This kernel-space API comprises the callback functions that deliver filtered + * data to the demux client. Unlike the other DVB kABIs, these functions are + * provided by the client and called from the demux code. + * + * The function pointers of this abstract interface are not packed into a + * structure as in the other demux APIs, because the callback functions are + * registered and used independent of each other. As an example, it is possible + * for the API client to provide several callback functions for receiving TS + * packets and no callbacks for PES packets or sections. + * + * The functions that implement the callback API need not be re-entrant: when + * a demux driver calls one of these functions, the driver is not allowed to + * call the function again before the original call returns. If a callback is + * triggered by a hardware interrupt, it is recommended to use the Linux + * bottom half mechanism or start a tasklet instead of making the callback + * function call directly from a hardware interrupt. + * + * This mechanism is implemented by dmx_ts_cb() and dmx_section_cb() + * callbacks. */ /** diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h index 0a46580b5376..1c1c298d2289 100644 --- a/drivers/media/dvb-core/dvb-usb-ids.h +++ b/drivers/media/dvb-core/dvb-usb-ids.h @@ -389,4 +389,5 @@ #define USB_PID_PCTV_2002E_SE 0x025d #define USB_PID_SVEON_STV27 0xd3af #define USB_PID_TURBOX_DTT_2000 0xd3a4 +#define USB_PID_WINTV_SOLOHD 0x0264 #endif diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index c38ef1a72b4a..b64f33776b74 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -891,21 +891,21 @@ static void dvb_frontend_stop(struct dvb_frontend *fe) } /* - * Sleep until gettimeofday() > waketime + add_usec - * This needs to be as precise as possible, but as the delay is - * usually between 2ms and 32ms, it is done using a scheduled msleep - * followed by usleep (normally a busy-wait loop) for the remainder + * Sleep for the amount of time given by add_usec parameter + * + * This needs to be as precise as possible, as it affects the detection of + * the dish tone command at the satellite subsystem. The precision is improved + * by using a scheduled msleep followed by udelay for the remainder. */ void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec) { - s32 delta, newdelta; + s32 delta; - ktime_add_us(*waketime, add_usec); + *waketime = ktime_add_us(*waketime, add_usec); delta = ktime_us_delta(ktime_get_real(), *waketime); if (delta > 2500) { msleep((delta - 1500) / 1000); - newdelta = ktime_us_delta(ktime_get_real(), *waketime); - delta = (newdelta > delta) ? 0 : newdelta; + delta = ktime_us_delta(ktime_get_real(), *waketime); } if (delta > 0) udelay(delta); @@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file, dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", __func__, c->delivery_system, fe->ops.info.type); - /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't - * do it, it is done for it. 
*/ - info->caps |= FE_CAN_INVERSION_AUTO; + /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */ + if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) + info->caps |= FE_CAN_INVERSION_AUTO; err = 0; break; } @@ -2710,6 +2710,11 @@ int dvb_frontend_resume(struct dvb_frontend *fe) else if (fe->ops.tuner_ops.init) ret = fe->ops.tuner_ops.init(fe); + if (fe->ops.set_tone && fepriv->tone != -1) + fe->ops.set_tone(fe, fepriv->tone); + if (fe->ops.set_voltage && fepriv->voltage != -1) + fe->ops.set_voltage(fe, fepriv->voltage); + fe->exit = DVB_FE_NO_EXIT; fepriv->state = FESTATE_RETUNE; dvb_frontend_wakeup(fe); diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h index 97661b2f247a..458bcce20e38 100644 --- a/drivers/media/dvb-core/dvb_frontend.h +++ b/drivers/media/dvb-core/dvb_frontend.h @@ -42,6 +42,29 @@ #include "dvbdev.h" +/** + * DOC: Digital TV Frontend + * + * The Digital TV Frontend kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * frontend layer. It is only of interest for Digital TV device driver writers. + * The header file for this API is named dvb_frontend.h and located in + * drivers/media/dvb-core. + * + * Before using the Digital TV frontend core, the bridge driver should attach + * the frontend demod, tuner and SEC devices and call dvb_register_frontend(), + * in order to register the new frontend at the subsystem. At device + * detach/removal, the bridge driver should call dvb_unregister_frontend() to + * remove the frontend from the core and then dvb_frontend_detach() to free the + * memory allocated by the frontend drivers. + * + * The drivers should also call dvb_frontend_suspend() as part of their + * handler for the &device_driver.suspend(), and dvb_frontend_resume() as + * part of their handler for &device_driver.resume(). + * + * A few other optional functions are provided to handle some special cases. + */ + /* * Maximum number of Delivery systems per frontend. It * should be smaller or equal to 32 @@ -112,16 +135,6 @@ struct analog_parameters { u64 std; }; -enum tuner_param { - DVBFE_TUNER_FREQUENCY = (1 << 0), - DVBFE_TUNER_TUNERSTEP = (1 << 1), - DVBFE_TUNER_IFFREQ = (1 << 2), - DVBFE_TUNER_BANDWIDTH = (1 << 3), - DVBFE_TUNER_REFCLOCK = (1 << 4), - DVBFE_TUNER_IQSENSE = (1 << 5), - DVBFE_TUNER_DUMMY = (1 << 31) -}; - /** * enum dvbfe_algo - defines the algorithm used to tune into a channel * @@ -152,15 +165,6 @@ enum dvbfe_algo { DVBFE_ALGO_RECOVERY = (1 << 31) }; -struct tuner_state { - u32 frequency; - u32 tunerstep; - u32 ifreq; - u32 bandwidth; - u32 iqsense; - u32 refclock; -}; - /** * enum dvbfe_search - search callback possible return status * @@ -209,12 +213,12 @@ enum dvbfe_search { * are stored at @dvb_frontend.dtv_property_cache;. The * tuner demod can change the parameters to reflect the * changes needed for the channel to be tuned, and - * update statistics. + * update statistics. This is the recommended way to set + * the tuner parameters and should be used on newer + * drivers. * @set_analog_params: callback function used to tune into an analog TV * channel on hybrid tuners. It passes @analog_parameters; * to the driver. - * @calc_regs: callback function used to pass register data settings - * for simple tuners. * @set_config: callback function used to send some tuner-specific * parameters. 
* @get_frequency: get the actual tuned frequency @@ -227,16 +231,10 @@ enum dvbfe_search { * via DVBv5 API (@dvb_frontend.dtv_property_cache;). * @get_afc: Used only by analog TV core. Reports the frequency * drift due to AFC. - * @set_frequency: Set a new frequency. Please notice that using - * set_params is preferred. - * @set_bandwidth: Set a new frequency. Please notice that using - * set_params is preferred. - * @set_state: callback function used on some legacy drivers that - * don't implement set_params in order to set properties. - * Shouldn't be used on new drivers. - * @get_state: callback function used to get properties by some - * legacy drivers that don't implement set_params. - * Shouldn't be used on new drivers. + * @calc_regs: callback function used to pass register data settings + * for simple tuners. Shouldn't be used on newer drivers. + * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers. + * @set_bandwidth: Set a new bandwidth. Shouldn't be used on newer drivers. * * NOTE: frequencies used on get_frequency and set_frequency are in Hz for * terrestrial/cable or kHz for satellite. @@ -252,14 +250,10 @@ struct dvb_tuner_ops { int (*suspend)(struct dvb_frontend *fe); int (*resume)(struct dvb_frontend *fe); - /** This is for simple PLLs - set all parameters in one go. */ + /* This is the recommended way to set the tuner */ int (*set_params)(struct dvb_frontend *fe); int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p); - /** This is support for demods like the mt352 - fills out the supplied buffer with what to write. */ - int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len); - - /** This is to allow setting tuner-specific configs */ int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency); @@ -272,17 +266,23 @@ struct dvb_tuner_ops { int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength); int (*get_afc)(struct dvb_frontend *fe, s32 *afc); - /** These are provided separately from set_params in order to facilitate silicon - * tuners which require sophisticated tuning loops, controlling each parameter separately. */ - int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); - int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); + /* + * This is support for demods like the mt352 - fills out the supplied + * buffer with what to write. + * + * Don't use on newer drivers. + */ + int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len); /* - * These are provided separately from set_params in order to facilitate silicon - * tuners which require sophisticated tuning loops, controlling each parameter separately. + * These are provided separately from set_params in order to + * facilitate silicon tuners which require sophisticated tuning loops, + * controlling each parameter separately. + * + * Don't use on newer drivers. */ - int (*set_state)(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state); - int (*get_state)(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state); + int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); + int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); }; /** @@ -404,6 +404,11 @@ struct dtv_frontend_properties; * FE_ENABLE_HIGH_LNB_VOLTAGE ioctl (only Satellite). * @dishnetwork_send_legacy_command: callback function to implement the * FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
+ * Drivers should not use this, except when the DVB + * core emulation fails to provide proper support (e.g. + * if set_voltage() takes more than 8ms to work), and + * when backward compatibility with this legacy API is + * required. * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C * mux support instead. * @ts_bus_ctrl: callback function used to take control of the TS bus. @@ -466,7 +471,8 @@ struct dvb_frontend_ops { int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); int (*set_lna)(struct dvb_frontend *); - /* These callbacks are for devices that implement their own + /* + * These callbacks are for devices that implement their own * tuning algorithms, rather than a simple swzigzag */ enum dvbfe_search (*search)(struct dvb_frontend *fe); @@ -682,17 +688,126 @@ struct dvb_frontend { unsigned int exit; }; -extern int dvb_register_frontend(struct dvb_adapter *dvb, +/** + * dvb_register_frontend() - Registers a DVB frontend at the adapter + * + * @dvb: pointer to the dvb adapter + * @fe: pointer to the frontend struct + * + * Allocates and initializes the private data needed by the frontend core to + * manage the frontend and calls dvb_register_device() to register a new + * frontend. It also cleans the property cache that stores the frontend + * parameters and selects the first available delivery system. + */ +int dvb_register_frontend(struct dvb_adapter *dvb, struct dvb_frontend *fe); -extern int dvb_unregister_frontend(struct dvb_frontend *fe); +/** + * dvb_unregister_frontend() - Unregisters a DVB frontend + * + * @fe: pointer to the frontend struct + * + * Stops the frontend kthread, calls dvb_unregister_device() and frees the + * private frontend data allocated by dvb_register_frontend(). + * + * NOTE: This function doesn't free the memory allocated by the demod, + * by the SEC driver and by the tuner. In order to free it, an explicit call to + * dvb_frontend_detach() is needed, after calling this function. + */ +int dvb_unregister_frontend(struct dvb_frontend *fe); -extern void dvb_frontend_detach(struct dvb_frontend *fe); +/** + * dvb_frontend_detach() - Detaches and frees frontend specific data + * + * @fe: pointer to the frontend struct + * + * This function should be called after dvb_unregister_frontend(). It + * calls the SEC, tuner and demod release functions: + * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release, + * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release. + * + * If the driver is compiled with CONFIG_MEDIA_ATTACH, it also decreases + * the module reference count, needed to allow userspace to remove the + * previously used DVB frontend modules. + */ +void dvb_frontend_detach(struct dvb_frontend *fe); + +/** + * dvb_frontend_suspend() - Suspends a Digital TV frontend + * + * @fe: pointer to the frontend struct + * + * This function prepares a Digital TV frontend to suspend. + * + * In order to prepare the tuner to suspend, if + * &dvb_frontend_ops.tuner_ops.suspend() is available, it calls it. Otherwise, + * it will call &dvb_frontend_ops.tuner_ops.sleep(), if available. + * + * It will also call &dvb_frontend_ops.sleep() to put the demod to suspend. + * + * The drivers should also call dvb_frontend_suspend() as part of their + * handler for the &device_driver.suspend(). + */ +int dvb_frontend_suspend(struct dvb_frontend *fe); + +/** + * dvb_frontend_resume() - Resumes a Digital TV frontend + * + * @fe: pointer to the frontend struct + * + * This function resumes the usual operation of the tuner after resume.
+ * + * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init(). + * + * If &dvb_frontend_ops.tuner_ops.resume() is available, it calls it. + * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init(), if available. + * + * Once tuner and demods are resumed, it will enforce that the SEC voltage and + * tone are restored to their previous values and wake up the frontend's + * kthread in order to retune the frontend. + * + * The drivers should also call dvb_frontend_resume() as part of their + * handler for the &device_driver.resume(). + */ +int dvb_frontend_resume(struct dvb_frontend *fe); -extern void dvb_frontend_reinitialise(struct dvb_frontend *fe); -extern int dvb_frontend_suspend(struct dvb_frontend *fe); -extern int dvb_frontend_resume(struct dvb_frontend *fe); +/** + * dvb_frontend_reinitialise() - forces a reinitialisation at the frontend + * + * @fe: pointer to the frontend struct + * + * Calls &dvb_frontend_ops.init() and &dvb_frontend_ops.tuner_ops.init(), + * and resets SEC tone and voltage (for Satellite systems). + * + * NOTE: Currently, this function is used only by one driver (budget-av). + * It seems to be needed to address some special issue + * with that specific frontend. + */ +void dvb_frontend_reinitialise(struct dvb_frontend *fe); -extern void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec); +/** + * dvb_frontend_sleep_until() - Sleep for the amount of time given by + * add_usec parameter + * + * @waketime: pointer to a struct ktime_t + * @add_usec: time to sleep, in microseconds + * + * This function is used to measure the time required for the + * %FE_DISHNETWORK_SEND_LEGACY_CMD ioctl to work. It needs to be as precise + * as possible, as it affects the detection of the dish tone command at the + * satellite subsystem. + * + * It's used internally by the DVB frontend core, in order to emulate + * %FE_DISHNETWORK_SEND_LEGACY_CMD using the &dvb_frontend_ops.set_voltage() + * callback. + * + * NOTE: it should not be used by the drivers, as the emulation for the + * legacy callback is provided by the Kernel. The only situation where this + * should be done by the drivers is when there are bugs in the hardware that + * prevent the core emulation from working. In such cases, the driver would + * implement a &dvb_frontend_ops.dishnetwork_send_legacy_command() and + * call this function directly. + */ +void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec); #endif diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig index 292c9479bb75..310e4b8beae8 100644 --- a/drivers/media/dvb-frontends/Kconfig +++ b/drivers/media/dvb-frontends/Kconfig @@ -264,7 +264,7 @@ config DVB_MB86A16 config DVB_TDA10071 tristate "NXP TDA10071" depends on DVB_CORE && I2C - select REGMAP + select REGMAP_I2C default m if !MEDIA_SUBDRV_AUTOSELECT help Say Y when you want to support this frontend.
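Taken together, the kernel-doc added above prescribes a fixed ordering for bridge drivers: attach the demod/tuner and register the frontend at probe time, unregister and only then detach at removal, and forward suspend/resume to the frontend core. A compressed sketch of a hypothetical bridge driver following that ordering; the my_bridge structure and my_demod_attach() helper are invented for illustration, and only the dvb_* calls come from dvb_frontend.h:

#include <linux/errno.h>
#include "dvb_frontend.h"	/* dvb-core header documented above */

/* Assumed demod/tuner attach helper, not part of dvb-core. */
struct dvb_frontend *my_demod_attach(struct dvb_adapter *adap);

struct my_bridge {			/* invented example state */
	struct dvb_adapter adapter;
	struct dvb_frontend *fe;
};

static int my_bridge_frontend_init(struct my_bridge *b)
{
	b->fe = my_demod_attach(&b->adapter);
	if (!b->fe)
		return -ENODEV;

	/* Register only after demod/tuner/SEC are attached. */
	return dvb_register_frontend(&b->adapter, b->fe);
}

static void my_bridge_frontend_exit(struct my_bridge *b)
{
	/* Order matters: unregister first, then free demod/tuner/SEC state. */
	dvb_unregister_frontend(b->fe);
	dvb_frontend_detach(b->fe);
}

/* device_driver suspend/resume handlers just forward to the frontend core. */
static int my_bridge_suspend(struct my_bridge *b)
{
	return dvb_frontend_suspend(b->fe);
}

static int my_bridge_resume(struct my_bridge *b)
{
	return dvb_frontend_resume(b->fe);
}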
diff --git a/drivers/media/dvb-frontends/au8522_common.c b/drivers/media/dvb-frontends/au8522_common.c index 3559ff230045..f135126bc373 100644 --- a/drivers/media/dvb-frontends/au8522_common.c +++ b/drivers/media/dvb-frontends/au8522_common.c @@ -44,7 +44,7 @@ int au8522_writereg(struct au8522_state *state, u16 reg, u8 data) int ret; u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data }; - struct i2c_msg msg = { .addr = state->config->demod_address, + struct i2c_msg msg = { .addr = state->config.demod_address, .flags = 0, .buf = buf, .len = 3 }; ret = i2c_transfer(state->i2c, &msg, 1); @@ -64,9 +64,9 @@ u8 au8522_readreg(struct au8522_state *state, u16 reg) u8 b1[] = { 0 }; struct i2c_msg msg[] = { - { .addr = state->config->demod_address, .flags = 0, + { .addr = state->config.demod_address, .flags = 0, .buf = b0, .len = 2 }, - { .addr = state->config->demod_address, .flags = I2C_M_RD, + { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); @@ -140,7 +140,7 @@ EXPORT_SYMBOL(au8522_release_state); static int au8522_led_gpio_enable(struct au8522_state *state, int onoff) { - struct au8522_led_config *led_config = state->config->led_cfg; + struct au8522_led_config *led_config = state->config.led_cfg; u8 val; /* bail out if we can't control an LED */ @@ -170,7 +170,7 @@ static int au8522_led_gpio_enable(struct au8522_state *state, int onoff) */ int au8522_led_ctrl(struct au8522_state *state, int led) { - struct au8522_led_config *led_config = state->config->led_cfg; + struct au8522_led_config *led_config = state->config.led_cfg; int i, ret = 0; /* bail out if we can't control an LED */ diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c index 28d7dc2fee34..c8f13d8370e5 100644 --- a/drivers/media/dvb-frontends/au8522_decoder.c +++ b/drivers/media/dvb-frontends/au8522_decoder.c @@ -730,7 +730,6 @@ static int au8522_probe(struct i2c_client *client, struct v4l2_ctrl_handler *hdl; struct v4l2_subdev *sd; int instance; - struct au8522_config *demod_config; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, @@ -754,15 +753,7 @@ static int au8522_probe(struct i2c_client *client, break; } - demod_config = kzalloc(sizeof(struct au8522_config), GFP_KERNEL); - if (demod_config == NULL) { - if (instance == 1) - kfree(state); - return -ENOMEM; - } - demod_config->demod_address = 0x8e >> 1; - - state->config = demod_config; + state->config.demod_address = 0x8e >> 1; state->i2c = client->adapter; sd = &state->sd; @@ -784,8 +775,7 @@ static int au8522_probe(struct i2c_client *client, int err = hdl->error; v4l2_ctrl_handler_free(hdl); - kfree(demod_config); - kfree(state); + au8522_release_state(state); return err; } diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c index f956f13fb3dc..6c1e97640f3f 100644 --- a/drivers/media/dvb-frontends/au8522_dig.c +++ b/drivers/media/dvb-frontends/au8522_dig.c @@ -566,7 +566,7 @@ static int au8522_enable_modulation(struct dvb_frontend *fe, au8522_writereg(state, VSB_mod_tab[i].reg, VSB_mod_tab[i].data); - au8522_set_if(fe, state->config->vsb_if); + au8522_set_if(fe, state->config.vsb_if); break; case QAM_64: dprintk("%s() QAM 64\n", __func__); @@ -574,7 +574,7 @@ static int au8522_enable_modulation(struct dvb_frontend *fe, au8522_writereg(state, QAM64_mod_tab[i].reg, QAM64_mod_tab[i].data); - au8522_set_if(fe, state->config->qam_if); + au8522_set_if(fe, 
state->config.qam_if); break; case QAM_256: if (zv_mode) { @@ -583,7 +583,7 @@ static int au8522_enable_modulation(struct dvb_frontend *fe, au8522_writereg(state, QAM256_mod_tab_zv_mode[i].reg, QAM256_mod_tab_zv_mode[i].data); - au8522_set_if(fe, state->config->qam_if); + au8522_set_if(fe, state->config.qam_if); msleep(100); au8522_writereg(state, 0x821a, 0x00); } else { @@ -592,7 +592,7 @@ static int au8522_enable_modulation(struct dvb_frontend *fe, au8522_writereg(state, QAM256_mod_tab[i].reg, QAM256_mod_tab[i].data); - au8522_set_if(fe, state->config->qam_if); + au8522_set_if(fe, state->config.qam_if); } break; default: @@ -666,7 +666,7 @@ static int au8522_read_status(struct dvb_frontend *fe, enum fe_status *status) *status |= FE_HAS_LOCK | FE_HAS_SYNC; } - switch (state->config->status_mode) { + switch (state->config.status_mode) { case AU8522_DEMODLOCKING: dprintk("%s() DEMODLOCKING\n", __func__); if (*status & FE_HAS_VITERBI) @@ -704,7 +704,7 @@ static int au8522_read_status(struct dvb_frontend *fe, enum fe_status *status) static int au8522_led_status(struct au8522_state *state, const u16 *snr) { - struct au8522_led_config *led_config = state->config->led_cfg; + struct au8522_led_config *led_config = state->config.led_cfg; int led; u16 strong; @@ -758,7 +758,7 @@ static int au8522_read_snr(struct dvb_frontend *fe, u16 *snr) au8522_readreg(state, 0x4311), snr); - if (state->config->led_cfg) + if (state->config.led_cfg) au8522_led_status(state, snr); return ret; @@ -866,7 +866,7 @@ struct dvb_frontend *au8522_attach(const struct au8522_config *config, } /* setup the state */ - state->config = config; + state->config = *config; state->i2c = i2c; state->operational_mode = AU8522_DIGITAL_MODE; diff --git a/drivers/media/dvb-frontends/au8522_priv.h b/drivers/media/dvb-frontends/au8522_priv.h index 951b3847e6f6..ee330c61aa61 100644 --- a/drivers/media/dvb-frontends/au8522_priv.h +++ b/drivers/media/dvb-frontends/au8522_priv.h @@ -50,7 +50,7 @@ struct au8522_state { struct list_head hybrid_tuner_instance_list; /* configuration settings */ - const struct au8522_config *config; + struct au8522_config config; struct dvb_frontend frontend; diff --git a/drivers/media/dvb-frontends/bsbe1-d01a.h b/drivers/media/dvb-frontends/bsbe1-d01a.h index 7ed3c424178c..baaf89e768cf 100644 --- a/drivers/media/dvb-frontends/bsbe1-d01a.h +++ b/drivers/media/dvb-frontends/bsbe1-d01a.h @@ -21,7 +21,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef BSBE1_D01A_H diff --git a/drivers/media/dvb-frontends/bsbe1.h b/drivers/media/dvb-frontends/bsbe1.h index 53e4d0dbb745..4ad766154741 100644 --- a/drivers/media/dvb-frontends/bsbe1.h +++ b/drivers/media/dvb-frontends/bsbe1.h @@ -19,7 +19,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef BSBE1_H diff --git a/drivers/media/dvb-frontends/bsru6.h b/drivers/media/dvb-frontends/bsru6.h index c2a578e1314d..275c1782597d 100644 --- a/drivers/media/dvb-frontends/bsru6.h +++ b/drivers/media/dvb-frontends/bsru6.h @@ -19,7 +19,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef BSRU6_H diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c index 
b46450a10b80..6913cd687b4d 100644 --- a/drivers/media/dvb-frontends/isl6405.c +++ b/drivers/media/dvb-frontends/isl6405.c @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #include <linux/delay.h> #include <linux/errno.h> diff --git a/drivers/media/dvb-frontends/isl6405.h b/drivers/media/dvb-frontends/isl6405.h index 3c148b830bd1..4a23d3bdf3e6 100644 --- a/drivers/media/dvb-frontends/isl6405.h +++ b/drivers/media/dvb-frontends/isl6405.h @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef _ISL6405_H diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c index 3a4d4606a426..0b6d3837d5de 100644 --- a/drivers/media/dvb-frontends/isl6421.c +++ b/drivers/media/dvb-frontends/isl6421.c @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #include <linux/delay.h> #include <linux/errno.h> diff --git a/drivers/media/dvb-frontends/isl6421.h b/drivers/media/dvb-frontends/isl6421.h index 3273597833fd..00f9874ca5a2 100644 --- a/drivers/media/dvb-frontends/isl6421.h +++ b/drivers/media/dvb-frontends/isl6421.h @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef _ISL6421_H diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c index 4aca0fb9a8a7..6261460d93a7 100644 --- a/drivers/media/dvb-frontends/lnbp21.c +++ b/drivers/media/dvb-frontends/lnbp21.c @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #include <linux/delay.h> #include <linux/errno.h> diff --git a/drivers/media/dvb-frontends/lnbp21.h b/drivers/media/dvb-frontends/lnbp21.h index a9b530de62a6..cd9101f6e579 100644 --- a/drivers/media/dvb-frontends/lnbp21.h +++ b/drivers/media/dvb-frontends/lnbp21.h @@ -21,7 +21,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef _LNBP21_H diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c index d7ca0fdd0084..5c5fd04fd4a7 100644 --- a/drivers/media/dvb-frontends/lnbp22.c +++ b/drivers/media/dvb-frontends/lnbp22.c @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #include <linux/delay.h> #include <linux/errno.h> diff --git a/drivers/media/dvb-frontends/lnbp22.h b/drivers/media/dvb-frontends/lnbp22.h index 628148385182..5d01d92814c2 100644 --- a/drivers/media/dvb-frontends/lnbp22.h +++ b/drivers/media/dvb-frontends/lnbp22.h @@ -22,7 +22,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org + * the project's page is at https://linuxtv.org */ #ifndef _LNBP22_H diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 
78b87b260d74..10f2119935da 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -976,7 +976,8 @@ static int rtl2832_regmap_read(void *context, const void *reg_buf, ret = __i2c_transfer(client->adapter, msg, 2); if (ret != 2) { - dev_warn(&client->dev, "i2c reg read failed %d\n", ret); + dev_warn(&client->dev, "i2c reg read failed %d reg %02x\n", + ret, *(u8 *)reg_buf); if (ret >= 0) ret = -EREMOTEIO; return ret; @@ -999,7 +1000,8 @@ static int rtl2832_regmap_write(void *context, const void *data, size_t count) ret = __i2c_transfer(client->adapter, msg, 1); if (ret != 1) { - dev_warn(&client->dev, "i2c reg write failed %d\n", ret); + dev_warn(&client->dev, "i2c reg write failed %d reg %02x\n", + ret, *(u8 *)data); if (ret >= 0) ret = -EREMOTEIO; return ret; @@ -1028,7 +1030,8 @@ static int rtl2832_regmap_gather_write(void *context, const void *reg, ret = __i2c_transfer(client->adapter, msg, 1); if (ret != 1) { - dev_warn(&client->dev, "i2c reg write failed %d\n", ret); + dev_warn(&client->dev, "i2c reg write failed %d reg %02x\n", + ret, *(u8 const *)reg); if (ret >= 0) ret = -EREMOTEIO; return ret; @@ -1097,18 +1100,6 @@ static int rtl2832_enable_slave_ts(struct i2c_client *client) if (ret) goto err; - ret = rtl2832_bulk_write(client, 0x022, "\x01", 1); - if (ret) - goto err; - - ret = rtl2832_bulk_write(client, 0x026, "\x1f", 1); - if (ret) - goto err; - - ret = rtl2832_bulk_write(client, 0x027, "\xff", 1); - if (ret) - goto err; - ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3); if (ret) goto err; diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c index dcd8d94c1037..b860f02a4e55 100644 --- a/drivers/media/dvb-frontends/rtl2832_sdr.c +++ b/drivers/media/dvb-frontends/rtl2832_sdr.c @@ -310,7 +310,7 @@ static void rtl2832_sdr_urb_complete(struct urb *urb) len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer, urb->actual_length); vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len); - v4l2_get_timestamp(&fbuf->vb.timestamp); + fbuf->vb.vb2_buf.timestamp = ktime_get_ns(); fbuf->vb.sequence = dev->sequence++; vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE); } @@ -490,7 +490,7 @@ static int rtl2832_sdr_querycap(struct file *file, void *fh, /* Videobuf2 operations */ static int rtl2832_sdr_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, + unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq); diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c index 7c2eeee69757..1cf6e52e0105 100644 --- a/drivers/media/dvb-frontends/si2165.c +++ b/drivers/media/dvb-frontends/si2165.c @@ -1,21 +1,21 @@ /* - Driver for Silicon Labs Si2161 DVB-T and Si2165 DVB-C/-T Demodulator - - Copyright (C) 2013-2014 Matthias Schwarzott <zzam@gentoo.org> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - References: - http://www.silabs.com/Support%20Documents/TechnicalDocs/Si2165-short.pdf -*/ + * Driver for Silicon Labs Si2161 DVB-T and Si2165 DVB-C/-T Demodulator + * + * Copyright (C) 2013-2014 Matthias Schwarzott <zzam@gentoo.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * References: + * http://www.silabs.com/Support%20Documents/TechnicalDocs/Si2165-short.pdf + */ #include <linux/delay.h> #include <linux/errno.h> @@ -31,16 +31,18 @@ #include "si2165_priv.h" #include "si2165.h" -/* Hauppauge WinTV-HVR-930C-HD B130 / PCTV QuatroStick 521e 1113xx - * uses 16 MHz xtal */ - -/* Hauppauge WinTV-HVR-930C-HD B131 / PCTV QuatroStick 522e 1114xx - * uses 24 MHz clock provided by tuner */ +/* + * Hauppauge WinTV-HVR-930C-HD B130 / PCTV QuatroStick 521e 1113xx + * uses 16 MHz xtal + * + * Hauppauge WinTV-HVR-930C-HD B131 / PCTV QuatroStick 522e 1114xx + * uses 24 MHz clock provided by tuner + */ struct si2165_state { struct i2c_adapter *i2c; - struct dvb_frontend frontend; + struct dvb_frontend fe; struct si2165_config config; @@ -241,6 +243,27 @@ err: return ret; } +#define REG16(reg, val) { (reg), (val) & 0xff }, { (reg)+1, (val)>>8 & 0xff } +struct si2165_reg_value_pair { + u16 reg; + u8 val; +}; + +static int si2165_write_reg_list(struct si2165_state *state, + const struct si2165_reg_value_pair *regs, + int count) +{ + int i; + int ret; + + for (i = 0; i < count; i++) { + ret = si2165_writereg8(state, regs[i].reg, regs[i].val); + if (ret < 0) + return ret; + } + return 0; +} + static int si2165_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *s) { @@ -258,8 +281,10 @@ static int si2165_init_pll(struct si2165_state *state) u8 divl = 12; u8 buf[4]; - /* hardcoded values can be deleted if calculation is verified - * or it yields the same values as the windows driver */ + /* + * hardcoded values can be deleted if calculation is verified + * or it yields the same values as the windows driver + */ switch (ref_freq_Hz) { case 16000000u: divn = 56; @@ -274,8 +299,10 @@ static int si2165_init_pll(struct si2165_state *state) if (ref_freq_Hz > 16000000u) divr = 2; - /* now select divn and divp such that - * fvco is in 1624..1824 MHz */ + /* + * now select divn and divp such that + * fvco is in 1624..1824 MHz + */ if (1624000000u * divr > ref_freq_Hz * 2u * 63u) divp = 4; @@ -341,10 +368,12 @@ static int si2165_upload_firmware_block(struct si2165_state *state, if (len % 4 != 0) return -EINVAL; - deb_fw_load("si2165_upload_firmware_block called with len=0x%x offset=0x%x blockcount=0x%x\n", + deb_fw_load( + "si2165_upload_firmware_block called with len=0x%x offset=0x%x blockcount=0x%x\n", len, offset, block_count); while (offset+12 <= len && cur_block < block_count) { - deb_fw_load("si2165_upload_firmware_block in while len=0x%x offset=0x%x cur_block=0x%x blockcount=0x%x\n", + deb_fw_load( + "si2165_upload_firmware_block in while len=0x%x offset=0x%x cur_block=0x%x blockcount=0x%x\n", len, offset, cur_block, block_count); wordcount = data[offset]; if (wordcount < 1 || data[offset+1] || @@ -383,7 +412,8 @@ 
static int si2165_upload_firmware_block(struct si2165_state *state, cur_block++; } - deb_fw_load("si2165_upload_firmware_block after while len=0x%x offset=0x%x cur_block=0x%x blockcount=0x%x\n", + deb_fw_load( + "si2165_upload_firmware_block after while len=0x%x offset=0x%x cur_block=0x%x blockcount=0x%x\n", len, offset, cur_block, block_count); if (poffset) @@ -633,7 +663,7 @@ static int si2165_init(struct dvb_frontend *fe) goto error; /* ber_pkt */ - ret = si2165_writereg16(state, 0x0470 , 0x7530); + ret = si2165_writereg16(state, 0x0470, 0x7530); if (ret < 0) goto error; @@ -660,22 +690,19 @@ static int si2165_init(struct dvb_frontend *fe) goto error; } - /* write adc values after each reset*/ - ret = si2165_writereg8(state, 0x012a, 0x46); - if (ret < 0) - goto error; - ret = si2165_writereg8(state, 0x012c, 0x00); + /* ts output config */ + ret = si2165_writereg8(state, 0x04e4, 0x20); if (ret < 0) - goto error; - ret = si2165_writereg8(state, 0x012e, 0x0a); + return ret; + ret = si2165_writereg16(state, 0x04ef, 0x00fe); if (ret < 0) - goto error; - ret = si2165_writereg8(state, 0x012f, 0xff); + return ret; + ret = si2165_writereg24(state, 0x04f4, 0x555555); if (ret < 0) - goto error; - ret = si2165_writereg8(state, 0x0123, 0x70); + return ret; + ret = si2165_writereg8(state, 0x04e5, 0x01); if (ret < 0) - goto error; + return ret; return 0; error: @@ -733,16 +760,26 @@ static int si2165_set_oversamp(struct si2165_state *state, u32 dvb_rate) do_div(oversamp, dvb_rate); reg_value = oversamp & 0x3fffffff; - /* oversamp, usbdump contained 0x03100000; */ + dprintk("%s: Write oversamp=%#x\n", __func__, reg_value); return si2165_writereg32(state, 0x00e4, reg_value); } -static int si2165_set_if_freq_shift(struct si2165_state *state, u32 IF) +static int si2165_set_if_freq_shift(struct si2165_state *state) { + struct dvb_frontend *fe = &state->fe; u64 if_freq_shift; s32 reg_value = 0; u32 fe_clk = si2165_get_fe_clk(state); + u32 IF = 0; + if (!fe->ops.tuner_ops.get_if_frequency) { + dev_err(&state->i2c->dev, + "%s: Error: get_if_frequency() not defined at tuner. Can't work without it!\n", + KBUILD_MODNAME); + return -EINVAL; + } + + fe->ops.tuner_ops.get_if_frequency(fe, &IF); if_freq_shift = IF; if_freq_shift <<= 29; @@ -758,25 +795,39 @@ static int si2165_set_if_freq_shift(struct si2165_state *state, u32 IF) return si2165_writereg32(state, 0x00e8, reg_value); } -static int si2165_set_parameters(struct dvb_frontend *fe) +static const struct si2165_reg_value_pair dvbt_regs[] = { + /* standard = DVB-T */ + { 0x00ec, 0x01 }, + { 0x08f8, 0x00 }, + /* impulsive_noise_remover */ + { 0x031c, 0x01 }, + { 0x00cb, 0x00 }, + /* agc2 */ + { 0x016e, 0x41 }, + { 0x016c, 0x0e }, + { 0x016d, 0x10 }, + /* agc */ + { 0x015b, 0x03 }, + { 0x0150, 0x78 }, + /* agc */ + { 0x01a0, 0x78 }, + { 0x01c8, 0x68 }, + /* freq_sync_range */ + REG16(0x030c, 0x0064), + /* gp_reg0 */ + { 0x0387, 0x00 } +}; + +static int si2165_set_frontend_dvbt(struct dvb_frontend *fe) { int ret; struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct si2165_state *state = fe->demodulator_priv; - u8 val[3]; - u32 IF; u32 dvb_rate = 0; u16 bw10k; dprintk("%s: called\n", __func__); - if (!fe->ops.tuner_ops.get_if_frequency) { - dev_err(&state->i2c->dev, - "%s: Error: get_if_frequency() not defined at tuner. 
Can't work without it!\n", - KBUILD_MODNAME); - return -EINVAL; - } - if (!state->has_dvbt) return -EINVAL; @@ -788,34 +839,10 @@ static int si2165_set_parameters(struct dvb_frontend *fe) bw10k = 800; } - /* standard = DVB-T */ - ret = si2165_writereg8(state, 0x00ec, 0x01); - if (ret < 0) - return ret; ret = si2165_adjust_pll_divl(state, 12); if (ret < 0) return ret; - fe->ops.tuner_ops.get_if_frequency(fe, &IF); - ret = si2165_set_if_freq_shift(state, IF); - if (ret < 0) - return ret; - ret = si2165_writereg8(state, 0x08f8, 0x00); - if (ret < 0) - return ret; - /* ts output config */ - ret = si2165_writereg8(state, 0x04e4, 0x20); - if (ret < 0) - return ret; - ret = si2165_writereg16(state, 0x04ef, 0x00fe); - if (ret < 0) - return ret; - ret = si2165_writereg24(state, 0x04f4, 0x555555); - if (ret < 0) - return ret; - ret = si2165_writereg8(state, 0x04e5, 0x01); - if (ret < 0) - return ret; /* bandwidth in 10KHz steps */ ret = si2165_writereg16(state, 0x0308, bw10k); if (ret < 0) @@ -823,48 +850,115 @@ static int si2165_set_parameters(struct dvb_frontend *fe) ret = si2165_set_oversamp(state, dvb_rate); if (ret < 0) return ret; - /* impulsive_noise_remover */ - ret = si2165_writereg8(state, 0x031c, 0x01); - if (ret < 0) - return ret; - ret = si2165_writereg8(state, 0x00cb, 0x00); + + ret = si2165_write_reg_list(state, dvbt_regs, ARRAY_SIZE(dvbt_regs)); if (ret < 0) return ret; + + return 0; +} + +static const struct si2165_reg_value_pair dvbc_regs[] = { + /* standard = DVB-C */ + { 0x00ec, 0x05 }, + { 0x08f8, 0x00 }, + /* agc2 */ - ret = si2165_writereg8(state, 0x016e, 0x41); - if (ret < 0) - return ret; - ret = si2165_writereg8(state, 0x016c, 0x0e); - if (ret < 0) - return ret; - ret = si2165_writereg8(state, 0x016d, 0x10); - if (ret < 0) - return ret; + { 0x016e, 0x50 }, + { 0x016c, 0x0e }, + { 0x016d, 0x10 }, /* agc */ - ret = si2165_writereg8(state, 0x015b, 0x03); - if (ret < 0) - return ret; - ret = si2165_writereg8(state, 0x0150, 0x78); - if (ret < 0) - return ret; + { 0x015b, 0x03 }, + { 0x0150, 0x68 }, /* agc */ - ret = si2165_writereg8(state, 0x01a0, 0x78); + { 0x01a0, 0x68 }, + { 0x01c8, 0x50 }, + + { 0x0278, 0x0d }, + + { 0x023a, 0x05 }, + { 0x0261, 0x09 }, + REG16(0x0350, 0x3e80), + { 0x02f4, 0x00 }, + + { 0x00cb, 0x01 }, + REG16(0x024c, 0x0000), + REG16(0x027c, 0x0000), + { 0x0232, 0x03 }, + { 0x02f4, 0x0b }, + { 0x018b, 0x00 }, +}; + +static int si2165_set_frontend_dvbc(struct dvb_frontend *fe) +{ + struct si2165_state *state = fe->demodulator_priv; + int ret; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + const u32 dvb_rate = p->symbol_rate; + const u32 bw_hz = p->bandwidth_hz; + + if (!state->has_dvbc) + return -EINVAL; + + if (dvb_rate == 0) + return -EINVAL; + + ret = si2165_adjust_pll_divl(state, 14); if (ret < 0) return ret; - ret = si2165_writereg8(state, 0x01c8, 0x68); + + /* Oversampling */ + ret = si2165_set_oversamp(state, dvb_rate); if (ret < 0) return ret; - /* freq_sync_range */ - ret = si2165_writereg16(state, 0x030c, 0x0064); + + ret = si2165_writereg32(state, 0x00c4, bw_hz); if (ret < 0) return ret; - /* gp_reg0 */ - ret = si2165_readreg8(state, 0x0387, val); + + ret = si2165_write_reg_list(state, dvbc_regs, ARRAY_SIZE(dvbc_regs)); if (ret < 0) return ret; - ret = si2165_writereg8(state, 0x0387, 0x00); + + return 0; +} + +static const struct si2165_reg_value_pair agc_rewrite[] = { + { 0x012a, 0x46 }, + { 0x012c, 0x00 }, + { 0x012e, 0x0a }, + { 0x012f, 0xff }, + { 0x0123, 0x70 } +}; + +static int si2165_set_frontend(struct dvb_frontend *fe) +{ 
+ struct si2165_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + u32 delsys = p->delivery_system; + int ret; + u8 val[3]; + + /* initial setting of if freq shift */ + ret = si2165_set_if_freq_shift(state); if (ret < 0) return ret; + + switch (delsys) { + case SYS_DVBT: + ret = si2165_set_frontend_dvbt(fe); + if (ret < 0) + return ret; + break; + case SYS_DVBC_ANNEX_A: + ret = si2165_set_frontend_dvbc(fe); + if (ret < 0) + return ret; + break; + default: + return -EINVAL; + } + /* dsp_addr_jump */ ret = si2165_writereg32(state, 0x0348, 0xf4000000); if (ret < 0) @@ -874,8 +968,7 @@ static int si2165_set_parameters(struct dvb_frontend *fe) fe->ops.tuner_ops.set_params(fe); /* recalc if_freq_shift if IF might has changed */ - fe->ops.tuner_ops.get_if_frequency(fe, &IF); - ret = si2165_set_if_freq_shift(state, IF); + ret = si2165_set_if_freq_shift(state); if (ret < 0) return ret; @@ -886,6 +979,7 @@ static int si2165_set_parameters(struct dvb_frontend *fe) ret = si2165_writereg8(state, 0x0341, 0x00); if (ret < 0) return ret; + /* reset all */ ret = si2165_writereg8(state, 0x00c0, 0x00); if (ret < 0) @@ -894,6 +988,13 @@ static int si2165_set_parameters(struct dvb_frontend *fe) ret = si2165_writereg32(state, 0x0384, 0x00000000); if (ret < 0) return ret; + + /* write adc values after each reset*/ + ret = si2165_write_reg_list(state, agc_rewrite, + ARRAY_SIZE(agc_rewrite)); + if (ret < 0) + return ret; + /* start_synchro */ ret = si2165_writereg8(state, 0x02e0, 0x01); if (ret < 0) @@ -917,7 +1018,12 @@ static void si2165_release(struct dvb_frontend *fe) static struct dvb_frontend_ops si2165_ops = { .info = { .name = "Silicon Labs ", - .caps = FE_CAN_FEC_1_2 | + /* For DVB-C */ + .symbol_rate_min = 1000000, + .symbol_rate_max = 7200000, + /* For DVB-T */ + .frequency_stepsize = 166667, + .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | @@ -930,7 +1036,6 @@ static struct dvb_frontend_ops si2165_ops = { FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | - FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_MUTE_TS | @@ -943,7 +1048,7 @@ static struct dvb_frontend_ops si2165_ops = { .init = si2165_init, .sleep = si2165_sleep, - .set_frontend = si2165_set_parameters, + .set_frontend = si2165_set_frontend, .read_status = si2165_read_status, .release = si2165_release, @@ -979,9 +1084,9 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config, } /* create dvb_frontend */ - memcpy(&state->frontend.ops, &si2165_ops, + memcpy(&state->fe.ops, &si2165_ops, sizeof(struct dvb_frontend_ops)); - state->frontend.demodulator_priv = state; + state->fe.demodulator_priv = state; /* powerup */ io_ret = si2165_writereg8(state, 0x0000, state->config.chip_mode); @@ -1033,20 +1138,22 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config, KBUILD_MODNAME, chip_name, rev_char, state->chip_type, state->chip_revcode); - strlcat(state->frontend.ops.info.name, chip_name, - sizeof(state->frontend.ops.info.name)); + strlcat(state->fe.ops.info.name, chip_name, + sizeof(state->fe.ops.info.name)); n = 0; if (state->has_dvbt) { - state->frontend.ops.delsys[n++] = SYS_DVBT; - strlcat(state->frontend.ops.info.name, " DVB-T", - sizeof(state->frontend.ops.info.name)); + state->fe.ops.delsys[n++] = SYS_DVBT; + strlcat(state->fe.ops.info.name, " DVB-T", + sizeof(state->fe.ops.info.name)); + } + if (state->has_dvbc) { + state->fe.ops.delsys[n++] = SYS_DVBC_ANNEX_A; + 
strlcat(state->fe.ops.info.name, " DVB-C", + sizeof(state->fe.ops.info.name)); } - if (state->has_dvbc) - dev_warn(&state->i2c->dev, "%s: DVB-C is not yet supported.\n", - KBUILD_MODNAME); - return &state->frontend; + return &state->fe; error: kfree(state); diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 4ef8a5c7003e..c978c801c7aa 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -252,6 +252,7 @@ static int stb6100_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { int rc; u8 f; + u32 bw; struct stb6100_state *state = fe->tuner_priv; rc = stb6100_read_reg(state, STB6100_F); @@ -259,9 +260,9 @@ static int stb6100_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) return rc; f = rc & STB6100_F_F; - state->status.bandwidth = (f + 5) * 2000; /* x2 for ZIF */ + bw = (f + 5) * 2000; /* x2 for ZIF */ - *bandwidth = state->bandwidth = state->status.bandwidth * 1000; + *bandwidth = state->bandwidth = bw * 1000; dprintk(verbose, FE_DEBUG, 1, "bandwidth = %u Hz", state->bandwidth); return 0; } @@ -495,68 +496,28 @@ static int stb6100_sleep(struct dvb_frontend *fe) static int stb6100_init(struct dvb_frontend *fe) { struct stb6100_state *state = fe->tuner_priv; - struct tuner_state *status = &state->status; + int refclk = 27000000; /* Hz */ - status->tunerstep = 125000; - status->ifreq = 0; - status->refclock = 27000000; /* Hz */ - status->iqsense = 1; - status->bandwidth = 36000; /* kHz */ - state->bandwidth = status->bandwidth * 1000; /* Hz */ - state->reference = status->refclock / 1000; /* kHz */ + /* + * iqsense = 1 + * tunerstep = 125000 + */ + state->bandwidth = 36000000; /* Hz */ + state->reference = refclk / 1000; /* kHz */ /* Set default bandwidth. 
Modified, PN 13-May-10 */ return 0; } -static int stb6100_get_state(struct dvb_frontend *fe, - enum tuner_param param, - struct tuner_state *state) +static int stb6100_set_params(struct dvb_frontend *fe) { - switch (param) { - case DVBFE_TUNER_FREQUENCY: - stb6100_get_frequency(fe, &state->frequency); - break; - case DVBFE_TUNER_TUNERSTEP: - break; - case DVBFE_TUNER_IFFREQ: - break; - case DVBFE_TUNER_BANDWIDTH: - stb6100_get_bandwidth(fe, &state->bandwidth); - break; - case DVBFE_TUNER_REFCLOCK: - break; - default: - break; - } + struct dtv_frontend_properties *c = &fe->dtv_property_cache; - return 0; -} + if (c->frequency > 0) + stb6100_set_frequency(fe, c->frequency); -static int stb6100_set_state(struct dvb_frontend *fe, - enum tuner_param param, - struct tuner_state *state) -{ - struct stb6100_state *tstate = fe->tuner_priv; - - switch (param) { - case DVBFE_TUNER_FREQUENCY: - stb6100_set_frequency(fe, state->frequency); - tstate->frequency = state->frequency; - break; - case DVBFE_TUNER_TUNERSTEP: - break; - case DVBFE_TUNER_IFFREQ: - break; - case DVBFE_TUNER_BANDWIDTH: - stb6100_set_bandwidth(fe, state->bandwidth); - tstate->bandwidth = state->bandwidth; - break; - case DVBFE_TUNER_REFCLOCK: - break; - default: - break; - } + if (c->bandwidth_hz > 0) + stb6100_set_bandwidth(fe, c->bandwidth_hz); return 0; } @@ -572,8 +533,9 @@ static struct dvb_tuner_ops stb6100_ops = { .init = stb6100_init, .sleep = stb6100_sleep, .get_status = stb6100_get_status, - .get_state = stb6100_get_state, - .set_state = stb6100_set_state, + .set_params = stb6100_set_params, + .get_frequency = stb6100_get_frequency, + .get_bandwidth = stb6100_get_bandwidth, .release = stb6100_release }; diff --git a/drivers/media/dvb-frontends/stb6100.h b/drivers/media/dvb-frontends/stb6100.h index 218c8188865d..f7b468b6dc26 100644 --- a/drivers/media/dvb-frontends/stb6100.h +++ b/drivers/media/dvb-frontends/stb6100.h @@ -86,7 +86,6 @@ struct stb6100_state { const struct stb6100_config *config; struct dvb_tuner_ops ops; struct dvb_frontend *frontend; - struct tuner_state status; u32 frequency; u32 srate; diff --git a/drivers/media/dvb-frontends/stb6100_cfg.h b/drivers/media/dvb-frontends/stb6100_cfg.h index 6edc15365847..2ef67aa768b9 100644 --- a/drivers/media/dvb-frontends/stb6100_cfg.h +++ b/drivers/media/dvb-frontends/stb6100_cfg.h @@ -19,20 +19,21 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/dvb/frontend.h> +#include "dvb_frontend.h" + static int stb6100_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; int err = 0; - if (tuner_ops->get_state) { - err = tuner_ops->get_state(fe, DVBFE_TUNER_FREQUENCY, &t_state); + if (tuner_ops->get_frequency) { + err = tuner_ops->get_frequency(fe, frequency); if (err < 0) { printk("%s: Invalid parameter\n", __func__); return err; } - *frequency = t_state.frequency; } return 0; } @@ -41,13 +42,16 @@ static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + u32 bw = c->bandwidth_hz; int err = 0; - t_state.frequency = frequency; + c->frequency = frequency; + c->bandwidth_hz = 0; /* Don't adjust the bandwidth */ - if (tuner_ops->set_state) { - err = tuner_ops->set_state(fe, DVBFE_TUNER_FREQUENCY, &t_state); + if (tuner_ops->set_params) { + err = tuner_ops->set_params(fe); + c->bandwidth_hz = bw; if (err < 0) { printk("%s: Invalid parameter\n", __func__); return err; @@ -60,16 +64,14 @@ static int stb6100_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; int err = 0; - if (tuner_ops->get_state) { - err = tuner_ops->get_state(fe, DVBFE_TUNER_BANDWIDTH, &t_state); + if (tuner_ops->get_bandwidth) { + err = tuner_ops->get_bandwidth(fe, bandwidth); if (err < 0) { printk("%s: Invalid parameter\n", __func__); return err; } - *bandwidth = t_state.bandwidth; } return 0; } @@ -78,13 +80,16 @@ static int stb6100_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + u32 freq = c->frequency; int err = 0; - t_state.bandwidth = bandwidth; + c->bandwidth_hz = bandwidth; + c->frequency = 0; /* Don't adjust the frequency */ - if (tuner_ops->set_state) { - err = tuner_ops->set_state(fe, DVBFE_TUNER_BANDWIDTH, &t_state); + if (tuner_ops->set_params) { + err = tuner_ops->set_params(fe); + c->frequency = freq; if (err < 0) { printk("%s: Invalid parameter\n", __func__); return err; diff --git a/drivers/media/dvb-frontends/stb6100_proc.h b/drivers/media/dvb-frontends/stb6100_proc.h index bd8a0ec9e2cc..50ffa21e3871 100644 --- a/drivers/media/dvb-frontends/stb6100_proc.h +++ b/drivers/media/dvb-frontends/stb6100_proc.h @@ -17,27 +17,27 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/dvb/frontend.h> +#include "dvb_frontend.h" + static int stb6100_get_freq(struct dvb_frontend *fe, u32 *frequency) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state state; int err = 0; - if (tuner_ops->get_state) { + if (tuner_ops->get_frequency) { if (frontend_ops->i2c_gate_ctrl) frontend_ops->i2c_gate_ctrl(fe, 1); - err = tuner_ops->get_state(fe, DVBFE_TUNER_FREQUENCY, &state); + err = tuner_ops->get_frequency(fe, frequency); if (err < 0) { - printk(KERN_ERR "%s: Invalid parameter\n", __func__); + printk("%s: Invalid parameter\n", __func__); return err; } if (frontend_ops->i2c_gate_ctrl) frontend_ops->i2c_gate_ctrl(fe, 0); - - *frequency = state.frequency; } return 0; @@ -47,18 +47,21 @@ static int stb6100_set_freq(struct dvb_frontend *fe, u32 frequency) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state state; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + u32 bw = c->bandwidth_hz; int err = 0; - state.frequency = frequency; + c->frequency = frequency; + c->bandwidth_hz = 0; /* Don't adjust the bandwidth */ - if (tuner_ops->set_state) { + if (tuner_ops->set_params) { if (frontend_ops->i2c_gate_ctrl) frontend_ops->i2c_gate_ctrl(fe, 1); - err = tuner_ops->set_state(fe, DVBFE_TUNER_FREQUENCY, &state); + err = tuner_ops->set_params(fe); + c->bandwidth_hz = bw; if (err < 0) { - printk(KERN_ERR "%s: Invalid parameter\n", __func__); + printk("%s: Invalid parameter\n", __func__); return err; } @@ -74,14 +77,13 @@ static int stb6100_get_bandw(struct dvb_frontend *fe, u32 *bandwidth) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state state; int err = 0; - if (tuner_ops->get_state) { + if (tuner_ops->get_bandwidth) { if (frontend_ops->i2c_gate_ctrl) frontend_ops->i2c_gate_ctrl(fe, 1); - err = tuner_ops->get_state(fe, DVBFE_TUNER_BANDWIDTH, &state); + err = tuner_ops->get_bandwidth(fe, bandwidth); if (err < 0) { printk(KERN_ERR "%s: Invalid parameter\n", __func__); return err; @@ -89,8 +91,6 @@ static int stb6100_get_bandw(struct dvb_frontend *fe, u32 *bandwidth) if (frontend_ops->i2c_gate_ctrl) frontend_ops->i2c_gate_ctrl(fe, 0); - - *bandwidth = state.bandwidth; } return 0; @@ -100,16 +100,19 @@ static int stb6100_set_bandw(struct dvb_frontend *fe, u32 bandwidth) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state state; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + u32 freq = c->frequency; int err = 0; - state.bandwidth = bandwidth; + c->bandwidth_hz = bandwidth; + c->frequency = 0; /* Don't adjust the frequency */ - if (tuner_ops->set_state) { + if (tuner_ops->set_params) { if (frontend_ops->i2c_gate_ctrl) frontend_ops->i2c_gate_ctrl(fe, 1); - err = tuner_ops->set_state(fe, DVBFE_TUNER_BANDWIDTH, &state); + err = tuner_ops->set_params(fe); + c->frequency = freq; if (err < 0) { printk(KERN_ERR "%s: Invalid parameter\n", __func__); return err; diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c index 63cc12378d9a..82f8cc534f33 100644 --- a/drivers/media/dvb-frontends/tda665x.c +++ b/drivers/media/dvb-frontends/tda665x.c @@ -66,26 +66,13 @@ exit: return err; } -static int tda665x_get_state(struct dvb_frontend *fe, - enum tuner_param param, - struct tuner_state *tstate) +static int 
tda665x_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tda665x_state *state = fe->tuner_priv; - int err = 0; - switch (param) { - case DVBFE_TUNER_FREQUENCY: - tstate->frequency = state->frequency; - break; - case DVBFE_TUNER_BANDWIDTH: - break; - default: - printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param); - err = -EINVAL; - break; - } + *frequency = state->frequency; - return err; + return 0; } static int tda665x_get_status(struct dvb_frontend *fe, u32 *status) @@ -111,9 +98,8 @@ exit: return err; } -static int tda665x_set_state(struct dvb_frontend *fe, - enum tuner_param param, - struct tuner_state *tstate) +static int tda665x_set_frequency(struct dvb_frontend *fe, + u32 new_frequency) { struct tda665x_state *state = fe->tuner_priv; const struct tda665x_config *config = state->config; @@ -121,88 +107,96 @@ static int tda665x_set_state(struct dvb_frontend *fe, u8 buf[4]; int err = 0; - if (param & DVBFE_TUNER_FREQUENCY) { - - frequency = tstate->frequency; - if ((frequency < config->frequency_max) || (frequency > config->frequency_min)) { - printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", __func__, frequency); - return -EINVAL; - } - - frequency += config->frequency_offst; - frequency *= config->ref_multiplier; - frequency += config->ref_divider >> 1; - frequency /= config->ref_divider; - - buf[0] = (u8) ((frequency & 0x7f00) >> 8); - buf[1] = (u8) (frequency & 0x00ff) >> 0; - buf[2] = 0x80 | 0x40 | 0x02; - buf[3] = 0x00; - - /* restore frequency */ - frequency = tstate->frequency; - - if (frequency < 153000000) { - /* VHF-L */ - buf[3] |= 0x01; /* fc, Low Band, 47 - 153 MHz */ - if (frequency < 68000000) - buf[3] |= 0x40; /* 83uA */ - if (frequency < 1040000000) - buf[3] |= 0x60; /* 122uA */ - if (frequency < 1250000000) - buf[3] |= 0x80; /* 163uA */ - else - buf[3] |= 0xa0; /* 254uA */ - } else if (frequency < 438000000) { - /* VHF-H */ - buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */ - if (frequency < 230000000) - buf[3] |= 0x40; - if (frequency < 300000000) - buf[3] |= 0x60; - else - buf[3] |= 0x80; - } else { - /* UHF */ - buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */ - if (frequency < 470000000) - buf[3] |= 0x60; - if (frequency < 526000000) - buf[3] |= 0x80; - else - buf[3] |= 0xa0; - } - - /* Set params */ - err = tda665x_write(state, buf, 5); - if (err < 0) - goto exit; - - /* sleep for some time */ - printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__); - msleep(20); - /* check status */ - err = tda665x_get_status(fe, &status); - if (err < 0) - goto exit; - - if (status == 1) { - printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", __func__, status); - state->frequency = frequency; /* cache successful state */ - } else { - printk(KERN_ERR "%s: No Phase lock: status=%d\n", __func__, status); - } - } else { - printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param); + if ((new_frequency < config->frequency_max) + || (new_frequency > config->frequency_min)) { + printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", + __func__, new_frequency); return -EINVAL; } + frequency = new_frequency; + + frequency += config->frequency_offst; + frequency *= config->ref_multiplier; + frequency += config->ref_divider >> 1; + frequency /= config->ref_divider; + + buf[0] = (u8) ((frequency & 0x7f00) >> 8); + buf[1] = (u8) (frequency & 0x00ff) >> 0; + buf[2] = 0x80 | 0x40 | 0x02; + buf[3] = 0x00; + + /* restore frequency */ + frequency = new_frequency; + + if (frequency < 153000000) { + /* VHF-L */ + buf[3] 
|= 0x01; /* fc, Low Band, 47 - 153 MHz */ + if (frequency < 68000000) + buf[3] |= 0x40; /* 83uA */ + if (frequency < 1040000000) + buf[3] |= 0x60; /* 122uA */ + if (frequency < 1250000000) + buf[3] |= 0x80; /* 163uA */ + else + buf[3] |= 0xa0; /* 254uA */ + } else if (frequency < 438000000) { + /* VHF-H */ + buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */ + if (frequency < 230000000) + buf[3] |= 0x40; + if (frequency < 300000000) + buf[3] |= 0x60; + else + buf[3] |= 0x80; + } else { + /* UHF */ + buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */ + if (frequency < 470000000) + buf[3] |= 0x60; + if (frequency < 526000000) + buf[3] |= 0x80; + else + buf[3] |= 0xa0; + } + + /* Set params */ + err = tda665x_write(state, buf, 5); + if (err < 0) + goto exit; + + /* sleep for some time */ + printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__); + msleep(20); + /* check status */ + err = tda665x_get_status(fe, &status); + if (err < 0) + goto exit; + + if (status == 1) { + printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", + __func__, status); + state->frequency = frequency; /* cache successful state */ + } else { + printk(KERN_ERR "%s: No Phase lock: status=%d\n", + __func__, status); + } + return 0; exit: printk(KERN_ERR "%s: I/O Error\n", __func__); return err; } +static int tda665x_set_params(struct dvb_frontend *fe) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + + tda665x_set_frequency(fe, c->frequency); + + return 0; +} + static int tda665x_release(struct dvb_frontend *fe) { struct tda665x_state *state = fe->tuner_priv; @@ -213,10 +207,9 @@ static int tda665x_release(struct dvb_frontend *fe) } static struct dvb_tuner_ops tda665x_ops = { - - .set_state = tda665x_set_state, - .get_state = tda665x_get_state, .get_status = tda665x_get_status, + .set_params = tda665x_set_params, + .get_frequency = tda665x_get_frequency, .release = tda665x_release }; diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c index 19c488814e5c..3285b1bc4642 100644 --- a/drivers/media/dvb-frontends/tda8261.c +++ b/drivers/media/dvb-frontends/tda8261.c @@ -83,88 +83,71 @@ static int tda8261_get_status(struct dvb_frontend *fe, u32 *status) static const u32 div_tab[] = { 2000, 1000, 500, 250, 125 }; /* kHz */ static const u8 ref_div[] = { 0x00, 0x01, 0x02, 0x05, 0x07 }; -static int tda8261_get_state(struct dvb_frontend *fe, - enum tuner_param param, - struct tuner_state *tstate) +static int tda8261_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tda8261_state *state = fe->tuner_priv; - int err = 0; - switch (param) { - case DVBFE_TUNER_FREQUENCY: - tstate->frequency = state->frequency; - break; - case DVBFE_TUNER_BANDWIDTH: - tstate->bandwidth = 40000000; /* FIXME! 
need to calculate Bandwidth */ - break; - default: - pr_err("%s: Unknown parameter (param=%d)\n", __func__, param); - err = -EINVAL; - break; - } + *frequency = state->frequency; - return err; + return 0; } -static int tda8261_set_state(struct dvb_frontend *fe, - enum tuner_param param, - struct tuner_state *tstate) +static int tda8261_set_params(struct dvb_frontend *fe) { + struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct tda8261_state *state = fe->tuner_priv; const struct tda8261_config *config = state->config; u32 frequency, N, status = 0; u8 buf[4]; int err = 0; - if (param & DVBFE_TUNER_FREQUENCY) { - /** - * N = Max VCO Frequency / Channel Spacing - * Max VCO Frequency = VCO frequency + (channel spacing - 1) - * (to account for half channel spacing on either side) - */ - frequency = tstate->frequency; - if ((frequency < 950000) || (frequency > 2150000)) { - pr_warn("%s: Frequency beyond limits, frequency=%d\n", __func__, frequency); - return -EINVAL; - } - N = (frequency + (div_tab[config->step_size] - 1)) / div_tab[config->step_size]; - pr_debug("%s: Step size=%d, Divider=%d, PG=0x%02x (%d)\n", - __func__, config->step_size, div_tab[config->step_size], N, N); - - buf[0] = (N >> 8) & 0xff; - buf[1] = N & 0xff; - buf[2] = (0x01 << 7) | ((ref_div[config->step_size] & 0x07) << 1); - - if (frequency < 1450000) - buf[3] = 0x00; - else if (frequency < 2000000) - buf[3] = 0x40; - else if (frequency < 2150000) - buf[3] = 0x80; - - /* Set params */ - if ((err = tda8261_write(state, buf)) < 0) { - pr_err("%s: I/O Error\n", __func__); - return err; - } - /* sleep for some time */ - pr_debug("%s: Waiting to Phase LOCK\n", __func__); - msleep(20); - /* check status */ - if ((err = tda8261_get_status(fe, &status)) < 0) { - pr_err("%s: I/O Error\n", __func__); - return err; - } - if (status == 1) { - pr_debug("%s: Tuner Phase locked: status=%d\n", __func__, status); - state->frequency = frequency; /* cache successful state */ - } else { - pr_debug("%s: No Phase lock: status=%d\n", __func__, status); - } - } else { - pr_err("%s: Unknown parameter (param=%d)\n", __func__, param); + /* + * N = Max VCO Frequency / Channel Spacing + * Max VCO Frequency = VCO frequency + (channel spacing - 1) + * (to account for half channel spacing on either side) + */ + frequency = c->frequency; + if ((frequency < 950000) || (frequency > 2150000)) { + pr_warn("%s: Frequency beyond limits, frequency=%d\n", + __func__, frequency); return -EINVAL; } + N = (frequency + (div_tab[config->step_size] - 1)) / div_tab[config->step_size]; + pr_debug("%s: Step size=%d, Divider=%d, PG=0x%02x (%d)\n", + __func__, config->step_size, div_tab[config->step_size], N, N); + + buf[0] = (N >> 8) & 0xff; + buf[1] = N & 0xff; + buf[2] = (0x01 << 7) | ((ref_div[config->step_size] & 0x07) << 1); + + if (frequency < 1450000) + buf[3] = 0x00; + else if (frequency < 2000000) + buf[3] = 0x40; + else if (frequency < 2150000) + buf[3] = 0x80; + + /* Set params */ + err = tda8261_write(state, buf); + if (err < 0) { + pr_err("%s: I/O Error\n", __func__); + return err; + } + /* sleep for some time */ + pr_debug("%s: Waiting to Phase LOCK\n", __func__); + msleep(20); + /* check status */ + if ((err = tda8261_get_status(fe, &status)) < 0) { + pr_err("%s: I/O Error\n", __func__); + return err; + } + if (status == 1) { + pr_debug("%s: Tuner Phase locked: status=%d\n", __func__, + status); + state->frequency = frequency; /* cache successful state */ + } else { + pr_debug("%s: No Phase lock: status=%d\n", __func__, status); + } return 0; } 
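For reference, here is a worked example of the PLL divider math in tda8261_set_params() above; the input values are made up for illustration and do not come from this patch. With config->step_size = 0 the channel spacing is div_tab[0] = 2000 kHz, so a request for c->frequency = 1550000 kHz yields:

	N      = (1550000 + 2000 - 1) / 2000 = 775 = 0x307   /* integer division */
	buf[0] = (N >> 8) & 0xff                       = 0x03
	buf[1] = N & 0xff                              = 0x07
	buf[2] = (0x01 << 7) | ((ref_div[0] & 0x07) << 1) = 0x80
	buf[3] = 0x40   /* since 1450000 <= 1550000 < 2000000 */

i.e. the tuner is programmed to the 775th 2 MHz channel step (775 * 2000 kHz = 1550000 kHz).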
@@ -182,14 +165,13 @@ static struct dvb_tuner_ops tda8261_ops = { .info = { .name = "TDA8261", -// .tuner_name = NULL, .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 0 }, - .set_state = tda8261_set_state, - .get_state = tda8261_get_state, + .set_params = tda8261_set_params, + .get_frequency = tda8261_get_frequency, .get_status = tda8261_get_status, .release = tda8261_release }; @@ -210,10 +192,7 @@ struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe, fe->ops.tuner_ops = tda8261_ops; fe->ops.tuner_ops.info.frequency_step = div_tab[config->step_size]; -// fe->ops.tuner_ops.tuner_name = &config->buf; -// printk("%s: Attaching %s TDA8261 8PSK/QPSK tuner\n", -// __func__, fe->ops.tuner_ops.tuner_name); pr_info("%s: Attaching TDA8261 8PSK/QPSK tuner\n", __func__); return fe; diff --git a/drivers/media/dvb-frontends/tda8261_cfg.h b/drivers/media/dvb-frontends/tda8261_cfg.h index 04a19e14ee5a..fe527ff84df4 100644 --- a/drivers/media/dvb-frontends/tda8261_cfg.h +++ b/drivers/media/dvb-frontends/tda8261_cfg.h @@ -21,17 +21,15 @@ static int tda8261_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; int err = 0; - if (tuner_ops->get_state) { - err = tuner_ops->get_state(fe, DVBFE_TUNER_FREQUENCY, &t_state); + if (tuner_ops->get_frequency) { + err = tuner_ops->get_frequency(fe, frequency); if (err < 0) { - printk("%s: Invalid parameter\n", __func__); + pr_err("%s: Invalid parameter\n", __func__); return err; } - *frequency = t_state.frequency; - printk("%s: Frequency=%d\n", __func__, t_state.frequency); + pr_debug("%s: Frequency=%d\n", __func__, *frequency); } return 0; } @@ -40,37 +38,24 @@ static int tda8261_set_frequency(struct dvb_frontend *fe, u32 frequency) { struct dvb_frontend_ops *frontend_ops = &fe->ops; struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; int err = 0; - t_state.frequency = frequency; - - if (tuner_ops->set_state) { - err = tuner_ops->set_state(fe, DVBFE_TUNER_FREQUENCY, &t_state); + if (tuner_ops->set_params) { + err = tuner_ops->set_params(fe); if (err < 0) { - printk("%s: Invalid parameter\n", __func__); + pr_err("%s: Invalid parameter\n", __func__); return err; } } - printk("%s: Frequency=%d\n", __func__, t_state.frequency); + pr_debug("%s: Frequency=%d\n", __func__, c->frequency); return 0; } static int tda8261_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { - struct dvb_frontend_ops *frontend_ops = &fe->ops; - struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops; - struct tuner_state t_state; - int err = 0; + /* FIXME! 
need to calculate Bandwidth */ + *bandwidth = 40000000; - if (tuner_ops->get_state) { - err = tuner_ops->get_state(fe, DVBFE_TUNER_BANDWIDTH, &t_state); - if (err < 0) { - printk("%s: Invalid parameter\n", __func__); - return err; - } - *bandwidth = t_state.bandwidth; - printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth); - } return 0; } diff --git a/drivers/media/dvb-frontends/tdhd1.h b/drivers/media/dvb-frontends/tdhd1.h index 17750985db0c..2b9e8732c802 100644 --- a/drivers/media/dvb-frontends/tdhd1.h +++ b/drivers/media/dvb-frontends/tdhd1.h @@ -20,7 +20,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * The project's page is at http://www.linuxtv.org + * The project's page is at https://linuxtv.org */ #ifndef TDHD1_H diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 521bbf1b29bc..993dc50c12db 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -83,6 +83,16 @@ config VIDEO_MSP3400 To compile this driver as a module, choose M here: the module will be called msp3400. +config VIDEO_CS3308 + tristate "Cirrus Logic CS3308 audio ADC" + depends on VIDEO_V4L2 && I2C + ---help--- + Support for the Cirrus Logic CS3308 High Performance 8-Channel + Analog Volume Control + + To compile this driver as a module, choose M here: the + module will be called cs3308. + config VIDEO_CS5345 tristate "Cirrus Logic CS5345 audio ADC" depends on VIDEO_V4L2 && I2C diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 07db257abfc1..94f2c99e890d 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_VIDEO_TVP7002) += tvp7002.o obj-$(CONFIG_VIDEO_TW2804) += tw2804.o obj-$(CONFIG_VIDEO_TW9903) += tw9903.o obj-$(CONFIG_VIDEO_TW9906) += tw9906.o +obj-$(CONFIG_VIDEO_CS3308) += cs3308.o obj-$(CONFIG_VIDEO_CS5345) += cs5345.o obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o obj-$(CONFIG_VIDEO_M52790) += m52790.o diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c index 69094ab047b1..0494a7896aa2 100644 --- a/drivers/media/i2c/ad9389b.c +++ b/drivers/media/i2c/ad9389b.c @@ -35,7 +35,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-dv-timings.h> #include <media/v4l2-ctrls.h> -#include <media/ad9389b.h> +#include <media/i2c/ad9389b.h> static int debug; module_param(debug, int, 0644); diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c index 5dd39775d6ca..f00745bbe471 100644 --- a/drivers/media/i2c/adp1653.c +++ b/drivers/media/i2c/adp1653.c @@ -37,7 +37,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/gpio/consumer.h> -#include <media/adp1653.h> +#include <media/i2c/adp1653.h> #include <media/v4l2-device.h> #define TIMEOUT_MAX 820000 diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c index f82c8aa164fa..3c3c4bfe3866 100644 --- a/drivers/media/i2c/adv7180.c +++ b/drivers/media/i2c/adv7180.c @@ -1112,7 +1112,7 @@ static int init_device(struct adv7180_state *state) mutex_lock(&state->mutex); adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES); - usleep_range(2000, 10000); + usleep_range(5000, 10000); ret = state->chip_info->init(state); if (ret) diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c index e2dd1617662f..2bec737881e9 100644 --- a/drivers/media/i2c/adv7183.c +++ b/drivers/media/i2c/adv7183.c @@ -27,7 +27,7 @@ #include <linux/types.h> #include <linux/videodev2.h> -#include <media/adv7183.h> +#include <media/i2c/adv7183.h> #include <media/v4l2-ctrls.h> #include 
<media/v4l2-device.h> diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c index f89d0afcd964..11f9029433cf 100644 --- a/drivers/media/i2c/adv7343.c +++ b/drivers/media/i2c/adv7343.c @@ -28,7 +28,7 @@ #include <linux/of.h> #include <linux/of_graph.h> -#include <media/adv7343.h> +#include <media/i2c/adv7343.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c index 0215f95c2245..76d987476e35 100644 --- a/drivers/media/i2c/adv7393.c +++ b/drivers/media/i2c/adv7393.c @@ -31,7 +31,7 @@ #include <linux/videodev2.h> #include <linux/uaccess.h> -#include <media/adv7393.h> +#include <media/i2c/adv7393.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c index e4900df1140b..eeb2cd823c4d 100644 --- a/drivers/media/i2c/adv7511.c +++ b/drivers/media/i2c/adv7511.c @@ -32,7 +32,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-dv-timings.h> -#include <media/adv7511.h> +#include <media/i2c/adv7511.h> static int debug; module_param(debug, int, 0644); @@ -1116,7 +1116,7 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd, adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5); adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6); adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2)); - adv7511_wr_and_or(sd, 0x59, 0x0f, yq << 4); + adv7511_wr_and_or(sd, 0x59, 0x3f, yq << 6); adv7511_wr_and_or(sd, 0x4a, 0xff, 1); return 0; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 5631ec004eed..745286225655 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -39,7 +39,7 @@ #include <linux/workqueue.h> #include <linux/regmap.h> -#include <media/adv7604.h> +#include <media/i2c/adv7604.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> @@ -905,7 +905,7 @@ static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd, for (i = 0; predef_vid_timings[i].timings.bt.width; i++) { if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings, - is_digital_input(sd) ? 250000 : 1000000)) + is_digital_input(sd) ? 250000 : 1000000, false)) continue; io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */ io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) + @@ -1479,7 +1479,7 @@ static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd, for (i = 0; adv76xx_timings[i].bt.width; i++) { if (v4l2_match_dv_timings(timings, &adv76xx_timings[i], - is_digital_input(sd) ? 250000 : 1000000)) { + is_digital_input(sd) ? 
250000 : 1000000, false)) { *timings = adv76xx_timings[i]; break; } @@ -1644,7 +1644,7 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd, if (!timings) return -EINVAL; - if (v4l2_match_dv_timings(&state->timings, timings, 0)) { + if (v4l2_match_dv_timings(&state->timings, timings, 0, false)) { v4l2_dbg(1, debug, sd, "%s: no change\n", __func__); return 0; } diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index b7269b8f040d..69378e4914b6 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -43,7 +43,7 @@ #include <media/v4l2-event.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-dv-timings.h> -#include <media/adv7842.h> +#include <media/i2c/adv7842.h> static int debug; module_param(debug, int, 0644); @@ -155,7 +155,7 @@ static bool adv7842_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl) int i; for (i = 0; adv7842_timings_exceptions[i].bt.width; i++) - if (v4l2_match_dv_timings(t, adv7842_timings_exceptions + i, 0)) + if (v4l2_match_dv_timings(t, adv7842_timings_exceptions + i, 0, false)) return false; return true; } @@ -1008,7 +1008,7 @@ static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd, for (i = 0; predef_vid_timings[i].timings.bt.width; i++) { if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings, - is_digital_input(sd) ? 250000 : 1000000)) + is_digital_input(sd) ? 250000 : 1000000, false)) continue; /* video std */ io_write(sd, 0x00, predef_vid_timings[i].vid_std); @@ -1659,7 +1659,7 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd, if (state->mode == ADV7842_MODE_SDP) return -ENODATA; - if (v4l2_match_dv_timings(&state->timings, timings, 0)) { + if (v4l2_match_dv_timings(&state->timings, timings, 0, false)) { v4l2_dbg(1, debug, sd, "%s: no change\n", __func__); return 0; } diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c index d3b965ec3bbc..d9f2b6b76d59 100644 --- a/drivers/media/i2c/ak881x.c +++ b/drivers/media/i2c/ak881x.c @@ -15,7 +15,7 @@ #include <linux/videodev2.h> #include <linux/module.h> -#include <media/ak881x.h> +#include <media/i2c/ak881x.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/as3645a.c b/drivers/media/i2c/as3645a.c index 301084b07887..29a2e7034aa6 100644 --- a/drivers/media/i2c/as3645a.c +++ b/drivers/media/i2c/as3645a.c @@ -31,7 +31,7 @@ #include <linux/mutex.h> #include <linux/slab.h> -#include <media/as3645a.h> +#include <media/i2c/as3645a.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c index e00e3104d448..7907bcfbaed3 100644 --- a/drivers/media/i2c/bt819.c +++ b/drivers/media/i2c/bt819.c @@ -37,7 +37,7 @@ #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> -#include <media/bt819.h> +#include <media/i2c/bt819.h> MODULE_DESCRIPTION("Brooktree-819 video decoder driver"); MODULE_AUTHOR("Mike Bernson & Dave Perks"); diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c new file mode 100644 index 000000000000..d28b4f37fe5f --- /dev/null +++ b/drivers/media/i2c/cs3308.c @@ -0,0 +1,138 @@ +/* + * Cirrus Logic cs3308 8-Channel Analog Volume Control + * + * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com> + * Copyright (C) 2012 Steven Toth <stoth@kernellabs.com> + * + * Derived from cs5345.c Copyright (C) 2007 Hans Verkuil + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General 
Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/videodev2.h> +#include <media/v4l2-device.h> + +MODULE_DESCRIPTION("i2c device driver for cs3308 8-channel volume control"); +MODULE_AUTHOR("Devin Heitmueller"); +MODULE_LICENSE("GPL"); + +static inline int cs3308_write(struct v4l2_subdev *sd, u8 reg, u8 value) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + return i2c_smbus_write_byte_data(client, reg, value); +} + +static inline int cs3308_read(struct v4l2_subdev *sd, u8 reg) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + return i2c_smbus_read_byte_data(client, reg); +} + +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int cs3308_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) +{ + reg->val = cs3308_read(sd, reg->reg & 0xffff); + reg->size = 1; + return 0; +} + +static int cs3308_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) +{ + cs3308_write(sd, reg->reg & 0xffff, reg->val & 0xff); + return 0; +} +#endif + +/* ----------------------------------------------------------------------- */ + +static const struct v4l2_subdev_core_ops cs3308_core_ops = { +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = cs3308_g_register, + .s_register = cs3308_s_register, +#endif +}; + +static const struct v4l2_subdev_ops cs3308_ops = { + .core = &cs3308_core_ops, +}; + +/* ----------------------------------------------------------------------- */ + +static int cs3308_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct v4l2_subdev *sd; + unsigned i; + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + if ((i2c_smbus_read_byte_data(client, 0x1c) & 0xf0) != 0xe0) + return -ENODEV; + + v4l_info(client, "chip found @ 0x%x (%s)\n", + client->addr << 1, client->adapter->name); + + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + if (sd == NULL) + return -ENOMEM; + + v4l2_i2c_subdev_init(sd, client, &cs3308_ops); + + /* Set some reasonable defaults */ + cs3308_write(sd, 0x0d, 0x00); /* Power up all channels */ + cs3308_write(sd, 0x0e, 0x00); /* Master Power */ + cs3308_write(sd, 0x0b, 0x00); /* Device Configuration */ + /* Set volume for each channel */ + for (i = 1; i <= 8; i++) + cs3308_write(sd, i, 0xd2); + cs3308_write(sd, 0x0a, 0x00); /* Unmute all channels */ + return 0; +} + +/* ----------------------------------------------------------------------- */ + +static int cs3308_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + + v4l2_device_unregister_subdev(sd); + kfree(sd); + return 0; +} + +/* ----------------------------------------------------------------------- */ + +static const struct i2c_device_id cs3308_id[] = { + { "cs3308", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, cs3308_id); + +static struct i2c_driver cs3308_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "cs3308", + }, + .probe = cs3308_probe, + .remove = cs3308_remove, + .id_table = cs3308_id, +}; + +module_i2c_driver(cs3308_driver); diff 
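The cs3308.c file added above only registers an I2C driver; a bridge driver still has to instantiate the chip as a v4l2 subdevice. A minimal sketch of that call, modelled on the ViewCast hook-up added to cx23885-cards.c later in this series ("dev" and its v4l2_dev/i2c_bus fields are placeholders for the bridge driver's own state):

	struct v4l2_subdev *sd;

	/* 7-bit address 0x41 (0x82 >> 1) on I2C bus 0, as wired on the ViewCast boards */
	sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[0].i2c_adap,
				 "cs3308", 0x82 >> 1, NULL);
	if (!sd)
		return -ENODEV;	/* no chip answered, or the 0x1c ID check in probe failed */
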
--git a/drivers/media/i2c/cx25840/cx25840-audio.c b/drivers/media/i2c/cx25840/cx25840-audio.c index 34b96c7cfd62..baf3d9c8710e 100644 --- a/drivers/media/i2c/cx25840/cx25840-audio.c +++ b/drivers/media/i2c/cx25840/cx25840-audio.c @@ -19,7 +19,7 @@ #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/v4l2-common.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include "cx25840-core.h" diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c index fe6eb78b6914..f2e2c34ddbbd 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.c +++ b/drivers/media/i2c/cx25840/cx25840-core.c @@ -45,7 +45,7 @@ #include <linux/delay.h> #include <linux/math64.h> #include <media/v4l2-common.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include "cx25840-core.h" @@ -559,7 +559,10 @@ static void cx23885_initialize(struct i2c_client *client) cx25840_write4(client, 0x414, 0x00107d12); /* Chroma */ - cx25840_write4(client, 0x420, 0x3d008282); + if (is_cx23888(state)) + cx25840_write4(client, 0x418, 0x1d008282); + else + cx25840_write4(client, 0x420, 0x3d008282); /* * Aux PLL @@ -666,14 +669,17 @@ static void cx23885_initialize(struct i2c_client *client) cx25840_write4(client, 0x404, 0x0010253e); /* CC on - Undocumented Register */ - cx25840_write(client, 0x42f, 0x66); + cx25840_write(client, state->vbi_regs_offset + 0x42f, 0x66); /* HVR-1250 / HVR1850 DIF related */ /* Power everything up */ cx25840_write4(client, 0x130, 0x0); /* Undocumented */ - cx25840_write4(client, 0x478, 0x6628021F); + if (is_cx23888(state)) + cx25840_write4(client, 0x454, 0x6628021F); + else + cx25840_write4(client, 0x478, 0x6628021F); /* AFE_CLK_OUT_CTRL - Select the clock output source as output */ cx25840_write4(client, 0x144, 0x5); @@ -1106,31 +1112,15 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp cx25840_write4(client, 0x410, 0xffff0dbf); cx25840_write4(client, 0x414, 0x00137d03); - /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is - CHROMA_CTRL */ - if (is_cx23888(state)) - cx25840_write4(client, 0x418, 0x01008080); - else - cx25840_write4(client, 0x418, 0x01000000); + cx25840_write4(client, state->vbi_regs_offset + 0x42c, 0x42600000); + cx25840_write4(client, state->vbi_regs_offset + 0x430, 0x0000039b); + cx25840_write4(client, state->vbi_regs_offset + 0x438, 0x00000000); - cx25840_write4(client, 0x41c, 0x00000000); - - /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is - CRUSH_CTRL */ - if (is_cx23888(state)) - cx25840_write4(client, 0x420, 0x001c3e0f); - else - cx25840_write4(client, 0x420, 0x001c8282); - - cx25840_write4(client, 0x42c, 0x42600000); - cx25840_write4(client, 0x430, 0x0000039b); - cx25840_write4(client, 0x438, 0x00000000); - - cx25840_write4(client, 0x440, 0xF8E3E824); - cx25840_write4(client, 0x444, 0x401040dc); - cx25840_write4(client, 0x448, 0xcd3f02a0); - cx25840_write4(client, 0x44c, 0x161f1000); - cx25840_write4(client, 0x450, 0x00000802); + cx25840_write4(client, state->vbi_regs_offset + 0x440, 0xF8E3E824); + cx25840_write4(client, state->vbi_regs_offset + 0x444, 0x401040dc); + cx25840_write4(client, state->vbi_regs_offset + 0x448, 0xcd3f02a0); + cx25840_write4(client, state->vbi_regs_offset + 0x44c, 0x161f1000); + cx25840_write4(client, state->vbi_regs_offset + 0x450, 0x00000802); cx25840_write4(client, 0x91c, 0x01000000); cx25840_write4(client, 0x8e0, 0x03063870); @@ -1400,8 +1390,14 @@ static int cx25840_set_fmt(struct v4l2_subdev *sd, Vlines = fmt->height + (is_50Hz ? 
4 : 7); + /* + * We keep 1 margin for the Vsrc < Vlines check since the + * cx23888 reports a Vsrc of 486 instead of 487 for the NTSC + * height. Without that margin the cx23885 fails in this + * check. + */ if ((fmt->width * 16 < Hsrc) || (Hsrc < fmt->width) || - (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) { + (Vlines * 8 < Vsrc) || (Vsrc + 1 < Vlines)) { v4l_err(client, "%dx%d is not a valid size!\n", fmt->width, fmt->height); return -ERANGE; @@ -1426,14 +1422,20 @@ static int cx25840_set_fmt(struct v4l2_subdev *sd, fmt->width, fmt->height, HSC, VSC); /* HSCALE=HSC */ - cx25840_write(client, 0x418, HSC & 0xff); - cx25840_write(client, 0x419, (HSC >> 8) & 0xff); - cx25840_write(client, 0x41a, HSC >> 16); - /* VSCALE=VSC */ - cx25840_write(client, 0x41c, VSC & 0xff); - cx25840_write(client, 0x41d, VSC >> 8); - /* VS_INTRLACE=1 VFILT=filter */ - cx25840_write(client, 0x41e, 0x8 | filter); + if (is_cx23888(state)) { + cx25840_write4(client, 0x434, HSC | (1 << 24)); + /* VSCALE=VSC VS_INTRLACE=1 VFILT=filter */ + cx25840_write4(client, 0x438, VSC | (1 << 19) | (filter << 16)); + } else { + cx25840_write(client, 0x418, HSC & 0xff); + cx25840_write(client, 0x419, (HSC >> 8) & 0xff); + cx25840_write(client, 0x41a, HSC >> 16); + /* VSCALE=VSC */ + cx25840_write(client, 0x41c, VSC & 0xff); + cx25840_write(client, 0x41d, VSC >> 8); + /* VS_INTRLACE=1 VFILT=filter */ + cx25840_write(client, 0x41e, 0x8 | filter); + } return 0; } @@ -1714,26 +1716,27 @@ static int cx25840_s_stream(struct v4l2_subdev *sd, int enable) v4l_dbg(1, cx25840_debug, client, "%s video output\n", enable ? "enable" : "disable"); + + /* + * It's not clear what should be done for these devices. + * The original code used the same addresses as for the cx25840, but + * those addresses do something else entirely on the cx2388x and + * cx231xx. Since it never did anything in the first place, just do + * nothing. + */ + if (is_cx2388x(state) || is_cx231xx(state)) + return 0; + if (enable) { - if (is_cx2388x(state) || is_cx231xx(state)) { - v = cx25840_read(client, 0x421) | 0x0b; - cx25840_write(client, 0x421, v); - } else { - v = cx25840_read(client, 0x115) | 0x0c; - cx25840_write(client, 0x115, v); - v = cx25840_read(client, 0x116) | 0x04; - cx25840_write(client, 0x116, v); - } + v = cx25840_read(client, 0x115) | 0x0c; + cx25840_write(client, 0x115, v); + v = cx25840_read(client, 0x116) | 0x04; + cx25840_write(client, 0x116, v); } else { - if (is_cx2388x(state) || is_cx231xx(state)) { - v = cx25840_read(client, 0x421) & ~(0x0b); - cx25840_write(client, 0x421, v); - } else { - v = cx25840_read(client, 0x115) & ~(0x0c); - cx25840_write(client, 0x115, v); - v = cx25840_read(client, 0x116) & ~(0x04); - cx25840_write(client, 0x116, v); - } + v = cx25840_read(client, 0x115) & ~(0x0c); + cx25840_write(client, 0x115, v); + v = cx25840_read(client, 0x116) & ~(0x04); + cx25840_write(client, 0x116, v); } return 0; } @@ -4974,7 +4977,7 @@ static void cx23888_std_setup(struct i2c_client *client) cx25840_write4(client, 0x4b4, 0x20524030); cx25840_write4(client, 0x47c, 0x010a8263); - if (std & V4L2_STD_NTSC) { + if (std & V4L2_STD_525_60) { v4l_dbg(1, cx25840_debug, client, "%s() Selecting NTSC", __func__); @@ -5264,6 +5267,8 @@ static int cx25840_probe(struct i2c_client *client, state->vbi_line_offset = 8; state->id = id; state->rev = device_id; + state->vbi_regs_offset = id == CX23888_AV ? 
0x500 - 0x424 : 0; + state->std = V4L2_STD_NTSC_M; v4l2_ctrl_handler_init(&state->hdl, 9); v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h index fdea48ce0c03..254ef45ce41a 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.h +++ b/drivers/media/i2c/cx25840/cx25840-core.h @@ -69,6 +69,7 @@ struct cx25840_state { enum cx25840_model id; u32 rev; int is_initialized; + unsigned vbi_regs_offset; wait_queue_head_t fw_wait; /* wake up when the fw load is finished */ struct work_struct fw_work; /* work entry for fw load */ struct cx25840_ir_state *ir_state; diff --git a/drivers/media/i2c/cx25840/cx25840-firmware.c b/drivers/media/i2c/cx25840/cx25840-firmware.c index 9bbb31adc29d..37e052923a87 100644 --- a/drivers/media/i2c/cx25840/cx25840-firmware.c +++ b/drivers/media/i2c/cx25840/cx25840-firmware.c @@ -19,7 +19,7 @@ #include <linux/i2c.h> #include <linux/firmware.h> #include <media/v4l2-common.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include "cx25840-core.h" diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c index 4cf8f18bf097..4b782012cadc 100644 --- a/drivers/media/i2c/cx25840/cx25840-ir.c +++ b/drivers/media/i2c/cx25840/cx25840-ir.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/kfifo.h> #include <linux/module.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include <media/rc-core.h> #include "cx25840-core.h" diff --git a/drivers/media/i2c/cx25840/cx25840-vbi.c b/drivers/media/i2c/cx25840/cx25840-vbi.c index c39e91dc1137..0470bb6128e1 100644 --- a/drivers/media/i2c/cx25840/cx25840-vbi.c +++ b/drivers/media/i2c/cx25840/cx25840-vbi.c @@ -19,7 +19,7 @@ #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/v4l2-common.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include "cx25840-core.h" @@ -104,7 +104,8 @@ int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format * if (is_pal) { for (i = 7; i <= 23; i++) { - u8 v = cx25840_read(client, 0x424 + i - 7); + u8 v = cx25840_read(client, + state->vbi_regs_offset + 0x424 + i - 7); svbi->service_lines[0][i] = lcr2vbi[v >> 4]; svbi->service_lines[1][i] = lcr2vbi[v & 0xf]; @@ -113,7 +114,8 @@ int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format * } } else { for (i = 10; i <= 21; i++) { - u8 v = cx25840_read(client, 0x424 + i - 10); + u8 v = cx25840_read(client, + state->vbi_regs_offset + 0x424 + i - 10); svbi->service_lines[0][i] = lcr2vbi[v >> 4]; svbi->service_lines[1][i] = lcr2vbi[v & 0xf]; @@ -135,7 +137,10 @@ int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt) cx25840_std_setup(client); /* VBI Offset */ - cx25840_write(client, 0x47f, vbi_offset); + if (is_cx23888(state)) + cx25840_write(client, 0x54f, vbi_offset); + else + cx25840_write(client, 0x47f, vbi_offset); cx25840_write(client, 0x404, 0x2e); return 0; } @@ -158,7 +163,10 @@ int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format * /* Sliced VBI */ cx25840_write(client, 0x404, 0x32); /* Ancillary data */ cx25840_write(client, 0x406, 0x13); - cx25840_write(client, 0x47f, vbi_offset); + if (is_cx23888(state)) + cx25840_write(client, 0x54f, vbi_offset); + else + cx25840_write(client, 0x47f, vbi_offset); if (is_pal) { for (i = 0; i <= 6; i++) @@ -194,17 +202,23 @@ int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct 
v4l2_sliced_vbi_format * } if (is_pal) { - for (x = 1, i = 0x424; i <= 0x434; i++, x++) + for (x = 1, i = state->vbi_regs_offset + 0x424; + i <= state->vbi_regs_offset + 0x434; i++, x++) cx25840_write(client, i, lcr[6 + x]); } else { - for (x = 1, i = 0x424; i <= 0x430; i++, x++) + for (x = 1, i = state->vbi_regs_offset + 0x424; + i <= state->vbi_regs_offset + 0x430; i++, x++) cx25840_write(client, i, lcr[9 + x]); - for (i = 0x431; i <= 0x434; i++) + for (i = state->vbi_regs_offset + 0x431; + i <= state->vbi_regs_offset + 0x434; i++) cx25840_write(client, i, 0); } - cx25840_write(client, 0x43c, 0x16); - cx25840_write(client, 0x474, is_pal ? 0x2a : 0x22); + cx25840_write(client, state->vbi_regs_offset + 0x43c, 0x16); + if (is_cx23888(state)) + cx25840_write(client, 0x428, is_pal ? 0x2a : 0x22); + else + cx25840_write(client, 0x474, is_pal ? 0x2a : 0x22); return 0; } diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index 728d2cc8a3e7..830491960add 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -47,7 +47,7 @@ #include <linux/workqueue.h> #include <media/rc-core.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> /* ----------------------------------------------------------------------- */ /* insmod parameters */ diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c index d9ece4b2d047..19ecb8801064 100644 --- a/drivers/media/i2c/lm3560.c +++ b/drivers/media/i2c/lm3560.c @@ -24,7 +24,7 @@ #include <linux/mutex.h> #include <linux/regmap.h> #include <linux/videodev2.h> -#include <media/lm3560.h> +#include <media/i2c/lm3560.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c index 626fb4679c02..7fbe6ff1c4f4 100644 --- a/drivers/media/i2c/lm3646.c +++ b/drivers/media/i2c/lm3646.c @@ -18,7 +18,7 @@ #include <linux/slab.h> #include <linux/regmap.h> #include <linux/videodev2.h> -#include <media/lm3646.h> +#include <media/i2c/lm3646.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c index 77eb07eb667e..81171d8e1c2c 100644 --- a/drivers/media/i2c/m52790.c +++ b/drivers/media/i2c/m52790.c @@ -27,7 +27,7 @@ #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> -#include <media/m52790.h> +#include <media/i2c/m52790.h> #include <media/v4l2-device.h> MODULE_DESCRIPTION("i2c device driver for m52790 A/V switch"); diff --git a/drivers/media/i2c/m5mols/m5mols_capture.c b/drivers/media/i2c/m5mols/m5mols_capture.c index 1a03d02bd4d1..a0cd6dc32eb0 100644 --- a/drivers/media/i2c/m5mols/m5mols_capture.c +++ b/drivers/media/i2c/m5mols/m5mols_capture.c @@ -25,8 +25,8 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> -#include <media/m5mols.h> -#include <media/exynos-fimc.h> +#include <media/i2c/m5mols.h> +#include <media/drv-intf/exynos-fimc.h> #include "m5mols.h" #include "m5mols_reg.h" diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 6404c0d93e7a..f8993933416e 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -25,7 +25,7 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> -#include <media/m5mols.h> +#include <media/i2c/m5mols.h> #include "m5mols.h" #include "m5mols_reg.h" diff --git a/drivers/media/i2c/msp3400-driver.c 
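The cx25840 hunks above rebase the sliced-VBI/LCR register block for the CX23888, whose copy of those registers starts at 0x500 rather than 0x424. A condensed sketch of the scheme, using only values taken from the diff (the probe code tests the chip id directly; is_cx23888() is the equivalent helper used elsewhere in the file):

	/* 0x500 - 0x424 = 0xdc on the CX23888, 0 on every other supported chip */
	state->vbi_regs_offset = is_cx23888(state) ? 0x500 - 0x424 : 0;

	/* legacy register numbers are simply shifted, e.g. the CC enable: */
	cx25840_write(client, state->vbi_regs_offset + 0x42f, 0x66);	/* lands at 0x50b on cx23888 */

A few registers do not follow the flat +0xdc rule and are special-cased per chip instead (0x47f vs 0x54f for the VBI offset, 0x474 vs 0x428), as the cx25840-vbi.c hunk shows.
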
b/drivers/media/i2c/msp3400-driver.c index bdb94000ba5a..a84561d0d4a8 100644 --- a/drivers/media/i2c/msp3400-driver.c +++ b/drivers/media/i2c/msp3400-driver.c @@ -56,8 +56,8 @@ #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> -#include <media/msp3400.h> -#include <media/tvaudio.h> +#include <media/drv-intf/msp3400.h> +#include <media/i2c/tvaudio.h> #include "msp3400-driver.h" /* ---------------------------------------------------------------------- */ diff --git a/drivers/media/i2c/msp3400-driver.h b/drivers/media/i2c/msp3400-driver.h index fbe5e0715f93..6cae21366ed5 100644 --- a/drivers/media/i2c/msp3400-driver.h +++ b/drivers/media/i2c/msp3400-driver.h @@ -4,7 +4,7 @@ #ifndef MSP3400_DRIVER_H #define MSP3400_DRIVER_H -#include <media/msp3400.h> +#include <media/drv-intf/msp3400.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c index f8b51714f2f9..17120804fab7 100644 --- a/drivers/media/i2c/msp3400-kthreads.c +++ b/drivers/media/i2c/msp3400-kthreads.c @@ -26,7 +26,7 @@ #include <linux/freezer.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> -#include <media/msp3400.h> +#include <media/drv-intf/msp3400.h> #include <linux/kthread.h> #include <linux/suspend.h> #include "msp3400-driver.h" diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c index c7747bd0cabb..101cb26f9330 100644 --- a/drivers/media/i2c/mt9m032.c +++ b/drivers/media/i2c/mt9m032.c @@ -31,7 +31,7 @@ #include <linux/v4l2-mediabus.h> #include <media/media-entity.h> -#include <media/mt9m032.h> +#include <media/i2c/mt9m032.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> @@ -671,7 +671,7 @@ static int mt9m032_set_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_ctrl_ops mt9m032_ctrl_ops = { +static const struct v4l2_ctrl_ops mt9m032_ctrl_ops = { .s_ctrl = mt9m032_set_ctrl, .try_ctrl = mt9m032_try_ctrl, }; diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c index 0db15f528ac1..a3da0e977d0b 100644 --- a/drivers/media/i2c/mt9p031.c +++ b/drivers/media/i2c/mt9p031.c @@ -26,7 +26,7 @@ #include <linux/slab.h> #include <linux/videodev2.h> -#include <media/mt9p031.h> +#include <media/i2c/mt9p031.h> #include <media/v4l2-async.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> @@ -817,7 +817,7 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_ctrl_ops mt9p031_ctrl_ops = { +static const struct v4l2_ctrl_ops mt9p031_ctrl_ops = { .s_ctrl = mt9p031_s_ctrl, }; diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c index 8ae99f7f254c..b28fdff1d310 100644 --- a/drivers/media/i2c/mt9t001.c +++ b/drivers/media/i2c/mt9t001.c @@ -21,7 +21,7 @@ #include <linux/videodev2.h> #include <linux/v4l2-mediabus.h> -#include <media/mt9t001.h> +#include <media/i2c/mt9t001.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> @@ -626,7 +626,7 @@ static int mt9t001_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_ctrl_ops mt9t001_ctrl_ops = { +static const struct v4l2_ctrl_ops mt9t001_ctrl_ops = { .s_ctrl = mt9t001_s_ctrl, }; diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c index a4a5c39b599b..b9fea11d6b0b 100644 --- a/drivers/media/i2c/mt9v011.c +++ b/drivers/media/i2c/mt9v011.c @@ -13,7 +13,7 @@ #include <asm/div64.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> 
-#include <media/mt9v011.h> +#include <media/i2c/mt9v011.h> MODULE_DESCRIPTION("Micron mt9v011 sensor driver"); MODULE_AUTHOR("Mauro Carvalho Chehab"); @@ -454,7 +454,7 @@ static int mt9v011_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_ctrl_ops mt9v011_ctrl_ops = { +static const struct v4l2_ctrl_ops mt9v011_ctrl_ops = { .s_ctrl = mt9v011_s_ctrl, }; diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c index a68ce94ee097..1dbbd23fdfb0 100644 --- a/drivers/media/i2c/mt9v032.c +++ b/drivers/media/i2c/mt9v032.c @@ -25,7 +25,7 @@ #include <linux/v4l2-mediabus.h> #include <linux/module.h> -#include <media/mt9v032.h> +#include <media/i2c/mt9v032.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-of.h> @@ -703,7 +703,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_ctrl_ops mt9v032_ctrl_ops = { +static const struct v4l2_ctrl_ops mt9v032_ctrl_ops = { .s_ctrl = mt9v032_s_ctrl, }; diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c index f197b6cbd407..69e4f3031d8b 100644 --- a/drivers/media/i2c/noon010pc30.c +++ b/drivers/media/i2c/noon010pc30.c @@ -18,7 +18,7 @@ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <media/noon010pc30.h> +#include <media/i2c/noon010pc30.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index 49109f4f5bb4..82c7ac1cc88e 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -37,7 +37,7 @@ #include <linux/videodev2.h> #include <media/media-entity.h> -#include <media/ov2659.h> +#include <media/i2c/ov2659.h> #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> @@ -1249,7 +1249,7 @@ static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_ctrl_ops ov2659_ctrl_ops = { +static const struct v4l2_ctrl_ops ov2659_ctrl_ops = { .s_ctrl = ov2659_s_ctrl, }; diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index e1b5dc84c14e..56cfb5ca9c95 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -20,7 +20,7 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-mediabus.h> #include <media/v4l2-image-sizes.h> -#include <media/ov7670.h> +#include <media/i2c/ov7670.h> MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>"); MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors"); diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index 1ee6a5527c38..9fe9006474b2 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -29,7 +29,7 @@ #include <media/v4l2-image-sizes.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> -#include <media/ov9650.h> +#include <media/i2c/ov9650.h> static int debug; module_param(debug, int, 0644); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 51b26010403c..25f5e79dc9bc 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -34,7 +34,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> -#include <media/s5c73m3.h> +#include <media/i2c/s5c73m3.h> #include <media/v4l2-of.h> #include "s5c73m3.h" diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c index 8001cde1db1e..0a060339e516 100644 --- 
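Several sensor drivers in this run (mt9m032, mt9p031, mt9t001, mt9v011, mt9v032, ov2659) receive the same one-word change: their v4l2_ctrl_ops tables are never written at run time, so they are marked const and move to read-only data, e.g.:

	static const struct v4l2_ctrl_ops mt9v011_ctrl_ops = {
		.s_ctrl = mt9v011_s_ctrl,
	};

The same constification is applied to other ops tables later in the series (snd_tea575x_ops in bttv, cx2341x_handler_ops in cx18).
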
a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c @@ -32,7 +32,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> -#include <media/s5c73m3.h> +#include <media/i2c/s5c73m3.h> #include "s5c73m3.h" diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c index 72ef9f936e6c..7d65b36434b1 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c @@ -37,6 +37,7 @@ enum spi_direction { SPI_DIR_RX, SPI_DIR_TX }; +MODULE_DEVICE_TABLE(of, s5c73m3_spi_ids); static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len, enum spi_direction dir) diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h index 13aed59f0f5d..653f68e7ea07 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3.h +++ b/drivers/media/i2c/s5c73m3/s5c73m3.h @@ -23,7 +23,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-subdev.h> -#include <media/s5c73m3.h> +#include <media/i2c/s5c73m3.h> #define DRIVER_NAME "S5C73M3" diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c index 97084237275d..6757aca2cdab 100644 --- a/drivers/media/i2c/s5k4ecgx.c +++ b/drivers/media/i2c/s5k4ecgx.c @@ -27,7 +27,7 @@ #include <asm/unaligned.h> #include <media/media-entity.h> -#include <media/s5k4ecgx.h> +#include <media/i2c/s5k4ecgx.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c index d0ad6a25bdab..60aaff7190d2 100644 --- a/drivers/media/i2c/s5k6aa.c +++ b/drivers/media/i2c/s5k6aa.c @@ -28,7 +28,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> -#include <media/s5k6aa.h> +#include <media/i2c/s5k6aa.h> static int debug; module_param(debug, int, 0644); diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c index 37e65f661d7a..89e458c23983 100644 --- a/drivers/media/i2c/saa6588.c +++ b/drivers/media/i2c/saa6588.c @@ -31,7 +31,7 @@ #include <linux/wait.h> #include <asm/uaccess.h> -#include <media/saa6588.h> +#include <media/i2c/saa6588.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index 91e75222c537..24d2b76dbe97 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -46,7 +46,7 @@ #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include <asm/div64.h> #define VRES_60HZ (480+16) diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c index a43d96da1017..8d94dcbf4366 100644 --- a/drivers/media/i2c/saa7127.c +++ b/drivers/media/i2c/saa7127.c @@ -54,7 +54,7 @@ #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> -#include <media/saa7127.h> +#include <media/i2c/saa7127.h> static int debug; static int test_image; diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h index ed010a8a49d7..f6af0cc4a256 100644 --- a/drivers/media/i2c/smiapp/smiapp.h +++ b/drivers/media/i2c/smiapp/smiapp.h @@ -22,7 +22,7 @@ #include <linux/mutex.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-subdev.h> -#include <media/smiapp.h> +#include <media/i2c/smiapp.h> #include "smiapp-pll.h" #include "smiapp-reg.h" diff --git a/drivers/media/i2c/soc_camera/mt9m001.c 
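Most of the single-line include changes in this section come from one tree-wide move: public headers for I2C helper chips now live under media/i2c/, and headers that define an interface between two drivers live under media/drv-intf/. The pattern, taken from the hunks themselves:

	/* before */
	#include <media/tvp5150.h>
	#include <media/cx2341x.h>

	/* after this series */
	#include <media/i2c/tvp5150.h>
	#include <media/drv-intf/cx2341x.h>

Only the include paths change; the headers' contents are untouched by the rename.
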
b/drivers/media/i2c/soc_camera/mt9m001.c index 4fbdd1e9f7ee..2e14e52ba2e0 100644 --- a/drivers/media/i2c/soc_camera/mt9m001.c +++ b/drivers/media/i2c/soc_camera/mt9m001.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-clk.h> #include <media/v4l2-subdev.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c index 2f35d31ca58e..6a1b2a9f9a09 100644 --- a/drivers/media/i2c/soc_camera/mt9t112.c +++ b/drivers/media/i2c/soc_camera/mt9t112.c @@ -25,7 +25,7 @@ #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> -#include <media/mt9t112.h> +#include <media/i2c/mt9t112.h> #include <media/soc_camera.h> #include <media/v4l2-clk.h> #include <media/v4l2-common.h> diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c index f31377408550..c2ba1fb3694d 100644 --- a/drivers/media/i2c/soc_camera/mt9v022.c +++ b/drivers/media/i2c/soc_camera/mt9v022.c @@ -15,9 +15,9 @@ #include <linux/log2.h> #include <linux/module.h> -#include <media/mt9v022.h> +#include <media/i2c/mt9v022.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-subdev.h> #include <media/v4l2-clk.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c index f150a8bd94dc..a43410c1e254 100644 --- a/drivers/media/i2c/soc_camera/ov772x.c +++ b/drivers/media/i2c/soc_camera/ov772x.c @@ -24,7 +24,7 @@ #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> -#include <media/ov772x.h> +#include <media/i2c/ov772x.h> #include <media/soc_camera.h> #include <media/v4l2-clk.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c index c769cf663f84..aa7bfbb4ad71 100644 --- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c +++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c @@ -15,7 +15,7 @@ #include <linux/videodev2.h> #include <linux/module.h> -#include <media/rj54n1cb0c.h> +#include <media/i2c/rj54n1cb0c.h> #include <media/soc_camera.h> #include <media/v4l2-clk.h> #include <media/v4l2-subdev.h> diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c index e939c24bfd3c..06aff81787a7 100644 --- a/drivers/media/i2c/soc_camera/tw9910.c +++ b/drivers/media/i2c/soc_camera/tw9910.c @@ -26,7 +26,7 @@ #include <linux/videodev2.h> #include <media/soc_camera.h> -#include <media/tw9910.h> +#include <media/i2c/tw9910.h> #include <media/v4l2-clk.h> #include <media/v4l2-subdev.h> diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c index b04c09dd4bfb..0bf031b7e4fa 100644 --- a/drivers/media/i2c/sr030pc30.c +++ b/drivers/media/i2c/sr030pc30.c @@ -24,7 +24,7 @@ #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> #include <media/v4l2-ctrls.h> -#include <media/sr030pc30.h> +#include <media/i2c/sr030pc30.h> static int debug; module_param(debug, int, 0644); diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 9ef5baaf8646..77b801152ea5 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -42,7 +42,7 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/v4l2-of.h> -#include <media/tc358743.h> +#include <media/i2c/tc358743.h> #include "tc358743_regs.h" @@ -862,7 +862,7 @@ static void 
tc358743_format_change(struct v4l2_subdev *sd) v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n", __func__); } else { - if (!v4l2_match_dv_timings(&state->timings, &timings, 0)) + if (!v4l2_match_dv_timings(&state->timings, &timings, 0, false)) enable_stream(sd, false); v4l2_print_dv_timings(sd->name, @@ -1366,7 +1366,7 @@ static int tc358743_s_dv_timings(struct v4l2_subdev *sd, v4l2_print_dv_timings(sd->name, "tc358743_s_dv_timings: ", timings, false); - if (v4l2_match_dv_timings(&state->timings, timings, 0)) { + if (v4l2_match_dv_timings(&state->timings, timings, 0, false)) { v4l2_dbg(1, debug, sd, "%s: no change\n", __func__); return 0; } diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c index bda3a6540a60..5bbfcab01c75 100644 --- a/drivers/media/i2c/ths7303.c +++ b/drivers/media/i2c/ths7303.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <linux/slab.h> -#include <media/ths7303.h> +#include <media/i2c/ths7303.h> #include <media/v4l2-device.h> #define THS7303_CHANNEL_1 1 diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c index 2a8114a676fd..fece2a4339a1 100644 --- a/drivers/media/i2c/tvaudio.c +++ b/drivers/media/i2c/tvaudio.c @@ -36,7 +36,7 @@ #include <linux/kthread.h> #include <linux/freezer.h> -#include <media/tvaudio.h> +#include <media/i2c/tvaudio.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c index a93985a9b070..b5dba5b7ce3a 100644 --- a/drivers/media/i2c/tvp514x.c +++ b/drivers/media/i2c/tvp514x.c @@ -44,7 +44,7 @@ #include <media/v4l2-mediabus.h> #include <media/v4l2-of.h> #include <media/v4l2-ctrls.h> -#include <media/tvp514x.h> +#include <media/i2c/tvp514x.h> #include <media/media-entity.h> #include "tvp514x_regs.h" diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 3c5fb2509c47..6c3769d44b75 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -12,7 +12,7 @@ #include <linux/module.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> -#include <media/tvp5150.h> +#include <media/i2c/tvp5150.h> #include <media/v4l2-ctrls.h> #include "tvp5150_reg.h" diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index f617d8b745ee..772a3043ae3b 100644 --- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -32,7 +32,7 @@ #include <linux/of.h> #include <linux/of_graph.h> #include <linux/v4l2-dv-timings.h> -#include <media/tvp7002.h> +#include <media/i2c/tvp7002.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> #include <media/v4l2-common.h> diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c index 081786d176d0..8e17a83920d4 100644 --- a/drivers/media/i2c/uda1342.c +++ b/drivers/media/i2c/uda1342.c @@ -20,7 +20,7 @@ #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> -#include <media/uda1342.h> +#include <media/i2c/uda1342.h> #include <linux/slab.h> static int write_reg(struct i2c_client *client, int reg, int value) diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c index 2c0f955abc72..c03567e993cd 100644 --- a/drivers/media/i2c/upd64031a.c +++ b/drivers/media/i2c/upd64031a.c @@ -27,7 +27,7 @@ #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> -#include <media/upd64031a.h> +#include <media/i2c/upd64031a.h> /* --------------------- read registers functions define -------------------- */ diff --git a/drivers/media/i2c/upd64083.c 
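Every v4l2_match_dv_timings() call in this section (adv7604, adv7842, tc358743, and cobalt further down) grows a fourth boolean argument, and all of them pass false so the matching behaviour is unchanged; when set, the new flag additionally compares the reduced-fps attribute of the two timings (a hedged summary; v4l2-dv-timings.h carries the authoritative description). Updated call, as it appears in tc358743_s_dv_timings():

	if (v4l2_match_dv_timings(&state->timings, timings, 0, false)) {
		v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
		return 0;
	}
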
b/drivers/media/i2c/upd64083.c index f2057a434060..77f122f2e3c9 100644 --- a/drivers/media/i2c/upd64083.c +++ b/drivers/media/i2c/upd64083.c @@ -27,7 +27,7 @@ #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> -#include <media/upd64083.h> +#include <media/i2c/upd64083.h> MODULE_DESCRIPTION("uPD64083 driver"); MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil"); diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c index d33d2cd6d034..6e00f145b948 100644 --- a/drivers/media/i2c/wm8775.c +++ b/drivers/media/i2c/wm8775.c @@ -34,7 +34,7 @@ #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> -#include <media/wm8775.h> +#include <media/i2c/wm8775.h> MODULE_DESCRIPTION("wm8775 driver"); MODULE_AUTHOR("Ulf Eklund, Hans Verkuil"); diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c index 4654fb65ca21..8a17cc0bfa07 100644 --- a/drivers/media/pci/bt8xx/bttv-cards.c +++ b/drivers/media/pci/bt8xx/bttv-cards.c @@ -41,7 +41,7 @@ #include "bttvp.h" #include <media/v4l2-common.h> -#include <media/tvaudio.h> +#include <media/i2c/tvaudio.h> #include "bttv-audio-hook.h" /* fwd decl */ @@ -3808,7 +3808,7 @@ static void bttv_tea575x_set_direction(struct snd_tea575x *tea, bool output) gpio_inout(mask, (1 << gpio.clk) | (1 << gpio.wren)); } -static struct snd_tea575x_ops bttv_tea_ops = { +static const struct snd_tea575x_ops bttv_tea_ops = { .set_pins = bttv_tea575x_set_pins, .get_pins = bttv_tea575x_get_pins, .set_direction = bttv_tea575x_set_direction, diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 15a4ebc2844d..9400e996087b 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -50,15 +50,15 @@ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/tvaudio.h> -#include <media/msp3400.h> +#include <media/i2c/tvaudio.h> +#include <media/drv-intf/msp3400.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/byteorder.h> -#include <media/saa6588.h> +#include <media/i2c/saa6588.h> #define BTTV_VERSION "0.9.19" diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h index 31bf79d3b0d2..b1e0023f923c 100644 --- a/drivers/media/pci/bt8xx/bttvp.h +++ b/drivers/media/pci/bt8xx/bttvp.h @@ -41,8 +41,8 @@ #include <media/videobuf-dma-sg.h> #include <media/tveeprom.h> #include <media/rc-core.h> -#include <media/ir-kbd-i2c.h> -#include <media/tea575x.h> +#include <media/i2c/ir-kbd-i2c.h> +#include <media/drv-intf/tea575x.h> #include "bt848.h" #include "bttv.h" diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c index 8fed61ec712e..8d6f04fc8013 100644 --- a/drivers/media/pci/cobalt/cobalt-driver.c +++ b/drivers/media/pci/cobalt/cobalt-driver.c @@ -21,9 +21,9 @@ */ #include <linux/delay.h> -#include <media/adv7604.h> -#include <media/adv7842.h> -#include <media/adv7511.h> +#include <media/i2c/adv7604.h> +#include <media/i2c/adv7842.h> +#include <media/i2c/adv7511.h> #include <media/v4l2-event.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c index 3de26d0714b5..b190d4f81c6e 100644 --- a/drivers/media/pci/cobalt/cobalt-irq.c +++ b/drivers/media/pci/cobalt/cobalt-irq.c @@ -18,7 +18,7 @@ * SOFTWARE. 
*/ -#include <media/adv7604.h> +#include <media/i2c/adv7604.h> #include "cobalt-driver.h" #include "cobalt-irq.h" @@ -134,7 +134,7 @@ done: skip = true; s->skip_first_frames--; } - v4l2_get_timestamp(&cb->vb.timestamp); + cb->vb.vb2_buf.timestamp = ktime_get_ns(); /* TODO: the sequence number should be read from the FPGA so we also know about dropped frames. */ cb->vb.sequence = s->sequence++; diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c index ff46e424262f..c0ba458f6cf3 100644 --- a/drivers/media/pci/cobalt/cobalt-v4l2.c +++ b/drivers/media/pci/cobalt/cobalt-v4l2.c @@ -29,8 +29,8 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/v4l2-dv-timings.h> -#include <media/adv7604.h> -#include <media/adv7842.h> +#include <media/i2c/adv7604.h> +#include <media/i2c/adv7842.h> #include "cobalt-alsa.h" #include "cobalt-cpld.h" @@ -43,11 +43,10 @@ static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60; /* vb2 DMA streaming ops */ -static int cobalt_queue_setup(struct vb2_queue *q, const void *parg, +static int cobalt_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct cobalt_stream *s = q->drv_priv; unsigned size = s->stride * s->height; @@ -55,14 +54,11 @@ static int cobalt_queue_setup(struct vb2_queue *q, const void *parg, *num_buffers = 3; if (*num_buffers > NR_BUFS) *num_buffers = NR_BUFS; + alloc_ctxs[0] = s->cobalt->alloc_ctx; + if (*num_planes) + return sizes[0] < size ? -EINVAL : 0; *num_planes = 1; - if (fmt) { - if (fmt->fmt.pix.sizeimage < size) - return -EINVAL; - size = fmt->fmt.pix.sizeimage; - } sizes[0] = size; - alloc_ctxs[0] = s->cobalt->alloc_ctx; return 0; } @@ -649,7 +645,7 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh, return 0; } - if (v4l2_match_dv_timings(timings, &s->timings, 0)) + if (v4l2_match_dv_timings(timings, &s->timings, 0, false)) return 0; if (vb2_is_busy(&s->q)) diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c index c07c849b1aaf..5e01ea441dc4 100644 --- a/drivers/media/pci/cx18/cx18-cards.c +++ b/drivers/media/pci/cx18/cx18-cards.c @@ -26,7 +26,7 @@ #include "cx18-cards.h" #include "cx18-av-core.h" #include "cx18-i2c.h" -#include <media/cs5345.h> +#include <media/i2c/cs5345.h> #define V4L2_STD_PAL_SECAM (V4L2_STD_PAL|V4L2_STD_SECAM) diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c index 71227a155cba..adb5a8c72c06 100644 --- a/drivers/media/pci/cx18/cx18-controls.c +++ b/drivers/media/pci/cx18/cx18-controls.c @@ -126,7 +126,7 @@ static int cx18_s_audio_mode(struct cx2341x_handler *cxhdl, u32 val) return 0; } -struct cx2341x_handler_ops cx18_cxhdl_ops = { +const struct cx2341x_handler_ops cx18_cxhdl_ops = { .s_audio_mode = cx18_s_audio_mode, .s_audio_sampling_freq = cx18_s_audio_sampling_freq, .s_video_encoding = cx18_s_video_encoding, diff --git a/drivers/media/pci/cx18/cx18-controls.h b/drivers/media/pci/cx18/cx18-controls.h index cb5dfc7b2054..326794887863 100644 --- a/drivers/media/pci/cx18/cx18-controls.h +++ b/drivers/media/pci/cx18/cx18-controls.h @@ -21,4 +21,4 @@ * 02111-1307 USA */ -extern struct cx2341x_handler_ops cx18_cxhdl_ops; +extern const struct cx2341x_handler_ops cx18_cxhdl_ops; diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index b15beed2dc14..7e31f2a2e085 100644 --- 
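cobalt-irq.c above, and the cx23885/cx25821/cx88 hunks below, all switch buffer timestamping from v4l2_get_timestamp() on the v4l2 buffer to the u64 nanosecond timestamp that now lives on struct vb2_buffer itself. The completion path therefore becomes (fragment modelled on the cx25821 hunk; buf and dmaq are that driver's own buffer and queue state):

	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = dmaq->count++;
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
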
a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -49,7 +49,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-fh.h> #include <media/tuner.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> #include "cx18-mailbox.h" #include "cx18-av-core.h" #include "cx23418.h" diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c index 55525af1f482..eeb741c7db1b 100644 --- a/drivers/media/pci/cx18/cx18-ioctl.c +++ b/drivers/media/pci/cx18/cx18-ioctl.c @@ -453,8 +453,8 @@ static int cx18_cropcap(struct file *file, void *fh, if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; - cropcap->pixelaspect.numerator = cx->is_50hz ? 59 : 10; - cropcap->pixelaspect.denominator = cx->is_50hz ? 54 : 11; + cropcap->pixelaspect.numerator = cx->is_50hz ? 54 : 11; + cropcap->pixelaspect.denominator = cx->is_50hz ? 59 : 10; return 0; } diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h index 767a8d23e3f2..67ffe65b56a3 100644 --- a/drivers/media/pci/cx18/cx23418.h +++ b/drivers/media/pci/cx18/cx23418.h @@ -22,7 +22,7 @@ #ifndef CX23418_H #define CX23418_H -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #define MGR_CMD_MASK 0x40000000 /* The MSB of the command code indicates that this is the completion of a diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig index 2e1b88ccdbf2..3435bbaa3167 100644 --- a/drivers/media/pci/cx23885/Kconfig +++ b/drivers/media/pci/cx23885/Kconfig @@ -10,6 +10,7 @@ config VIDEO_CX23885 select VIDEOBUF2_DMA_SG select VIDEO_CX25840 select VIDEO_CX2341X + select VIDEO_CS3308 select DVB_DIB7000P if MEDIA_SUBDRV_AUTOSELECT select DVB_DRXK if MEDIA_SUBDRV_AUTOSELECT select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index 88a3afb66d10..bd333875a1f7 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -30,7 +30,7 @@ #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #include "cx23885.h" #include "cx23885-ioctl.h" @@ -1138,7 +1138,7 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder) /* ------------------------------------------------------------------ */ -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index f384f295676e..310ee769aed4 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -19,7 +19,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include <linux/firmware.h> #include <misc/altera.h> @@ -715,6 +715,56 @@ struct cx23885_board cx23885_boards[] = { .portb = CX23885_MPEG_DVB, .portc = CX23885_MPEG_DVB, }, + [CX23885_BOARD_VIEWCAST_260E] = { + .name = "ViewCast 260e", + .porta = CX23885_ANALOG_VIDEO, + .force_bff = 1, + .input = {{ + .type = CX23885_VMUX_COMPOSITE1, + .vmux = CX25840_VIN6_CH1, + .amux = CX25840_AUDIO7, + }, { + .type = CX23885_VMUX_SVIDEO, + .vmux = CX25840_VIN7_CH3 | + CX25840_VIN5_CH1 | + CX25840_SVIDEO_ON, + .amux = CX25840_AUDIO7, + }, 
{ + .type = CX23885_VMUX_COMPONENT, + .vmux = CX25840_VIN7_CH3 | + CX25840_VIN6_CH2 | + CX25840_VIN5_CH1 | + CX25840_COMPONENT_ON, + .amux = CX25840_AUDIO7, + } }, + }, + [CX23885_BOARD_VIEWCAST_460E] = { + .name = "ViewCast 460e", + .porta = CX23885_ANALOG_VIDEO, + .force_bff = 1, + .input = {{ + .type = CX23885_VMUX_COMPOSITE1, + .vmux = CX25840_VIN4_CH1, + .amux = CX25840_AUDIO7, + }, { + .type = CX23885_VMUX_SVIDEO, + .vmux = CX25840_VIN7_CH3 | + CX25840_VIN6_CH1 | + CX25840_SVIDEO_ON, + .amux = CX25840_AUDIO7, + }, { + .type = CX23885_VMUX_COMPONENT, + .vmux = CX25840_VIN7_CH3 | + CX25840_VIN6_CH1 | + CX25840_VIN5_CH2 | + CX25840_COMPONENT_ON, + .amux = CX25840_AUDIO7, + }, { + .type = CX23885_VMUX_COMPOSITE2, + .vmux = CX25840_VIN6_CH1, + .amux = CX25840_AUDIO7, + } }, + }, }; const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards); @@ -1002,6 +1052,14 @@ struct cx23885_subid cx23885_subids[] = { .subvendor = 0x0070, .subdevice = 0xf038, .card = CX23885_BOARD_HAUPPAUGE_HVR5525, + }, { + .subvendor = 0x1576, + .subdevice = 0x0260, + .card = CX23885_BOARD_VIEWCAST_260E, + }, { + .subvendor = 0x1576, + .subdevice = 0x0460, + .card = CX23885_BOARD_VIEWCAST_460E, }, }; const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids); @@ -1034,6 +1092,28 @@ void cx23885_card_list(struct cx23885_dev *dev) dev->name, i, cx23885_boards[i].name); } +static void viewcast_eeprom(struct cx23885_dev *dev, u8 *eeprom_data) +{ + u32 sn; + + /* The serial number record begins with tag 0x59 */ + if (*(eeprom_data + 0x00) != 0x59) { + pr_info("%s() eeprom records are undefined, no serial number\n", + __func__); + return; + } + + sn = (*(eeprom_data + 0x06) << 24) | + (*(eeprom_data + 0x05) << 16) | + (*(eeprom_data + 0x04) << 8) | + (*(eeprom_data + 0x03)); + + pr_info("%s: card '%s' sn# MM%d\n", + dev->name, + cx23885_boards[dev->board].name, + sn); +} + static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data) { struct tveeprom tv; @@ -1671,6 +1751,12 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx23885_gpio_set(dev, GPIO_8 | GPIO_9); msleep(100); break; + case CX23885_BOARD_VIEWCAST_260E: + case CX23885_BOARD_VIEWCAST_460E: + /* For documentation purposes, it's worth noting that this + * card does not have any GPIO's connected to subcomponents. 
+ */ + break; } } @@ -1917,6 +2003,14 @@ void cx23885_card_setup(struct cx23885_dev *dev) if (dev->i2c_bus[0].i2c_rc == 0) hauppauge_eeprom(dev, eeprom+0xc0); break; + case CX23885_BOARD_VIEWCAST_260E: + case CX23885_BOARD_VIEWCAST_460E: + dev->i2c_bus[1].i2c_client.addr = 0xa0 >> 1; + tveeprom_read(&dev->i2c_bus[1].i2c_client, + eeprom, sizeof(eeprom)); + if (dev->i2c_bus[0].i2c_rc == 0) + viewcast_eeprom(dev, eeprom); + break; } switch (dev->board) { @@ -2120,6 +2214,8 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_DVBSKY_S950: case CX23885_BOARD_DVBSKY_S952: case CX23885_BOARD_DVBSKY_T982: + case CX23885_BOARD_VIEWCAST_260E: + case CX23885_BOARD_VIEWCAST_460E: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, "cx25840", 0x88 >> 1, NULL); @@ -2130,6 +2226,24 @@ void cx23885_card_setup(struct cx23885_dev *dev) break; } + switch (dev->board) { + case CX23885_BOARD_VIEWCAST_260E: + v4l2_i2c_new_subdev(&dev->v4l2_dev, + &dev->i2c_bus[0].i2c_adap, + "cs3308", 0x82 >> 1, NULL); + break; + case CX23885_BOARD_VIEWCAST_460E: + /* This cs3308 controls the audio from the breakout cable */ + v4l2_i2c_new_subdev(&dev->v4l2_dev, + &dev->i2c_bus[0].i2c_adap, + "cs3308", 0x80 >> 1, NULL); + /* This cs3308 controls the audio from the onboard header */ + v4l2_i2c_new_subdev(&dev->v4l2_dev, + &dev->i2c_bus[0].i2c_adap, + "cs3308", 0x82 >> 1, NULL); + break; + } + /* AUX-PLL 27MHz CLK */ switch (dev->board) { case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index e8f847226a19..813c217b5e1a 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -427,7 +427,7 @@ static void cx23885_wakeup(struct cx23885_tsport *port, buf = list_entry(q->active.next, struct cx23885_buffer, queue); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.sequence = q->count++; dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.vb2_buf.index, @@ -968,6 +968,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev) call_all(dev, core, s_power, 0); cx23885_ir_init(dev); + if (dev->board == CX23885_BOARD_VIEWCAST_460E) { + /* + * GPIOs 9/8 are input detection bits for the breakout video + * (gpio 8) and audio (gpio 9) cables. When they're attached, + * this gpios are pulled high. Make sure these GPIOs are marked + * as inputs. 
+ */ + cx23885_gpio_enable(dev, 0x300, 0); + } + if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) { if (cx23885_video_register(dev) < 0) { printk(KERN_ERR "%s() Failed to register analog " diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index c4307ad8594c..80319bb73d94 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -92,7 +92,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* ------------------------------------------------------------------ */ -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -2168,11 +2168,12 @@ static int dvb_register(struct cx23885_tsport *port) } port->i2c_client_tuner = client_tuner; break; - case CX23885_BOARD_HAUPPAUGE_HVR5525: - switch (port->nr) { + case CX23885_BOARD_HAUPPAUGE_HVR5525: { struct m88rs6000t_config m88rs6000t_config; struct a8293_platform_data a8293_pdata = {}; + switch (port->nr) { + /* port b - satellite */ case 1: /* attach frontend */ @@ -2267,6 +2268,7 @@ static int dvb_register(struct cx23885_tsport *port) break; } break; + } default: printk(KERN_INFO "%s: The frontend of your DVB/ATSC card " " isn't supported yet\n", diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c index 1135ea3f6ce5..ae061b358591 100644 --- a/drivers/media/pci/cx23885/cx23885-i2c.c +++ b/drivers/media/pci/cx23885/cx23885-i2c.c @@ -279,6 +279,8 @@ static char *i2c_devs[128] = { [0x10 >> 1] = "tda10048", [0x12 >> 1] = "dib7000pc", [0x1c >> 1] = "lgdt3303", + [0x80 >> 1] = "cs3308", + [0x82 >> 1] = "cs3308", [0x86 >> 1] = "tda9887", [0x32 >> 1] = "cx24227", [0x88 >> 1] = "cx25837", diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c index 088799c3b49b..64328d08ac2f 100644 --- a/drivers/media/pci/cx23885/cx23885-input.c +++ b/drivers/media/pci/cx23885/cx23885-input.c @@ -268,7 +268,7 @@ int cx23885_input_init(struct cx23885_dev *dev) struct rc_dev *rc; char *rc_map; enum rc_driver_type driver_type; - unsigned long allowed_protos; + u64 allowed_protos; int ret; diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c index cf3cb1324c55..39750ebcc04c 100644 --- a/drivers/media/pci/cx23885/cx23885-vbi.c +++ b/drivers/media/pci/cx23885/cx23885-vbi.c @@ -83,7 +83,7 @@ int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status) if (status & VID_BC_MSK_VBI_RISCI1) { dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__); spin_lock(&dev->slock); - count = cx_read(VID_A_GPCNT); + count = cx_read(VBI_A_GPCNT); cx23885_video_wakeup(dev, &dev->vbiq, count); spin_unlock(&dev->slock); handled++; @@ -103,7 +103,6 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev, VBI_LINE_LENGTH, buf->risc.dma); /* reset counter */ - cx_write(VID_A_GPCNT_CTL, 3); cx_write(VID_A_VBI_CTRL, 3); cx_write(VBI_A_GPCNT_CTL, 3); q->count = 0; @@ -121,7 +120,7 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev, /* ------------------------------------------------------------------ */ -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index 
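The queue_setup() callbacks rewritten throughout these cx23885/cx25821/cx88 files follow the new vb2 contract: the const void *parg argument is gone, and when *num_planes is already non-zero the callback only validates the sizes passed in by the core (the VIDIOC_CREATE_BUFS path); otherwise it fills them in itself. A sketch of the shape, lifted from the cx25821 hunk further down (the size computation and alloc_ctx are that driver's own details):

	static int queue_setup(struct vb2_queue *q,
			       unsigned int *num_buffers, unsigned int *num_planes,
			       unsigned int sizes[], void *alloc_ctxs[])
	{
		struct cx25821_channel *chan = q->drv_priv;
		unsigned size = (chan->fmt->depth * chan->width * chan->height) >> 3;

		alloc_ctxs[0] = chan->dev->alloc_ctx;

		if (*num_planes)			/* VIDIOC_CREATE_BUFS */
			return sizes[0] < size ? -EINVAL : 0;

		*num_planes = 1;			/* VIDIOC_REQBUFS */
		sizes[0] = size;
		return 0;
	}

The cx23885-video.c diff that follows also adds a vidioc_cropcap handler reporting a 54/59 pixel aspect for 50 Hz standards and 11/10 for 60 Hz, the same corrected values the cx18-ioctl.c hunk above swaps into place.
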
71a80e2b842c..e1d7d0847167 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -35,7 +35,7 @@ #include "cx23885-ioctl.h" #include "tuner-xc2028.h" -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> MODULE_DESCRIPTION("v4l2 driver module for cx23885 based TV cards"); MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); @@ -105,7 +105,7 @@ void cx23885_video_wakeup(struct cx23885_dev *dev, struct cx23885_buffer, queue); buf->vb.sequence = q->count++; - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.vb2_buf.index, count, q->count); list_del(&buf->queue); @@ -114,11 +114,19 @@ void cx23885_video_wakeup(struct cx23885_dev *dev, int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm) { + struct v4l2_subdev_format format = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .format.code = MEDIA_BUS_FMT_FIXED, + }; + dprintk(1, "%s(norm = 0x%08x) name: [%s]\n", __func__, (unsigned int)norm, v4l2_norm_to_name(norm)); + if (dev->tvnorm == norm) + return 0; + if (dev->tvnorm != norm) { if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq) || vb2_is_busy(&dev->vb2_mpegq)) @@ -126,9 +134,17 @@ int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm) } dev->tvnorm = norm; + dev->width = 720; + dev->height = norm_maxh(norm); + dev->field = V4L2_FIELD_INTERLACED; call_all(dev, video, s_std, norm); + format.format.width = dev->width; + format.format.height = dev->height; + format.format.field = dev->field; + call_all(dev, pad, set_fmt, NULL, &format); + return 0; } @@ -247,7 +263,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input) (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) || (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) || (dev->board == CX23885_BOARD_MYGICA_X8507) || - (dev->board == CX23885_BOARD_AVERMEDIA_HC81R)) { + (dev->board == CX23885_BOARD_AVERMEDIA_HC81R) || + (dev->board == CX23885_BOARD_VIEWCAST_260E) || + (dev->board == CX23885_BOARD_VIEWCAST_460E)) { /* Configure audio routing */ v4l2_subdev_call(dev->sd_cx25840, audio, s_routing, INPUT(input)->amux, 0, 0); @@ -315,7 +333,7 @@ static int cx23885_start_video_dma(struct cx23885_dev *dev, return 0; } -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -545,7 +563,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, return -EINVAL; field = f->fmt.pix.field; - maxw = norm_maxw(dev->tvnorm); + maxw = 720; maxh = norm_maxh(dev->tvnorm); if (V4L2_FIELD_ANY == field) { @@ -648,6 +666,26 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, return 0; } +static int vidioc_cropcap(struct file *file, void *priv, + struct v4l2_cropcap *cc) +{ + struct cx23885_dev *dev = video_drvdata(file); + bool is_50hz = dev->tvnorm & V4L2_STD_625_50; + + if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + cc->bounds.left = 0; + cc->bounds.top = 0; + cc->bounds.width = 720; + cc->bounds.height = norm_maxh(dev->tvnorm); + cc->defrect = cc->bounds; + cc->pixelaspect.numerator = is_50hz ? 54 : 11; + cc->pixelaspect.denominator = is_50hz ? 
59 : 10; + + return 0; +} + static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id) { struct cx23885_dev *dev = video_drvdata(file); @@ -1082,6 +1120,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_cropcap = vidioc_cropcap, .vidioc_s_std = vidioc_s_std, .vidioc_g_std = vidioc_g_std, .vidioc_enum_input = vidioc_enum_input, diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index c5ba0833f47a..b1a5409408c7 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -30,7 +30,7 @@ #include <media/rc-core.h> #include "cx23885-reg.h" -#include "media/cx2341x.h" +#include "media/drv-intf/cx2341x.h" #include <linux/mutex.h> @@ -101,6 +101,8 @@ #define CX23885_BOARD_DVBSKY_T982 51 #define CX23885_BOARD_HAUPPAUGE_HVR5525 52 #define CX23885_BOARD_HAUPPAUGE_STARBURST 53 +#define CX23885_BOARD_VIEWCAST_260E 54 +#define CX23885_BOARD_VIEWCAST_460E 55 #define GPIO_0 0x00000001 #define GPIO_1 0x00000002 @@ -627,11 +629,6 @@ extern int cx23885_risc_databuffer(struct pci_dev *pci, /* ----------------------------------------------------------- */ /* tv norms */ -static inline unsigned int norm_maxw(v4l2_std_id norm) -{ - return (norm & V4L2_STD_525_60) ? 720 : 768; -} - static inline unsigned int norm_maxh(v4l2_std_id norm) { return (norm & V4L2_STD_525_60) ? 480 : 576; diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c index 26e3e296d615..c48bba9daf1f 100644 --- a/drivers/media/pci/cx25821/cx25821-video.c +++ b/drivers/media/pci/cx25821/cx25821-video.c @@ -130,7 +130,7 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status) buf = list_entry(dmaq->active.next, struct cx25821_buffer, queue); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.sequence = dmaq->count++; list_del(&buf->queue); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -141,20 +141,20 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status) return handled; } -static int cx25821_queue_setup(struct vb2_queue *q, const void *parg, +static int cx25821_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct cx25821_channel *chan = q->drv_priv; unsigned size = (chan->fmt->depth * chan->width * chan->height) >> 3; - if (fmt && fmt->fmt.pix.sizeimage < size) - return -EINVAL; + alloc_ctxs[0] = chan->dev->alloc_ctx; + + if (*num_planes) + return sizes[0] < size ? -EINVAL : 0; *num_planes = 1; - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : size; - alloc_ctxs[0] = chan->dev->alloc_ctx; + sizes[0] = size; return 0; } diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 1b5268f9bb24..e158a1da1d41 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -40,7 +40,7 @@ #include <sound/control.h> #include <sound/initval.h> #include <sound/tlv.h> -#include <media/wm8775.h> +#include <media/i2c/wm8775.h> #include "cx88.h" #include "cx88-reg.h" diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c index 8b889135be8a..3233d45d1e5b 100644 --- a/drivers/media/pci/cx88/cx88-blackbird.c +++ b/drivers/media/pci/cx88/cx88-blackbird.c @@ -36,7 +36,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #include "cx88.h" @@ -637,7 +637,7 @@ static int blackbird_stop_codec(struct cx8802_dev *dev) /* ------------------------------------------------------------------ */ -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c index 9a43c7826b60..46fe8c1eb9d4 100644 --- a/drivers/media/pci/cx88/cx88-core.c +++ b/drivers/media/pci/cx88/cx88-core.c @@ -518,7 +518,7 @@ void cx88_wakeup(struct cx88_core *core, buf = list_entry(q->active.next, struct cx88_buffer, list); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.field = core->field; buf->vb.sequence = q->count++; list_del(&buf->list); diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c index f04835073844..afb20756d7a5 100644 --- a/drivers/media/pci/cx88/cx88-dvb.c +++ b/drivers/media/pci/cx88/cx88-dvb.c @@ -82,7 +82,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* ------------------------------------------------------------------ */ -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c index 007a5eee8e5e..ccc646d819f2 100644 --- a/drivers/media/pci/cx88/cx88-vbi.c +++ b/drivers/media/pci/cx88/cx88-vbi.c @@ -107,7 +107,7 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev, /* ------------------------------------------------------------------ */ -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index aef9acf351f6..5f331df65fb9 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -41,7 +41,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/wm8775.h> +#include <media/i2c/wm8775.h> MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); @@ -429,7 +429,7 @@ static int restart_video_queue(struct cx8800_dev *dev, /* ------------------------------------------------------------------ */ -static int 
queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h index 2996eb3ea1fc..78f817ee7e41 100644 --- a/drivers/media/pci/cx88/cx88.h +++ b/drivers/media/pci/cx88/cx88.h @@ -30,10 +30,10 @@ #include <media/tuner.h> #include <media/tveeprom.h> #include <media/videobuf2-dma-sg.h> -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #include <media/videobuf2-dvb.h> -#include <media/ir-kbd-i2c.h> -#include <media/wm8775.h> +#include <media/i2c/ir-kbd-i2c.h> +#include <media/i2c/wm8775.h> #include "cx88-reg.h" #include "tuner-xc2028.h" diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 0ac2dd35fe50..fba5b40a869c 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -81,13 +81,13 @@ static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr, static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) { struct ddb *dev = i2c->dev; - int stat; + long stat; u32 val; i2c->done = 0; ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); - if (stat <= 0) { + if (stat == 0) { printk(KERN_ERR "I2C timeout\n"); { /* MSI debugging*/ u32 istat = ddbreadl(INTERRUPT_STATUS); diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c index 88915fb87e80..5dd504741b12 100644 --- a/drivers/media/pci/dm1105/dm1105.c +++ b/drivers/media/pci/dm1105/dm1105.c @@ -1206,7 +1206,6 @@ static void dm1105_remove(struct pci_dev *pdev) i2c_del_adapter(&dev->i2c_adap); dm1105_hw_exit(dev); - synchronize_irq(pdev->irq); free_irq(pdev->irq, dev); pci_iounmap(pdev, dev->io_mem); pci_release_regions(pdev); diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c index d84abde5ea29..568c0c8fb2dc 100644 --- a/drivers/media/pci/dt3155/dt3155.c +++ b/drivers/media/pci/dt3155/dt3155.c @@ -131,22 +131,21 @@ static int wait_i2c_reg(void __iomem *addr) } static int -dt3155_queue_setup(struct vb2_queue *vq, const void *parg, +dt3155_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct dt3155_priv *pd = vb2_get_drv_priv(vq); unsigned size = pd->width * pd->height; if (vq->num_buffers + *nbuffers < 2) *nbuffers = 2 - vq->num_buffers; - if (fmt && fmt->fmt.pix.sizeimage < size) - return -EINVAL; - *num_planes = 1; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : size; alloc_ctxs[0] = pd->alloc_ctx; + if (*num_planes) + return sizes[0] < size ? 
-EINVAL : 0; + *num_planes = 1; + sizes[0] = size; return 0; } @@ -271,7 +270,7 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id) spin_lock(&ipd->lock); if (ipd->curr_buf && !list_empty(&ipd->dmaq)) { - v4l2_get_timestamp(&ipd->curr_buf->timestamp); + ipd->curr_buf->vb2_buf.timestamp = ktime_get_ns(); ipd->curr_buf->sequence = ipd->sequence++; ipd->curr_buf->field = V4L2_FIELD_NONE; vb2_buffer_done(&ipd->curr_buf->vb2_buf, VB2_BUF_STATE_DONE); diff --git a/drivers/media/pci/ivtv/ivtv-cards.c b/drivers/media/pci/ivtv/ivtv-cards.c index 145e4749a69d..410d97bdf541 100644 --- a/drivers/media/pci/ivtv/ivtv-cards.c +++ b/drivers/media/pci/ivtv/ivtv-cards.c @@ -22,12 +22,12 @@ #include "ivtv-cards.h" #include "ivtv-i2c.h" -#include <media/msp3400.h> -#include <media/m52790.h> -#include <media/wm8775.h> -#include <media/cs53l32a.h> -#include <media/cx25840.h> -#include <media/upd64031a.h> +#include <media/drv-intf/msp3400.h> +#include <media/i2c/m52790.h> +#include <media/i2c/wm8775.h> +#include <media/i2c/cs53l32a.h> +#include <media/drv-intf/cx25840.h> +#include <media/i2c/upd64031a.h> #define MSP_TUNER MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \ MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER) diff --git a/drivers/media/pci/ivtv/ivtv-controls.c b/drivers/media/pci/ivtv/ivtv-controls.c index 8a55ccb8f0c9..9666ca01549c 100644 --- a/drivers/media/pci/ivtv/ivtv-controls.c +++ b/drivers/media/pci/ivtv/ivtv-controls.c @@ -96,7 +96,7 @@ static int ivtv_s_audio_mode(struct cx2341x_handler *cxhdl, u32 val) return 0; } -struct cx2341x_handler_ops ivtv_cxhdl_ops = { +const struct cx2341x_handler_ops ivtv_cxhdl_ops = { .s_audio_mode = ivtv_s_audio_mode, .s_audio_sampling_freq = ivtv_s_audio_sampling_freq, .s_video_encoding = ivtv_s_video_encoding, diff --git a/drivers/media/pci/ivtv/ivtv-controls.h b/drivers/media/pci/ivtv/ivtv-controls.h index 3999e6358312..ea397ba837e3 100644 --- a/drivers/media/pci/ivtv/ivtv-controls.h +++ b/drivers/media/pci/ivtv/ivtv-controls.h @@ -21,7 +21,7 @@ #ifndef IVTV_CONTROLS_H #define IVTV_CONTROLS_H -extern struct cx2341x_handler_ops ivtv_cxhdl_ops; +extern const struct cx2341x_handler_ops ivtv_cxhdl_ops; extern const struct v4l2_ctrl_ops ivtv_hdl_out_ops; int ivtv_g_pts_frame(struct ivtv *itv, s64 *pts, s64 *frame); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 8616fa8193bc..374033a5bdaf 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -57,7 +57,7 @@ #include "ivtv-gpio.h" #include <linux/dma-mapping.h> #include <media/tveeprom.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include "tuner-xc2028.h" /* If you have already X v4l cards, then set this to X. 
This way @@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv) { int i; - for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) + for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) if (itv->card->video_inputs[i].video_type == 0) break; itv->nof_inputs = i; - for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) + for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) if (itv->card->audio_inputs[i].audio_type == 0) break; itv->nof_audio_inputs = i; @@ -826,7 +826,7 @@ static void ivtv_init_struct2(struct ivtv *itv) IVTV_CARD_INPUT_VID_TUNER) break; } - if (i == itv->nof_inputs) + if (i >= itv->nof_inputs) i = 0; itv->active_input = i; itv->audio_input = itv->card->video_inputs[i].audio_index; diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index ee0ef6e48c7d..6c08dae67a73 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -64,8 +64,8 @@ #include <media/v4l2-device.h> #include <media/v4l2-fh.h> #include <media/tuner.h> -#include <media/cx2341x.h> -#include <media/ir-kbd-i2c.h> +#include <media/drv-intf/cx2341x.h> +#include <media/i2c/ir-kbd-i2c.h> #include <linux/ivtv.h> diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c index 605d280d8a5f..c9bd018e53de 100644 --- a/drivers/media/pci/ivtv/ivtv-fileops.c +++ b/drivers/media/pci/ivtv/ivtv-fileops.c @@ -34,7 +34,7 @@ #include "ivtv-cards.h" #include "ivtv-firmware.h" #include <media/v4l2-event.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> /* This function tries to claim the stream for a specific file descriptor. If no one else is using this stream then the stream is claimed and diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c index 4b0e758a7bce..5b3095f65dce 100644 --- a/drivers/media/pci/ivtv/ivtv-firmware.c +++ b/drivers/media/pci/ivtv/ivtv-firmware.c @@ -26,7 +26,7 @@ #include "ivtv-ioctl.h" #include "ivtv-cards.h" #include <linux/firmware.h> -#include <media/saa7127.h> +#include <media/i2c/saa7127.h> #define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE #define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6 diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c index 1a41ba5c7d30..bccbf2d18e30 100644 --- a/drivers/media/pci/ivtv/ivtv-i2c.c +++ b/drivers/media/pci/ivtv/ivtv-i2c.c @@ -63,7 +63,7 @@ #include "ivtv-cards.h" #include "ivtv-gpio.h" #include "ivtv-i2c.h" -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> /* i2c implementation for cx23415/6 chip, ivtv project. * Author: Kevin Thayer (nufan_wfk at yahoo.com) diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c index 9a21c17fc376..2dc4b20f3ac0 100644 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c @@ -32,7 +32,7 @@ #include "ivtv-gpio.h" #include "ivtv-controls.h" #include "ivtv-cards.h" -#include <media/saa7127.h> +#include <media/i2c/saa7127.h> #include <media/tveeprom.h> #include <media/v4l2-event.h> #include <linux/dvb/audio.h> @@ -831,11 +831,11 @@ static int ivtv_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropca struct ivtv *itv = id->itv; if (cropcap->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { - cropcap->pixelaspect.numerator = itv->is_50hz ? 59 : 10; - cropcap->pixelaspect.denominator = itv->is_50hz ? 54 : 11; + cropcap->pixelaspect.numerator = itv->is_50hz ? 54 : 11; + cropcap->pixelaspect.denominator = itv->is_50hz ? 
59 : 10; } else if (cropcap->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { - cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10; - cropcap->pixelaspect.denominator = itv->is_out_50hz ? 54 : 11; + cropcap->pixelaspect.numerator = itv->is_out_50hz ? 54 : 11; + cropcap->pixelaspect.denominator = itv->is_out_50hz ? 59 : 10; } else { return -EINVAL; } diff --git a/drivers/media/pci/ivtv/ivtv-routing.c b/drivers/media/pci/ivtv/ivtv-routing.c index 8898c569a1c9..0c168f238904 100644 --- a/drivers/media/pci/ivtv/ivtv-routing.c +++ b/drivers/media/pci/ivtv/ivtv-routing.c @@ -24,10 +24,10 @@ #include "ivtv-gpio.h" #include "ivtv-routing.h" -#include <media/msp3400.h> -#include <media/m52790.h> -#include <media/upd64031a.h> -#include <media/upd64083.h> +#include <media/drv-intf/msp3400.h> +#include <media/i2c/m52790.h> +#include <media/i2c/upd64031a.h> +#include <media/i2c/upd64083.h> /* Selects the audio input and output according to the current settings. */ diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 3fdbd81b5580..81e1a5e26efb 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -277,7 +277,6 @@ static irqreturn_t netup_unidvb_isr(int irq, void *dev_id) } static int netup_unidvb_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], @@ -580,7 +579,7 @@ static void netup_unidvb_dma_worker(struct work_struct *work) dev_dbg(&ndev->pci_dev->dev, "%s(): buffer %p done, size %d\n", __func__, buf, buf->size); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index f720cea80e28..e227b02cc122 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -309,7 +309,7 @@ void saa7134_buffer_finish(struct saa7134_dev *dev, core_dbg("buffer_finish %p\n", q->curr); /* finish current buffer */ - v4l2_get_timestamp(&q->curr->vb2.timestamp); + q->curr->vb2.vb2_buf.timestamp = ktime_get_ns(); q->curr->vb2.sequence = q->seq_nr++; vb2_buffer_done(&q->curr->vb2.vb2_buf, state); q->curr = NULL; diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c index 7fb5ee7e20ac..0584a2adbe99 100644 --- a/drivers/media/pci/saa7134/saa7134-ts.c +++ b/drivers/media/pci/saa7134/saa7134-ts.c @@ -116,7 +116,7 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2) } EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare); -int saa7134_ts_queue_setup(struct vb2_queue *q, const void *parg, +int saa7134_ts_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c index 6271b0eb0265..e76da37c4a8a 100644 --- a/drivers/media/pci/saa7134/saa7134-vbi.c +++ b/drivers/media/pci/saa7134/saa7134-vbi.c @@ -138,7 +138,7 @@ static int buffer_prepare(struct vb2_buffer *vb2) saa7134_buffer_startpage(buf)); } -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/saa7134/saa7134-video.c 
b/drivers/media/pci/saa7134/saa7134-video.c index 518086c7aed5..a63c1366a64e 100644 --- a/drivers/media/pci/saa7134/saa7134-video.c +++ b/drivers/media/pci/saa7134/saa7134-video.c @@ -32,7 +32,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-event.h> -#include <media/saa6588.h> +#include <media/i2c/saa6588.h> /* ------------------------------------------------------------------ */ @@ -904,7 +904,7 @@ static int buffer_prepare(struct vb2_buffer *vb2) saa7134_buffer_startpage(buf)); } -static int queue_setup(struct vb2_queue *q, const void *parg, +static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h index 6b6d234f5cab..5938bc781999 100644 --- a/drivers/media/pci/saa7134/saa7134.h +++ b/drivers/media/pci/saa7134/saa7134.h @@ -42,7 +42,7 @@ #include <media/v4l2-ctrls.h> #include <media/tuner.h> #include <media/rc-core.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> #include <media/videobuf2-dma-sg.h> #include <sound/core.h> #include <sound/pcm.h> @@ -820,7 +820,7 @@ void saa7134_video_fini(struct saa7134_dev *dev); int saa7134_ts_buffer_init(struct vb2_buffer *vb2); int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2); -int saa7134_ts_queue_setup(struct vb2_queue *q, const void *parg, +int saa7134_ts_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]); int saa7134_ts_start_streaming(struct vb2_queue *vq, unsigned int count); diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index 03cbcd2095c6..c889ec9f8a5a 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -25,7 +25,7 @@ #define DEBUG_VARIABLE debug -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #include <linux/module.h> static int debug; diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index 15f0d66ff78a..c306a92e8909 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -25,7 +25,7 @@ #define DEBUG_VARIABLE debug -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #include <linux/module.h> static int debug; diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index 0ca1e07ae783..504d78807639 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -25,10 +25,10 @@ #define DEBUG_VARIABLE debug -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #include <media/tuner.h> #include <media/v4l2-common.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include <linux/module.h> #include "tea6415c.h" diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c index 4432fd69b7cb..67a14c41c227 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c @@ -531,8 +531,7 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, if (!ret) { vbuf->sequence = solo_enc->sequence++; - vbuf->timestamp.tv_sec = vop_sec(vh); - vbuf->timestamp.tv_usec = vop_usec(vh); + vb->timestamp = ktime_get_ns(); /* Check for motion flags */ if (solo_is_motion_on(solo_enc) && enc_buf->motion) { @@ -663,7 +662,6 @@ static int solo_ring_thread(void *data) } static int 
solo_enc_queue_setup(struct vb2_queue *q, - const void *parg, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c index f7ce493b1fee..721ff5320de7 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c @@ -225,7 +225,7 @@ finish_buf: vb2_set_plane_payload(vb, 0, solo_vlines(solo_dev) * solo_bytesperline(solo_dev)); vbuf->sequence = solo_dev->sequence++; - v4l2_get_timestamp(&vbuf->timestamp); + vb->timestamp = ktime_get_ns(); } vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); @@ -313,7 +313,7 @@ static void solo_stop_thread(struct solo_dev *solo_dev) solo_dev->kthread = NULL; } -static int solo_queue_setup(struct vb2_queue *q, const void *parg, +static int solo_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index 6367b455a7e7..753411cbbc9a 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -265,7 +265,7 @@ static void vip_active_buf_next(struct sta2x11_vip *vip) /* Videobuf2 Operations */ -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -817,7 +817,7 @@ static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip) /* Disable acquisition */ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA); /* Remove the active buffer from the list */ - v4l2_get_timestamp(&vip->active->vb.timestamp); + vip->active->vb.vb2_buf.timestamp = ktime_get_ns(); vip->active->vb.sequence = vip->sequence++; vb2_buffer_done(&vip->active->vb.vb2_buf, VB2_BUF_STATE_DONE); } diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index f89364951ebd..5e18b6796ed9 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -26,7 +26,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ @@ -1537,7 +1537,7 @@ static int get_firmware(struct av7110* av7110) printk(KERN_ERR "dvb-ttpci: usually this should be in " "/usr/lib/hotplug/firmware or /lib/firmware\n"); printk(KERN_ERR "dvb-ttpci: and can be downloaded from" - " http://www.linuxtv.org/download/dvb/firmware/\n"); + " https://linuxtv.org/download/dvb/firmware/\n"); } else printk(KERN_ERR "dvb-ttpci: cannot request firmware" " (error %i)\n", ret); @@ -2314,7 +2314,7 @@ static int frontend_init(struct av7110 *av7110) /* Budgetpatch note: * Original hardware design by Roberto Deza: * There is a DVB_Wiki at - * http://www.linuxtv.org/ + * https://linuxtv.org * * New software triggering design by Emard that works on * original Roberto Deza's hardware: diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h index 3a55927edb95..3707ccd02732 100644 --- a/drivers/media/pci/ttpci/av7110.h +++ b/drivers/media/pci/ttpci/av7110.h @@ -32,7 +32,7 @@ #include "stv0297.h" #include "l64781.h" -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #define ANALOG_TUNER_VES1820 1 diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c index 
9ed1ec7d3551..6fc748e22017 100644 --- a/drivers/media/pci/ttpci/av7110_av.c +++ b/drivers/media/pci/ttpci/av7110_av.c @@ -25,7 +25,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #include <linux/types.h> @@ -280,9 +280,11 @@ int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen) } -int av7110_set_volume(struct av7110 *av7110, int volleft, int volright) +int av7110_set_volume(struct av7110 *av7110, unsigned int volleft, + unsigned int volright) { - int err, vol, val, balance = 0; + unsigned int vol, val, balance = 0; + int err; dprintk(2, "av7110:%p, \n", av7110); @@ -1043,6 +1045,9 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len dprintk(2, "av7110:%p, \n", av7110); + if (len == 0) + return 0; + if (!(av7110->playing & RP_VIDEO)) { if (av7110_av_start_play(av7110, RP_VIDEO) < 0) return -EBUSY; diff --git a/drivers/media/pci/ttpci/av7110_av.h b/drivers/media/pci/ttpci/av7110_av.h index 5f02ef85e47d..f52276f47709 100644 --- a/drivers/media/pci/ttpci/av7110_av.h +++ b/drivers/media/pci/ttpci/av7110_av.h @@ -10,7 +10,8 @@ extern int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len); extern int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen); extern int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len); -extern int av7110_set_volume(struct av7110 *av7110, int volleft, int volright); +extern int av7110_set_volume(struct av7110 *av7110, unsigned int volleft, + unsigned int volright); extern int av7110_av_stop(struct av7110 *av7110, int av); extern int av7110_av_start_record(struct av7110 *av7110, int av, struct dvb_demux_feed *dvbdmxfeed); diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c index a6079b90252a..bc4c65ffd4b9 100644 --- a/drivers/media/pci/ttpci/av7110_ca.c +++ b/drivers/media/pci/ttpci/av7110_ca.c @@ -25,7 +25,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #include <linux/kernel.h> diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c index 300bd3c94738..0583d56ef5ef 100644 --- a/drivers/media/pci/ttpci/av7110_hw.c +++ b/drivers/media/pci/ttpci/av7110_hw.c @@ -22,7 +22,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ /* for debugging ARM communication: */ diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c index 6c4076acb131..479aff02db81 100644 --- a/drivers/media/pci/ttpci/av7110_v4l.c +++ b/drivers/media/pci/ttpci/av7110_v4l.c @@ -22,7 +22,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c index 3e469d4e0c87..6f0d0161970e 100644 --- a/drivers/media/pci/ttpci/budget-av.c +++ b/drivers/media/pci/ttpci/budget-av.c @@ -30,7 +30,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -46,7 +46,7 @@ #include "tda1004x.h" #include "tua6100.h" #include "dvb-pll.h" -#include <media/saa7146_vv.h> +#include <media/drv-intf/saa7146_vv.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c index 1feeeff3681b..7b27af4d9658 100644 --- a/drivers/media/pci/ttpci/budget-ci.c +++ b/drivers/media/pci/ttpci/budget-ci.c @@ -26,7 +26,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #include <linux/module.h> diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c index e9674b40007c..6d42dcfd4825 100644 --- a/drivers/media/pci/ttpci/budget-core.c +++ b/drivers/media/pci/ttpci/budget-core.c @@ -31,7 +31,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c index b5b65962ce8f..591dbdfa2a13 100644 --- a/drivers/media/pci/ttpci/budget-patch.c +++ b/drivers/media/pci/ttpci/budget-patch.c @@ -27,7 +27,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #include "av7110.h" diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c index 99972beca262..de54310a2660 100644 --- a/drivers/media/pci/ttpci/budget.c +++ b/drivers/media/pci/ttpci/budget.c @@ -31,7 +31,7 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at https://linuxtv.org */ #include "budget.h" diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h index 1ccbe1a49a4b..655eef5236ca 100644 --- a/drivers/media/pci/ttpci/budget.h +++ b/drivers/media/pci/ttpci/budget.h @@ -13,7 +13,7 @@ #include <linux/module.h> #include <linux/mutex.h> -#include <media/saa7146.h> +#include <media/drv-intf/saa7146.h> extern int budget_debug; diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c index 46642ef9151b..07116a87a57b 100644 --- a/drivers/media/pci/tw68/tw68-video.c +++ b/drivers/media/pci/tw68/tw68-video.c @@ -376,28 +376,28 @@ static int tw68_buffer_count(unsigned int size, unsigned int count) /* ------------------------------------------------------------- */ /* vb2 queue operations */ -static int tw68_queue_setup(struct vb2_queue *q, const void *parg, +static int tw68_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const 
struct v4l2_format *fmt = parg; struct tw68_dev *dev = vb2_get_drv_priv(q); unsigned tot_bufs = q->num_buffers + *num_buffers; + unsigned size = (dev->fmt->depth * dev->width * dev->height) >> 3; - sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3; + if (tot_bufs < 2) + tot_bufs = 2; + tot_bufs = tw68_buffer_count(size, tot_bufs); + *num_buffers = tot_bufs - q->num_buffers; alloc_ctxs[0] = dev->alloc_ctx; /* - * We allow create_bufs, but only if the sizeimage is the same as the + * We allow create_bufs, but only if the sizeimage is >= as the * current sizeimage. The tw68_buffer_count calculation becomes quite * difficult otherwise. */ - if (fmt && fmt->fmt.pix.sizeimage < sizes[0]) - return -EINVAL; + if (*num_planes) + return sizes[0] < size ? -EINVAL : 0; *num_planes = 1; - if (tot_bufs < 2) - tot_bufs = 2; - tot_bufs = tw68_buffer_count(sizes[0], tot_bufs); - *num_buffers = tot_bufs - q->num_buffers; + sizes[0] = size; return 0; } @@ -1016,7 +1016,7 @@ void tw68_irq_video_done(struct tw68_dev *dev, unsigned long status) buf = list_entry(dev->active.next, struct tw68_buf, list); list_del(&buf->list); spin_unlock(&dev->slock); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.field = dev->field; buf->vb.sequence = dev->seqnr++; vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c index 1136d92af642..9d2697f5b455 100644 --- a/drivers/media/pci/zoran/zoran_card.c +++ b/drivers/media/pci/zoran/zoran_card.c @@ -50,7 +50,7 @@ #include <linux/mutex.h> #include <linux/io.h> #include <media/v4l2-common.h> -#include <media/bt819.h> +#include <media/i2c/bt819.h> #include "videocodec.h" #include "zoran.h" diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index ccbc9742cb7a..0c53805dff0e 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -38,7 +38,7 @@ config VIDEO_SH_VOU depends on MEDIA_CAMERA_SUPPORT depends on VIDEO_DEV && I2C && HAS_DMA depends on ARCH_SHMOBILE || COMPILE_TEST - select VIDEOBUF_DMA_CONTIG + select VIDEOBUF2_DMA_CONTIG help Support for the Video Output Unit (VOU) on SuperH SoCs. diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index f0480d687f17..de32e3a3d4d1 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -1281,7 +1281,7 @@ static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe) */ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe) { - v4l2_get_timestamp(&vpfe->cur_frm->vb.timestamp); + vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field; vpfe->cur_frm->vb.sequence = vpfe->sequence++; vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -1898,7 +1898,6 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe) /* * vpfe_queue_setup - Callback function for buffer setup. * @vq: vb2_queue ptr - * @fmt: v4l2 format * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. 
@@ -1908,22 +1907,24 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe) * the buffer count and buffer size */ static int vpfe_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct vpfe_device *vpfe = vb2_get_drv_priv(vq); - - if (fmt && fmt->fmt.pix.sizeimage < vpfe->fmt.fmt.pix.sizeimage) - return -EINVAL; + unsigned size = vpfe->fmt.fmt.pix.sizeimage; if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; + alloc_ctxs[0] = vpfe->alloc_ctx; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } *nplanes = 1; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : vpfe->fmt.fmt.pix.sizeimage; - alloc_ctxs[0] = vpfe->alloc_ctx; + sizes[0] = size; vpfe_dbg(1, vpfe, "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]); diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c index 7764b9c482ef..d0092dae7a57 100644 --- a/drivers/media/platform/blackfin/bfin_capture.c +++ b/drivers/media/platform/blackfin/bfin_capture.c @@ -202,22 +202,20 @@ static void bcap_free_sensor_formats(struct bcap_device *bcap_dev) } static int bcap_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct bcap_device *bcap_dev = vb2_get_drv_priv(vq); - if (fmt && fmt->fmt.pix.sizeimage < bcap_dev->fmt.sizeimage) - return -EINVAL; - if (vq->num_buffers + *nbuffers < 2) *nbuffers = 2; + alloc_ctxs[0] = bcap_dev->alloc_ctx; + + if (*nplanes) + return sizes[0] < bcap_dev->fmt.sizeimage ? -EINVAL : 0; *nplanes = 1; - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : bcap_dev->fmt.sizeimage; - alloc_ctxs[0] = bcap_dev->alloc_ctx; + sizes[0] = bcap_dev->fmt.sizeimage; return 0; } @@ -406,7 +404,7 @@ static irqreturn_t bcap_isr(int irq, void *dev_id) spin_lock(&bcap_dev->lock); if (!list_empty(&bcap_dev->dma_queue)) { - v4l2_get_timestamp(&vbuf->timestamp); + vb->timestamp = ktime_get_ns(); if (ppi->err) { vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); ppi->err = false; diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 654e964f84a2..7d28899f89ce 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -246,7 +246,7 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming) /* Drop frames that do not start/end with a SOI/EOI markers */ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG && - !coda_jpeg_check_buffer(ctx, src_buf)) { + !coda_jpeg_check_buffer(ctx, &src_buf->vb2_buf)) { v4l2_err(&ctx->dev->v4l2_dev, "dropping invalid JPEG frame %d\n", ctx->qsequence); @@ -279,7 +279,7 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming) if (meta) { meta->sequence = src_buf->sequence; meta->timecode = src_buf->timecode; - meta->timestamp = src_buf->timestamp; + meta->timestamp = src_buf->vb2_buf.timestamp; meta->start = start; meta->end = ctx->bitstream_fifo.kfifo.in & ctx->bitstream_fifo.kfifo.mask; @@ -1364,7 +1364,7 @@ static void coda_finish_encode(struct coda_ctx *ctx) dst_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME; } - dst_buf->timestamp = src_buf->timestamp; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; @@ -2040,7 +2040,7 @@ static void coda_finish_decode(struct coda_ctx *ctx) dst_buf->flags |= ctx->frame_types[ctx->display_idx]; meta = &ctx->frame_metas[ctx->display_idx]; dst_buf->timecode = meta->timecode; - dst_buf->timestamp = meta->timestamp; + dst_buf->vb2_buf.timestamp = meta->timestamp; trace_coda_dec_rot_done(ctx, dst_buf, meta); diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 15516a6e3a39..2d782ce94a67 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -28,7 +28,7 @@ #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/of.h> -#include <linux/platform_data/coda.h> +#include <linux/platform_data/media/coda.h> #include <linux/reset.h> #include <media/v4l2-ctrls.h> @@ -131,6 +131,7 @@ static const struct coda_codec coda7_codecs[] = { CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720), CODA_CODEC(CODA7_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192), CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088), + CODA_CODEC(CODA7_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA7_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192), }; @@ -139,6 +140,7 @@ static const struct coda_codec coda9_codecs[] = { CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1088), CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1088), CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088), + CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088), 
CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088), }; @@ -187,6 +189,7 @@ static const struct coda_video_device coda_bit_decoder = { .ops = &coda_bit_decode_ops, .src_formats = { V4L2_PIX_FMT_H264, + V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_MPEG4, }, .dst_formats = { @@ -293,7 +296,8 @@ static void coda_get_max_dimensions(struct coda_dev *dev, *max_h = h; } -const struct coda_video_device *to_coda_video_device(struct video_device *vdev) +static const struct coda_video_device *to_coda_video_device(struct video_device + *vdev) { struct coda_dev *dev = video_get_drvdata(vdev); unsigned int i = vdev - dev->vfd; @@ -469,6 +473,7 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec, /* fallthrough */ case V4L2_PIX_FMT_H264: case V4L2_PIX_FMT_MPEG4: + case V4L2_PIX_FMT_MPEG2: f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx, f->fmt.pix.sizeimage, @@ -920,6 +925,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = { .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, @@ -1131,7 +1137,7 @@ static void set_default_params(struct coda_ctx *ctx) /* * Queue operations */ -static int coda_queue_setup(struct vb2_queue *vq, const void *parg, +static int coda_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -1250,6 +1256,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count) struct vb2_v4l2_buffer *buf; int ret = 0; + if (count < 1) + return -EINVAL; + q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) { @@ -1262,20 +1271,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count) ret = -EINVAL; goto err; } - } else { - if (count < 1) { - ret = -EINVAL; - goto err; - } } ctx->streamon_out = 1; } else { - if (count < 1) { - ret = -EINVAL; - goto err; - } - ctx->streamon_cap = 1; } diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index 96cd42a0baaf..9f899a6cefed 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -178,14 +178,28 @@ int coda_jpeg_write_tables(struct coda_ctx *ctx) return 0; } -bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb) +bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb) { - void *vaddr = vb2_plane_vaddr(&vb->vb2_buf, 0); - u16 soi = be16_to_cpup((__be16 *)vaddr); - u16 eoi = be16_to_cpup((__be16 *)(vaddr + - vb2_get_plane_payload(&vb->vb2_buf, 0) - 2)); + void *vaddr = vb2_plane_vaddr(vb, 0); + u16 soi, eoi; + int len, i; + + soi = be16_to_cpup((__be16 *)vaddr); + if (soi != SOI_MARKER) + return false; + + len = vb2_get_plane_payload(vb, 0); + vaddr += len - 2; + for (i = 0; i < 32; i++) { + eoi = be16_to_cpup((__be16 *)(vaddr - i)); + if (eoi == EOI_MARKER) { + if (i > 0) + vb2_set_plane_payload(vb, 0, len - i); + return true; + } + } - return soi == SOI_MARKER && eoi == EOI_MARKER; + return false; } /* diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 96532b06bd9e..d08e9843e9f2 100644 --- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h @@ 
-138,7 +138,7 @@ struct coda_buffer_meta { struct list_head list; u32 sequence; struct v4l2_timecode timecode; - struct timeval timestamp; + u64 timestamp; u32 start; u32 end; }; @@ -289,7 +289,7 @@ void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf, int coda_h264_padding(int size, char *p); -bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb); +bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb); int coda_jpeg_write_tables(struct coda_ctx *ctx); void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality); diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig index 469e9d28cec0..554e710de487 100644 --- a/drivers/media/platform/davinci/Kconfig +++ b/drivers/media/platform/davinci/Kconfig @@ -3,6 +3,7 @@ config VIDEO_DAVINCI_VPIF_DISPLAY depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on HAS_DMA + depends on I2C select VIDEOBUF2_DMA_CONTIG select VIDEO_ADV7343 if MEDIA_SUBDRV_AUTOSELECT select VIDEO_THS7303 if MEDIA_SUBDRV_AUTOSELECT @@ -19,6 +20,7 @@ config VIDEO_DAVINCI_VPIF_CAPTURE depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on HAS_DMA + depends on I2C select VIDEOBUF2_DMA_CONTIG help Enables Davinci VPIF module used for capture devices. @@ -33,6 +35,7 @@ config VIDEO_DM6446_CCDC depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on HAS_DMA + depends on I2C select VIDEOBUF_DMA_CONTIG help Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces @@ -49,6 +52,7 @@ config VIDEO_DM355_CCDC depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on HAS_DMA + depends on I2C select VIDEOBUF_DMA_CONTIG help Enables DM355 CCD hw module. DM355 CCDC hw interfaces @@ -64,6 +68,7 @@ config VIDEO_DM365_ISIF tristate "TI DM365 ISIF video capture driver" depends on VIDEO_V4L2 && ARCH_DAVINCI depends on HAS_DMA + depends on I2C select VIDEOBUF_DMA_CONTIG help Enables ISIF hw module. This is the hardware module for @@ -77,6 +82,7 @@ config VIDEO_DAVINCI_VPBE_DISPLAY tristate "TI DaVinci VPBE V4L2-Display driver" depends on VIDEO_V4L2 && ARCH_DAVINCI depends on HAS_DMA + depends on I2C select VIDEOBUF2_DMA_CONTIG help Enables Davinci VPBE module used for display devices. 
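Most of the hunks in this series that touch a driver's queue_setup callback apply the same videobuf2 conversion: the const void *parg argument is dropped, and a non-zero *num_planes now indicates a VIDIOC_CREATE_BUFS request whose caller-supplied sizes only need to be validated. A minimal sketch of the resulting callback shape, assuming a single-plane format whose size the driver computes itself (my_dev, MY_MIN_BUFFERS and the size calculation are illustrative names, not taken from any driver in this diff):

static int my_queue_setup(struct vb2_queue *q,
                          unsigned int *num_buffers, unsigned int *num_planes,
                          unsigned int sizes[], void *alloc_ctxs[])
{
        struct my_dev *dev = vb2_get_drv_priv(q);
        unsigned int size = (dev->fmt->depth * dev->width * dev->height) >> 3;

        /* keep at least the minimum number of buffers queued */
        if (q->num_buffers + *num_buffers < MY_MIN_BUFFERS)
                *num_buffers = MY_MIN_BUFFERS - q->num_buffers;

        alloc_ctxs[0] = dev->alloc_ctx;

        if (*num_planes)        /* called from VIDIOC_CREATE_BUFS: validate only */
                return sizes[0] < size ? -EINVAL : 0;

        *num_planes = 1;        /* called from VIDIOC_REQBUFS: set plane count and size */
        sizes[0] = size;
        return 0;
}

The same pattern repeats in the cx25821, dt3155, tw68, blackfin, vpbe and vpif conversions above and below.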
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index 6d91422c4e4c..0abcdfe97a6c 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -74,7 +74,7 @@ static void vpbe_isr_even_field(struct vpbe_display *disp_obj, if (layer->cur_frm == layer->next_frm) return; - v4l2_get_timestamp(&layer->cur_frm->vb.timestamp); + layer->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&layer->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); /* Make cur_frm pointing to next_frm */ layer->cur_frm = layer->next_frm; @@ -228,28 +228,27 @@ static int vpbe_buffer_prepare(struct vb2_buffer *vb) * This function allocates memory for the buffers */ static int -vpbe_buffer_queue_setup(struct vb2_queue *vq, const void *parg, +vpbe_buffer_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; /* Get the file handle object and layer object */ struct vpbe_layer *layer = vb2_get_drv_priv(vq); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n"); - if (fmt && fmt->fmt.pix.sizeimage < layer->pix_fmt.sizeimage) - return -EINVAL; - /* Store number of buffers allocated in numbuffer member */ if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS) *nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers; + alloc_ctxs[0] = layer->alloc_ctx; + + if (*nplanes) + return sizes[0] < layer->pix_fmt.sizeimage ? -EINVAL : 0; *nplanes = 1; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : layer->pix_fmt.sizeimage; - alloc_ctxs[0] = layer->alloc_ctx; + sizes[0] = layer->pix_fmt.sizeimage; return 0; } diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index c1e573b7cc6f..08f7028c7560 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -104,7 +104,6 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb) /** * vpif_buffer_queue_setup : Callback function for buffer setup. * @vq: vb2_queue ptr - * @fmt: v4l2 format * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. @@ -114,26 +113,26 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb) * the buffer count and buffer size */ static int vpif_buffer_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct channel_obj *ch = vb2_get_drv_priv(vq); - struct common_obj *common; - - common = &ch->common[VPIF_VIDEO_INDEX]; + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + unsigned size = common->fmt.fmt.pix.sizeimage; vpif_dbg(2, debug, "vpif_buffer_setup\n"); - if (fmt && fmt->fmt.pix.sizeimage < common->fmt.fmt.pix.sizeimage) - return -EINVAL; + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; *nplanes = 1; - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : common->fmt.fmt.pix.sizeimage; + sizes[0] = size; alloc_ctxs[0] = common->alloc_ctx; /* Calculate the offset for Y and C data in the buffer */ @@ -331,7 +330,7 @@ static struct vb2_ops video_qops = { */ static void vpif_process_buffer_complete(struct common_obj *common) { - v4l2_get_timestamp(&common->cur_frm->vb.timestamp); + common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); /* Make curFrm pointing to nextFrm */ common->cur_frm = common->next_frm; diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index fd2780306c17..f40755cf1bf2 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -99,7 +99,6 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb) /** * vpif_buffer_queue_setup : Callback function for buffer setup. * @vq: vb2_queue ptr - * @fmt: v4l2 format * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. @@ -109,22 +108,24 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb) * the buffer count and buffer size */ static int vpif_buffer_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + unsigned size = common->fmt.fmt.pix.sizeimage; - if (fmt && fmt->fmt.pix.sizeimage < common->fmt.fmt.pix.sizeimage) - return -EINVAL; + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; *nplanes = 1; - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : common->fmt.fmt.pix.sizeimage; + sizes[0] = size; alloc_ctxs[0] = common->alloc_ctx; /* Calculate the offset for Y and C data in the buffer */ @@ -330,7 +331,7 @@ static void process_interlaced_mode(int fid, struct common_obj *common) /* one frame is displayed If next frame is * available, release cur_frm and move on */ /* Copy frame display time */ - v4l2_get_timestamp(&common->cur_frm->vb.timestamp); + common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); /* Change status of the cur_frm */ vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -386,8 +387,8 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id) if (!channel_first_int[i][channel_id]) { /* Mark status of the cur_frm to * done and unlock semaphore on it */ - v4l2_get_timestamp( - &common->cur_frm->vb.timestamp); + common->cur_frm->vb.vb2_buf.timestamp = + ktime_get_ns(); vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); /* Make cur_frm pointing to next_frm */ diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index d82e717acba7..93782f15b825 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -86,7 +86,7 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state) dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); if (src_vb && dst_vb) { - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; dst_vb->timecode = src_vb->timecode; dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->flags |= @@ -125,7 +125,7 @@ static int gsc_get_bufs(struct gsc_ctx *ctx) if (ret) return ret; - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; return 0; } @@ -212,7 +212,6 @@ put_device: } static int gsc_m2m_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { diff --git a/drivers/media/platform/exynos4-is/common.c b/drivers/media/platform/exynos4-is/common.c index 0eb34ecb8ee4..b6716c57b5db 100644 --- a/drivers/media/platform/exynos4-is/common.c +++ b/drivers/media/platform/exynos4-is/common.c @@ -10,7 +10,7 @@ */ #include <linux/module.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "common.h" /* Called with the media graph mutex held or entity->stream_count > 0. 
*/ diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c index 99e57320e6f7..0d549a6c8a13 100644 --- a/drivers/media/platform/exynos4-is/fimc-capture.c +++ b/drivers/media/platform/exynos4-is/fimc-capture.c @@ -193,7 +193,7 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf) test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) { v_buf = fimc_active_queue_pop(cap); - v4l2_get_timestamp(&v_buf->vb.timestamp); + v_buf->vb.vb2_buf.timestamp = ktime_get_ns(); v_buf->vb.sequence = cap->frame_count++; vb2_buffer_done(&v_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -338,37 +338,36 @@ int fimc_capture_resume(struct fimc_dev *fimc) } -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { - const struct v4l2_format *pfmt = parg; - const struct v4l2_pix_format_mplane *pixm = NULL; struct fimc_ctx *ctx = vq->drv_priv; struct fimc_frame *frame = &ctx->d_frame; struct fimc_fmt *fmt = frame->fmt; - unsigned long wh; + unsigned long wh = frame->f_width * frame->f_height; int i; - if (pfmt) { - pixm = &pfmt->fmt.pix_mp; - fmt = fimc_find_format(&pixm->pixelformat, NULL, - FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1); - wh = pixm->width * pixm->height; - } else { - wh = frame->f_width * frame->f_height; - } - if (fmt == NULL) return -EINVAL; + if (*num_planes) { + if (*num_planes != fmt->memplanes) + return -EINVAL; + for (i = 0; i < *num_planes; i++) { + if (sizes[i] < (wh * fmt->depth[i]) / 8) + return -EINVAL; + allocators[i] = ctx->fimc_dev->alloc_ctx; + } + return 0; + } + *num_planes = fmt->memplanes; for (i = 0; i < fmt->memplanes; i++) { unsigned int size = (wh * fmt->depth[i]) / 8; - if (pixm) - sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); - else if (fimc_fmt_is_user_defined(fmt->color)) + + if (fimc_fmt_is_user_defined(fmt->color)) sizes[i] = frame->payload[i]; else sizes[i] = max_t(u32, size, frame->payload[i]); diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h index d336fa2916df..6b7435453d2a 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.h +++ b/drivers/media/platform/exynos4-is/fimc-core.h @@ -27,7 +27,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-mediabus.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #define dbg(fmt, args...) 
\ pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##args) diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index 6e6648446f00..0dd22ec66694 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -30,7 +30,7 @@ #include <media/v4l2-ioctl.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "common.h" #include "media-dev.h" @@ -39,39 +39,36 @@ #include "fimc-is-param.h" static int isp_video_capture_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { - const struct v4l2_format *pfmt = parg; struct fimc_isp *isp = vb2_get_drv_priv(vq); struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt; - const struct v4l2_pix_format_mplane *pixm = NULL; - const struct fimc_fmt *fmt; + const struct fimc_fmt *fmt = isp->video_capture.format; unsigned int wh, i; - if (pfmt) { - pixm = &pfmt->fmt.pix_mp; - fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, -1); - wh = pixm->width * pixm->height; - } else { - fmt = isp->video_capture.format; - wh = vid_fmt->width * vid_fmt->height; - } + wh = vid_fmt->width * vid_fmt->height; if (fmt == NULL) return -EINVAL; *num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN, FIMC_ISP_REQ_BUFS_MAX); + if (*num_planes) { + if (*num_planes != fmt->memplanes) + return -EINVAL; + for (i = 0; i < *num_planes; i++) { + if (sizes[i] < (wh * fmt->depth[i]) / 8) + return -EINVAL; + allocators[i] = isp->alloc_ctx; + } + return 0; + } + *num_planes = fmt->memplanes; for (i = 0; i < fmt->memplanes; i++) { - unsigned int size = (wh * fmt->depth[i]) / 8; - if (pixm) - sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); - else - sizes[i] = size; + sizes[i] = (wh * fmt->depth[i]) / 8; allocators[i] = isp->alloc_ctx; } @@ -254,7 +251,7 @@ void fimc_isp_video_irq_handler(struct fimc_is *is) buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count; vbuf = &video->buffers[buf_index]->vb; - v4l2_get_timestamp(&vbuf->timestamp); + vbuf->vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); video->buf_mask &= ~BIT(buf_index); diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h index c2d25df85db9..e0686b5f1bf8 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.h +++ b/drivers/media/platform/exynos4-is/fimc-isp.h @@ -24,7 +24,7 @@ #include <media/videobuf2-v4l2.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> extern int fimc_isp_debug; diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c index 0477716a20db..f0acc550d065 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c +++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c @@ -12,7 +12,7 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/io.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "fimc-lite-reg.h" #include "fimc-lite.h" diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index 60660c3a5de0..639ee710499e 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ 
-30,7 +30,7 @@ #include <media/v4l2-mem2mem.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "common.h" #include "fimc-core.h" @@ -292,7 +292,7 @@ static irqreturn_t flite_irq_handler(int irq, void *priv) test_bit(ST_FLITE_RUN, &fimc->state) && !list_empty(&fimc->active_buf_q)) { vbuf = fimc_lite_active_queue_pop(fimc); - v4l2_get_timestamp(&vbuf->vb.timestamp); + vbuf->vb.vb2_buf.timestamp = ktime_get_ns(); vbuf->vb.sequence = fimc->frame_count++; flite_hw_mask_dma_buffer(fimc, vbuf->index); vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -355,37 +355,34 @@ static void stop_streaming(struct vb2_queue *q) fimc_lite_stop_capture(fimc, false); } -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { - const struct v4l2_format *pfmt = parg; - const struct v4l2_pix_format_mplane *pixm = NULL; struct fimc_lite *fimc = vq->drv_priv; struct flite_frame *frame = &fimc->out_frame; const struct fimc_fmt *fmt = frame->fmt; - unsigned long wh; + unsigned long wh = frame->f_width * frame->f_height; int i; - if (pfmt) { - pixm = &pfmt->fmt.pix_mp; - fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, 0, -1); - wh = pixm->width * pixm->height; - } else { - wh = frame->f_width * frame->f_height; - } - if (fmt == NULL) return -EINVAL; + if (*num_planes) { + if (*num_planes != fmt->memplanes) + return -EINVAL; + for (i = 0; i < *num_planes; i++) { + if (sizes[i] < (wh * fmt->depth[i]) / 8) + return -EINVAL; + allocators[i] = fimc->alloc_ctx; + } + return 0; + } + *num_planes = fmt->memplanes; for (i = 0; i < fmt->memplanes; i++) { - unsigned int size = (wh * fmt->depth[i]) / 8; - if (pixm) - sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); - else - sizes[i] = size; + sizes[i] = (wh * fmt->depth[i]) / 8; allocators[i] = fimc->alloc_ctx; } diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h index b302305dedbe..11690d563e06 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.h +++ b/drivers/media/platform/exynos4-is/fimc-lite.h @@ -23,7 +23,7 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #define FIMC_LITE_DRV_NAME "exynos-fimc-lite" #define FLITE_CLK_NAME "flite" diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c index 4d1d64a46b21..5aa857c7b631 100644 --- a/drivers/media/platform/exynos4-is/fimc-m2m.c +++ b/drivers/media/platform/exynos4-is/fimc-m2m.c @@ -132,7 +132,7 @@ static void fimc_device_run(void *priv) if (ret) goto dma_unlock; - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->flags |= src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; @@ -176,7 +176,7 @@ static void fimc_job_abort(void *priv) fimc_m2m_shutdown(priv); } -static int fimc_queue_setup(struct vb2_queue *vq, const void *parg, +static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c index df0cbcb69b6b..0806724553a2 100644 --- 
a/drivers/media/platform/exynos4-is/fimc-reg.c +++ b/drivers/media/platform/exynos4-is/fimc-reg.c @@ -13,7 +13,7 @@ #include <linux/io.h> #include <linux/regmap.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "media-dev.h" #include "fimc-reg.h" diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 4f5586a4cbff..9481ce3201a2 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -31,7 +31,7 @@ #include <media/v4l2-ctrls.h> #include <media/v4l2-of.h> #include <media/media-device.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "media-dev.h" #include "fimc-core.h" diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h index 03214541f149..93a96126929b 100644 --- a/drivers/media/platform/exynos4-is/media-dev.h +++ b/drivers/media/platform/exynos4-is/media-dev.h @@ -19,7 +19,7 @@ #include <media/media-entity.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include "fimc-core.h" #include "fimc-lite.h" diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index 4b85105dc159..ff5dabf24694 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -29,7 +29,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/videodev2.h> -#include <media/exynos-fimc.h> +#include <media/drv-intf/exynos-fimc.h> #include <media/v4l2-of.h> #include <media/v4l2-subdev.h> diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 29973f9bf8db..7383818c2be6 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -207,7 +207,7 @@ static void dma_callback(void *data) src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->flags |= src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; @@ -798,7 +798,6 @@ struct vb2_dc_conf { }; static int deinterlace_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index aa2b44041d3f..9b878deb1437 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -25,7 +25,7 @@ #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> -#include <media/ov7670.h> +#include <media/i2c/ov7670.h> #include <media/videobuf2-vmalloc.h> #include <media/videobuf2-dma-contig.h> #include <media/videobuf2-dma-sg.h> @@ -226,7 +226,7 @@ static void mcam_buffer_done(struct mcam_camera *cam, int frame, vbuf->vb2_buf.planes[0].bytesused = cam->pix_format.sizeimage; vbuf->sequence = cam->buf_seq[frame]; vbuf->field = V4L2_FIELD_NONE; - v4l2_get_timestamp(&vbuf->timestamp); + vbuf->vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&vbuf->vb2_buf, 0, cam->pix_format.sizeimage); vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); } @@ -1049,24 +1049,25 @@ static int 
mcam_read_setup(struct mcam_camera *cam) */ static int mcam_vb_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbufs, + unsigned int *nbufs, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct mcam_camera *cam = vb2_get_drv_priv(vq); int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2; + unsigned size = cam->pix_format.sizeimage; - if (fmt && fmt->fmt.pix.sizeimage < cam->pix_format.sizeimage) - return -EINVAL; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : cam->pix_format.sizeimage; - *num_planes = 1; /* Someday we have to support planar formats... */ if (*nbufs < minbufs) *nbufs = minbufs; if (cam->buffer_mode == B_DMA_contig) alloc_ctxs[0] = cam->vb_alloc_ctx; else if (cam->buffer_mode == B_DMA_sg) alloc_ctxs[0] = cam->vb_alloc_ctx_sg; + + if (*num_planes) + return sizes[0] < size ? -EINVAL : 0; + sizes[0] = size; + *num_planes = 1; /* Someday we have to support planar formats... */ return 0; } diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index b5f165a68566..816f4b6a7b8e 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -18,7 +18,7 @@ #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> -#include <media/mmp-camera.h> +#include <linux/platform_data/media/mmp-camera.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/gpio.h> diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 03a1b606655d..3c4012d42d69 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -375,7 +375,7 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data) src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->flags |= @@ -689,7 +689,6 @@ static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = { * Queue operations */ static int emmaprp_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index c6e252760c62..b8638e4e1627 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c @@ -79,10 +79,12 @@ void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) int j; for (j = 0; j < VRFB_NUM_BUFS; j++) { - omap_vout_free_buffer(vout->smsshado_virt_addr[j], - vout->smsshado_size); - vout->smsshado_virt_addr[j] = 0; - vout->smsshado_phy_addr[j] = 0; + if (vout->smsshado_virt_addr[j]) { + omap_vout_free_buffer(vout->smsshado_virt_addr[j], + vout->smsshado_size); + vout->smsshado_virt_addr[j] = 0; + vout->smsshado_phy_addr[j] = 0; + } } } diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index f4f591652432..ecadca3e945b 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -320,7 +320,6 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh) */ static int isp_video_queue_setup(struct vb2_queue *queue, - const void *parg, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], 
void *alloc_ctxs[]) { @@ -467,7 +466,7 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) list_del(&buf->irqlist); spin_unlock_irqrestore(&video->irqlock, flags); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); /* Do frame number propagation only if this is the output video node. * Frame number either comes from the CSI receivers or it gets diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index f8e3e83c52a2..485f5259acb0 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1015,28 +1015,33 @@ error_free: * ============================================================================ */ static int jpu_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct jpu_ctx *ctx = vb2_get_drv_priv(vq); struct jpu_q_data *q_data; unsigned int i; q_data = jpu_get_q_data(ctx, vq->type); - *nplanes = q_data->format.num_planes; + if (*nplanes) { + if (*nplanes != q_data->format.num_planes) + return -EINVAL; - for (i = 0; i < *nplanes; i++) { - unsigned int q_size = q_data->format.plane_fmt[i].sizeimage; - unsigned int f_size = fmt ? - fmt->fmt.pix_mp.plane_fmt[i].sizeimage : 0; + for (i = 0; i < *nplanes; i++) { + unsigned int q_size = q_data->format.plane_fmt[i].sizeimage; - if (fmt && f_size < q_size) - return -EINVAL; + if (sizes[i] < q_size) + return -EINVAL; + alloc_ctxs[i] = ctx->jpu->alloc_ctx; + } + return 0; + } - sizes[i] = fmt ? f_size : q_size; + *nplanes = q_data->format.num_planes; + + for (i = 0; i < *nplanes; i++) { + sizes[i] = q_data->format.plane_fmt[i].sizeimage; alloc_ctxs[i] = ctx->jpu->alloc_ctx; } @@ -1300,17 +1305,17 @@ static int jpu_release(struct file *file) struct jpu *jpu = video_drvdata(file); struct jpu_ctx *ctx = fh_to_ctx(file->private_data); - mutex_lock(&jpu->mutex); - if (--jpu->ref_count == 0) - clk_disable_unprepare(jpu->clk); - mutex_unlock(&jpu->mutex); - v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); v4l2_ctrl_handler_free(&ctx->ctrl_handler); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); + mutex_lock(&jpu->mutex); + if (--jpu->ref_count == 0) + clk_disable_unprepare(jpu->clk); + mutex_unlock(&jpu->mutex); + return 0; } @@ -1560,12 +1565,9 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id) } dst_buf->field = src_buf->field; - dst_buf->timestamp = src_buf->timestamp; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE) dst_buf->timecode = src_buf->timecode; - dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; - dst_buf->flags |= src_buf->flags & - V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_buf->flags = src_buf->flags & (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index 537b858cb94a..ec3abbed87d9 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -338,7 +338,7 @@ irqreturn_t s3c_camif_irq_handler(int irq, void *priv) if (!WARN_ON(vbuf == NULL)) { /* Dequeue a filled buffer */ - v4l2_get_timestamp(&vbuf->vb.timestamp); + vbuf->vb.vb2_buf.timestamp = ktime_get_ns(); vbuf->vb.sequence = vp->frame_sequence++; vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE); @@ -435,39 +435,28 @@ static void stop_streaming(struct 
vb2_queue *vq) camif_stop_capture(vp); } -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { - const struct v4l2_format *pfmt = parg; - const struct v4l2_pix_format *pix = NULL; struct camif_vp *vp = vb2_get_drv_priv(vq); struct camif_dev *camif = vp->camif; struct camif_frame *frame = &vp->out_frame; - const struct camif_fmt *fmt; + const struct camif_fmt *fmt = vp->out_fmt; unsigned int size; - if (pfmt) { - pix = &pfmt->fmt.pix; - fmt = s3c_camif_find_format(vp, &pix->pixelformat, -1); - if (fmt == NULL) - return -EINVAL; - size = (pix->width * pix->height * fmt->depth) / 8; - } else { - fmt = vp->out_fmt; - if (fmt == NULL) - return -EINVAL; - size = (frame->f_width * frame->f_height * fmt->depth) / 8; - } - - *num_planes = 1; + if (fmt == NULL) + return -EINVAL; - if (pix) - sizes[0] = max(size, pix->sizeimage); - else - sizes[0] = size; + size = (frame->f_width * frame->f_height * fmt->depth) / 8; allocators[0] = camif->alloc_ctx; + if (*num_planes) + return sizes[0] < size ? -EINVAL : 0; + + *num_planes = 1; + sizes[0] = size; + pr_debug("size: %u\n", sizes[0]); return 0; } diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h index adaf1969ef63..57cbc3d9725d 100644 --- a/drivers/media/platform/s3c-camif/camif-core.h +++ b/drivers/media/platform/s3c-camif/camif-core.h @@ -26,7 +26,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> #include <media/videobuf2-v4l2.h> -#include <media/s3c_camif.h> +#include <media/drv-intf/s3c_camif.h> #define S3C_CAMIF_DRIVER_NAME "s3c-camif" #define CAMIF_REQ_BUFS_MIN 3 diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/s3c-camif/camif-regs.h index af2d472ea1dd..5ad36c1c2a5d 100644 --- a/drivers/media/platform/s3c-camif/camif-regs.h +++ b/drivers/media/platform/s3c-camif/camif-regs.h @@ -13,7 +13,7 @@ #define CAMIF_REGS_H_ #include "camif-core.h" -#include <media/s3c_camif.h> +#include <media/drv-intf/s3c_camif.h> /* * The id argument indicates the processing path: diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index e1936d9d27da..74bd46ca7942 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -101,7 +101,7 @@ static struct g2d_frame *get_frame(struct g2d_ctx *ctx, } } -static int g2d_queue_setup(struct vb2_queue *vq, const void *parg, +static int g2d_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -552,7 +552,7 @@ static irqreturn_t g2d_isr(int irq, void *prv) BUG_ON(dst == NULL); dst->timecode = src->timecode; - dst->timestamp = src->timestamp; + dst->vb2_buf.timestamp = src->vb2_buf.timestamp; dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 4a608cbe0fdb..c3b13a630edf 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -2430,7 +2430,6 @@ static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = { */ static int s5p_jpeg_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -2621,7 +2620,7 @@ static irqreturn_t s5p_jpeg_irq(int 
irq, void *dev_id) } dst_buf->timecode = src_buf->timecode; - dst_buf->timestamp = src_buf->timestamp; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; @@ -2752,7 +2751,7 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id) dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx); dst_buf->timecode = src_buf->timecode; - dst_buf->timestamp = src_buf->timestamp; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; v4l2_m2m_buf_done(src_buf, state); if (curr_ctx->mode == S5P_JPEG_ENCODE) diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 3ffe2ecfd5ef..927ab4928779 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -85,6 +85,26 @@ void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx) spin_unlock_irqrestore(&dev->condlock, flags); } +int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev) +{ + unsigned long flags; + int ctx; + + spin_lock_irqsave(&dev->condlock, flags); + ctx = dev->curr_ctx; + do { + ctx = (ctx + 1) % MFC_NUM_CONTEXTS; + if (ctx == dev->curr_ctx) { + if (!test_bit(ctx, &dev->ctx_work_bits)) + ctx = -EAGAIN; + break; + } + } while (!test_bit(ctx, &dev->ctx_work_bits)); + spin_unlock_irqrestore(&dev->condlock, flags); + + return ctx; +} + /* Wake up context wait_queue */ static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) @@ -105,6 +125,20 @@ static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason, wake_up(&dev->queue); } +void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq) +{ + struct s5p_mfc_buf *b; + int i; + + while (!list_empty(lh)) { + b = list_entry(lh->next, struct s5p_mfc_buf, list); + for (i = 0; i < b->b->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&b->b->vb2_buf, i, 0); + vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR); + list_del(&b->list); + } +} + static void s5p_mfc_watchdog(unsigned long arg) { struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg; @@ -150,10 +184,8 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work) if (!ctx) continue; ctx->state = MFCINST_ERROR; - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->dst_queue, &ctx->vq_dst); - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->src_queue, &ctx->vq_src); + s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); + s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); clear_work_bit(ctx); wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0); } @@ -233,8 +265,8 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx) == dec_y_addr) { dst_buf->b->timecode = src_buf->b->timecode; - dst_buf->b->timestamp = - src_buf->b->timestamp; + dst_buf->b->vb2_buf.timestamp = + src_buf->b->vb2_buf.timestamp; dst_buf->b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_buf->b->flags |= @@ -327,7 +359,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, unsigned int dst_frame_status; unsigned int dec_frame_status; struct s5p_mfc_buf *src_buf; - unsigned long flags; unsigned int res_change; dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev) @@ -343,17 +374,16 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, if (res_change == S5P_FIMV_RES_INCREASE || res_change == S5P_FIMV_RES_DECREASE) { ctx->state = MFCINST_RES_CHANGE_INIT; - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); 
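/*
 * A minimal sketch (not taken from the patch) of the two timestamp idioms
 * the hunks above converge on: capture paths store a monotonic nanosecond
 * value directly in vb2_buffer::timestamp instead of calling
 * v4l2_get_timestamp(), and mem2mem paths copy that field from the source
 * to the destination buffer. The my_* names are illustrative stand-ins,
 * not code from this series.
 */
#include <linux/ktime.h>
#include <linux/videodev2.h>
#include <media/videobuf2-v4l2.h>

static void my_capture_buf_done(struct vb2_v4l2_buffer *vbuf, u32 sequence)
{
	vbuf->sequence = sequence;
	/* u64 nanoseconds from the monotonic clock, taken at completion */
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}

static void my_m2m_copy_metadata(struct vb2_v4l2_buffer *dst,
				 const struct vb2_v4l2_buffer *src)
{
	/* propagate the source timestamp and its origin flags to the result */
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->timecode = src->timecode;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
}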
wake_up_ctx(ctx, reason, err); WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); s5p_mfc_clock_off(); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); return; } if (ctx->dpb_flush_flag) ctx->dpb_flush_flag = 0; - spin_lock_irqsave(&dev->irqlock, flags); /* All frames remaining in the buffer have been extracted */ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) { if (ctx->state == MFCINST_RES_CHANGE_FLUSH) { @@ -413,11 +443,10 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, } } leave_handle_frame: - spin_unlock_irqrestore(&dev->irqlock, flags); if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) || ctx->dst_queue_cnt < ctx->pb_count) clear_work_bit(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); wake_up_ctx(ctx, reason, err); WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); s5p_mfc_clock_off(); @@ -425,15 +454,13 @@ leave_handle_frame: if (test_bit(0, &dev->enter_suspend)) wake_up_dev(dev, reason, err); else - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } /* Error handling for interrupt */ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) { - unsigned long flags; - mfc_err("Interrupt Error: %08x\n", err); if (ctx != NULL) { @@ -450,13 +477,9 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, clear_work_bit(ctx); ctx->state = MFCINST_ERROR; /* Mark all dst buffers as having an error */ - spin_lock_irqsave(&dev->irqlock, flags); - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->dst_queue, &ctx->vq_dst); + s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); /* Mark all src buffers as having an error */ - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->src_queue, &ctx->vq_src); - spin_unlock_irqrestore(&dev->irqlock, flags); + s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); wake_up_ctx(ctx, reason, err); break; default: @@ -467,7 +490,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, } } WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); s5p_mfc_clock_off(); wake_up_dev(dev, reason, err); return; @@ -491,7 +514,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height, dev); - s5p_mfc_hw_call_void(dev->mfc_ops, dec_calc_dpb_size, ctx); + s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx); ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, dev); @@ -518,11 +541,11 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, ctx->head_processed = 1; } } - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); clear_work_bit(ctx); WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); s5p_mfc_clock_off(); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); wake_up_ctx(ctx, reason, err); } @@ -532,12 +555,11 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, { struct s5p_mfc_buf *src_buf; struct s5p_mfc_dev *dev; - unsigned long flags; if (ctx == NULL) return; dev = ctx->dev; - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); ctx->int_type = reason; ctx->int_err = err; ctx->int_cond = 
1; @@ -545,7 +567,6 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, if (err == 0) { ctx->state = MFCINST_RUNNING; if (!ctx->dpb_flush_flag && ctx->head_processed) { - spin_lock_irqsave(&dev->irqlock, flags); if (!list_empty(&ctx->src_queue)) { src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); @@ -554,7 +575,6 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, vb2_buffer_done(&src_buf->b->vb2_buf, VB2_BUF_STATE_DONE); } - spin_unlock_irqrestore(&dev->irqlock, flags); } else { ctx->dpb_flush_flag = 0; } @@ -563,7 +583,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, s5p_mfc_clock_off(); wake_up(&ctx->queue); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); @@ -582,7 +602,6 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx) ctx->state = MFCINST_FINISHED; - spin_lock(&dev->irqlock); if (!list_empty(&ctx->dst_queue)) { mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); @@ -591,7 +610,6 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx) vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0); vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); } - spin_unlock(&dev->irqlock); clear_work_bit(ctx); @@ -599,7 +617,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx) s5p_mfc_clock_off(); wake_up(&ctx->queue); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } /* Interrupt processing */ @@ -613,6 +631,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) mfc_debug_enter(); /* Reset the timeout watchdog */ atomic_set(&dev->watchdog_cnt, 0); + spin_lock(&dev->irqlock); ctx = dev->ctx[dev->curr_ctx]; /* Get the reason of interrupt and the error code */ reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev); @@ -639,15 +658,15 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) if (ctx->state == MFCINST_FINISHING && list_empty(&ctx->ref_queue)) { - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); s5p_mfc_handle_stream_complete(ctx); break; } - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); wake_up_ctx(ctx, reason, err); WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); s5p_mfc_clock_off(); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { s5p_mfc_handle_frame(ctx, reason, err); } @@ -677,7 +696,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) case S5P_MFC_R2H_CMD_WAKEUP_RET: if (ctx) clear_work_bit(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); wake_up_dev(dev, reason, err); clear_bit(0, &dev->hw_lock); clear_bit(0, &dev->enter_suspend); @@ -688,7 +707,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) break; case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET: - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); ctx->int_type = reason; ctx->int_err = err; s5p_mfc_handle_stream_complete(ctx); @@ -702,12 +721,13 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) default: mfc_debug(2, "Unknown int reason\n"); - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); } + spin_unlock(&dev->irqlock); 
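/*
 * A rough sketch (not taken from the patch) of the locking shape the s5p-mfc
 * hunks above arrive at: the spin_lock_irqsave(&dev->irqlock, ...) pairs
 * scattered through the reason-specific helpers are removed, and the whole
 * interrupt handler now runs under a single plain spin_lock() of
 * dev->irqlock, so one lock covers every queue/context update the helpers
 * perform (process-context paths such as stop_streaming keep using
 * _irqsave). The same files also drop s5p_mfc_hw_call_void(): a typeof()
 * cast in s5p_mfc_hw_call() lets void and value-returning ops share one
 * macro. struct my_mfc_dev stands in for the driver's s5p_mfc_dev.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_mfc_dev {
	spinlock_t irqlock;		/* lock when operating on context */
};

static irqreturn_t my_mfc_irq(int irq, void *priv)
{
	struct my_mfc_dev *dev = priv;

	spin_lock(&dev->irqlock);	/* hard-irq context: no _irqsave needed */
	/*
	 * ... read the interrupt reason and error code, then dispatch to the
	 * frame/error/seq handlers, which may complete buffers and requeue
	 * work while still holding irqlock ...
	 */
	spin_unlock(&dev->irqlock);

	return IRQ_HANDLED;
}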
mfc_debug_leave(); return IRQ_HANDLED; irq_cleanup_hw: - s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); ctx->int_type = reason; ctx->int_err = err; ctx->int_cond = 1; @@ -716,7 +736,8 @@ irq_cleanup_hw: s5p_mfc_clock_off(); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + spin_unlock(&dev->irqlock); mfc_debug(2, "Exit via irq_cleanup_hw\n"); return IRQ_HANDLED; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index d1a3f9b1bc44..9eb2481ec292 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -308,7 +308,7 @@ struct s5p_mfc_dev { struct s5p_mfc_pm pm; struct s5p_mfc_variant *variant; int num_inst; - spinlock_t irqlock; /* lock when operating on videobuf2 queues */ + spinlock_t irqlock; /* lock when operating on context */ spinlock_t condlock; /* lock when changing/checking if a context is ready to be processed */ struct mutex mfc_mutex; /* video_device lock */ @@ -653,7 +653,7 @@ struct s5p_mfc_ctx { unsigned int bits; } slice_size; - struct s5p_mfc_codec_ops *c_ops; + const struct s5p_mfc_codec_ops *c_ops; struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS]; struct v4l2_ctrl_handler ctrl_handler; @@ -694,13 +694,7 @@ struct mfc_control { /* Macro for making hardware specific calls */ #define s5p_mfc_hw_call(f, op, args...) \ - ((f && f->op) ? f->op(args) : -ENODEV) - -#define s5p_mfc_hw_call_void(f, op, args...) \ -do { \ - if (f && f->op) \ - f->op(args); \ -} while (0) + ((f && f->op) ? f->op(args) : (typeof(f->op(args)))(-ENODEV)) #define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh) #define ctrl_to_ctx(__ctrl) \ @@ -710,6 +704,8 @@ void clear_work_bit(struct s5p_mfc_ctx *ctx); void set_work_bit(struct s5p_mfc_ctx *ctx); void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx); void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx); +int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev); +void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq); #define HAS_PORTNUM(dev) (dev ? (dev->variant ? \ (dev->variant->port_num ? 
1 : 0) : 0) : 0) diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 40d8a03a141d..cc888713b3b6 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -319,7 +319,7 @@ void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev) s5p_mfc_clock_on(); s5p_mfc_reset(dev); - s5p_mfc_hw_call_void(dev->mfc_ops, release_dev_context_buffer, dev); + s5p_mfc_hw_call(dev->mfc_ops, release_dev_context_buffer, dev); s5p_mfc_clock_off(); } @@ -468,7 +468,7 @@ int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx) } set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); if (s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) { /* Error or timeout */ @@ -482,9 +482,9 @@ int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx) err_free_desc_buf: if (ctx->type == MFCINST_DECODER) - s5p_mfc_hw_call_void(dev->mfc_ops, release_dec_desc_buffer, ctx); + s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer, ctx); err_free_inst_buf: - s5p_mfc_hw_call_void(dev->mfc_ops, release_instance_buffer, ctx); + s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx); err: return ret; } @@ -493,17 +493,17 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx) { ctx->state = MFCINST_RETURN_INST; set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); /* Wait until instance is returned or timeout occurred */ if (s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) mfc_err("Err returning instance\n"); /* Free resources */ - s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, release_instance_buffer, ctx); + s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx); + s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx); if (ctx->type == MFCINST_DECODER) - s5p_mfc_hw_call_void(dev->mfc_ops, release_dec_desc_buffer, ctx); + s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer, ctx); ctx->inst_no = MFC_NO_INSTANCE_SET; ctx->state = MFCINST_FREE; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 8c5060a7534f..f2d6376ce618 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -252,7 +252,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) return 0; } -static struct s5p_mfc_codec_ops decoder_codec_ops = { +static const struct s5p_mfc_codec_ops decoder_codec_ops = { .pre_seq_start = NULL, .post_seq_start = NULL, .pre_frame_start = NULL, @@ -523,7 +523,7 @@ static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); if (ret) goto out; - s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, ctx); + s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx); ctx->dst_bufs_cnt = 0; } else if (ctx->capture_state == QUEUE_FREE) { WARN_ON(ctx->dst_bufs_cnt != 0); @@ -551,7 +551,7 @@ static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_INIT_BUFFERS_RET, 0); } else { @@ -831,7 +831,7 @@ static int vidioc_decoder_cmd(struct file *file, 
void *priv, if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); spin_unlock_irqrestore(&dev->irqlock, flags); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { mfc_err("EOS: marking last buffer of stream"); buf = list_entry(ctx->src_queue.prev, @@ -888,7 +888,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = { }; static int s5p_mfc_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *buf_count, + unsigned int *buf_count, unsigned int *plane_count, unsigned int psize[], void *allocators[]) { @@ -1012,7 +1012,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); return 0; } @@ -1023,42 +1023,41 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q) struct s5p_mfc_dev *dev = ctx->dev; int aborted = 0; + spin_lock_irqsave(&dev->irqlock, flags); if ((ctx->state == MFCINST_FINISHING || ctx->state == MFCINST_RUNNING) && dev->curr_ctx == ctx->num && dev->hw_lock) { ctx->state = MFCINST_ABORT; + spin_unlock_irqrestore(&dev->irqlock, flags); s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0); aborted = 1; + spin_lock_irqsave(&dev->irqlock, flags); } if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - spin_lock_irqsave(&dev->irqlock, flags); - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->dst_queue, &ctx->vq_dst); + s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); INIT_LIST_HEAD(&ctx->dst_queue); ctx->dst_queue_cnt = 0; ctx->dpb_flush_flag = 1; ctx->dec_dst_flag = 0; - spin_unlock_irqrestore(&dev->irqlock, flags); if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) { ctx->state = MFCINST_FLUSH; set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + spin_unlock_irqrestore(&dev->irqlock, flags); if (s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0)) mfc_err("Err flushing buffers\n"); + spin_lock_irqsave(&dev->irqlock, flags); } - } - if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - spin_lock_irqsave(&dev->irqlock, flags); - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->src_queue, &ctx->vq_src); + } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); INIT_LIST_HEAD(&ctx->src_queue); ctx->src_queue_cnt = 0; - spin_unlock_irqrestore(&dev->irqlock, flags); } if (aborted) ctx->state = MFCINST_RUNNING; + spin_unlock_irqrestore(&dev->irqlock, flags); } @@ -1091,7 +1090,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb) } if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } static struct vb2_ops s5p_mfc_dec_qops = { @@ -1104,7 +1103,7 @@ static struct vb2_ops s5p_mfc_dec_qops = { .buf_queue = s5p_mfc_buf_queue, }; -struct s5p_mfc_codec_ops *get_dec_codec_ops(void) +const struct s5p_mfc_codec_ops *get_dec_codec_ops(void) { return &decoder_codec_ops; } @@ -1119,7 +1118,7 @@ const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void) return &s5p_mfc_dec_ioctl_ops; } -#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \ +#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2WHICH(x) == V4L2_CTRL_CLASS_MPEG) \ && V4L2_CTRL_DRIVER_PRIV(x)) int 
s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx) diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h index d06a7cab5eb1..886628b153f0 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h @@ -13,7 +13,7 @@ #ifndef S5P_MFC_DEC_H_ #define S5P_MFC_DEC_H_ -struct s5p_mfc_codec_ops *get_dec_codec_ops(void); +const struct s5p_mfc_codec_ops *get_dec_codec_ops(void); struct vb2_ops *get_dec_queue_ops(void); const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void); struct s5p_mfc_fmt *get_dec_def_fmt(bool src); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 5c678ec9c9f2..0434f02a7175 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -769,15 +769,12 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx) struct s5p_mfc_buf *dst_mb; unsigned long dst_addr; unsigned int dst_size; - unsigned long flags; - spin_lock_irqsave(&dev->irqlock, flags); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); - s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, + s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, dst_size); - spin_unlock_irqrestore(&dev->irqlock, flags); return 0; } @@ -786,11 +783,9 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_buf *dst_mb; - unsigned long flags; unsigned int enc_pb_count; if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { - spin_lock_irqsave(&dev->irqlock, flags); if (!list_empty(&ctx->dst_queue)) { dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); @@ -802,14 +797,13 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) vb2_buffer_done(&dst_mb->b->vb2_buf, VB2_BUF_STATE_DONE); } - spin_unlock_irqrestore(&dev->irqlock, flags); } if (!IS_MFCV6_PLUS(dev)) { ctx->state = MFCINST_RUNNING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_enc_dpb_count, dev); @@ -826,25 +820,20 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx) struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *dst_mb; struct s5p_mfc_buf *src_mb; - unsigned long flags; unsigned long src_y_addr, src_c_addr, dst_addr; unsigned int dst_size; - spin_lock_irqsave(&dev->irqlock, flags); src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0); src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1); - s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx, + s5p_mfc_hw_call(dev->mfc_ops, set_enc_frame_buffer, ctx, src_y_addr, src_c_addr); - spin_unlock_irqrestore(&dev->irqlock, flags); - spin_lock_irqsave(&dev->irqlock, flags); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); - s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, + s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, dst_size); - spin_unlock_irqrestore(&dev->irqlock, flags); return 
0; } @@ -857,7 +846,6 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) unsigned long mb_y_addr, mb_c_addr; int slice_type; unsigned int strm_size; - unsigned long flags; slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); @@ -865,9 +853,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) mfc_debug(2, "Encoded stream size: %d\n", strm_size); mfc_debug(2, "Display order: %d\n", mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); - spin_lock_irqsave(&dev->irqlock, flags); if (slice_type >= 0) { - s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx, + s5p_mfc_hw_call(dev->mfc_ops, get_enc_frame_buffer, ctx, &enc_y_addr, &enc_c_addr); list_for_each_entry(mb_entry, &ctx->src_queue, list) { mb_y_addr = vb2_dma_contig_plane_dma_addr( @@ -929,14 +916,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size); vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); } - spin_unlock_irqrestore(&dev->irqlock, flags); if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) clear_work_bit(ctx); return 0; } -static struct s5p_mfc_codec_ops encoder_codec_ops = { +static const struct s5p_mfc_codec_ops encoder_codec_ops = { .pre_seq_start = enc_pre_seq_start, .post_seq_start = enc_post_seq_start, .pre_frame_start = enc_pre_frame_start, @@ -1120,7 +1106,7 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) pix_fmt_mp->width, pix_fmt_mp->height, ctx->img_width, ctx->img_height); - s5p_mfc_hw_call_void(dev->mfc_ops, enc_calc_src_size, ctx); + s5p_mfc_hw_call(dev->mfc_ops, enc_calc_src_size, ctx); pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size; pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width; pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size; @@ -1178,7 +1164,7 @@ static int vidioc_reqbufs(struct file *file, void *priv, if (reqbufs->count == 0) { mfc_debug(2, "Freeing buffers\n"); ret = vb2_reqbufs(&ctx->vq_src, reqbufs); - s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, + s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx); ctx->output_state = QUEUE_FREE; return ret; @@ -1741,7 +1727,7 @@ static int vidioc_encoder_cmd(struct file *file, void *priv, if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); spin_unlock_irqrestore(&dev->irqlock, flags); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { mfc_debug(2, "EOS: marking last buffer of stream\n"); buf = list_entry(ctx->src_queue.prev, @@ -1818,7 +1804,6 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb) } static int s5p_mfc_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *buf_count, unsigned int *plane_count, unsigned int psize[], void *allocators[]) { @@ -1969,7 +1954,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); return 0; } @@ -1990,15 +1975,13 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q) ctx->state = MFCINST_FINISHED; spin_lock_irqsave(&dev->irqlock, flags); if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, - &ctx->dst_queue, &ctx->vq_dst); + s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); 
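/*
 * A minimal sketch (not taken from the patch) of the vb2 queue_setup()
 * contract that the hunks throughout this series migrate to: the
 * "const void *parg" format argument is gone, sizes are derived from the
 * driver's currently set format, and a non-zero *num_planes on entry (the
 * VIDIOC_CREATE_BUFS path) is only validated against that format, never
 * overwritten. struct my_ctx and its fields are illustrative stand-ins for
 * a driver's own state.
 */
#include <linux/videodev2.h>
#include <media/videobuf2-v4l2.h>

struct my_ctx {
	struct v4l2_pix_format fmt;	/* format negotiated via S_FMT */
	void *alloc_ctx;		/* dma-contig/sg allocation context */
};

static int my_queue_setup(struct vb2_queue *vq,
			  unsigned int *num_buffers, unsigned int *num_planes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	struct my_ctx *ctx = vb2_get_drv_priv(vq);
	unsigned int size = ctx->fmt.sizeimage;

	alloc_ctxs[0] = ctx->alloc_ctx;

	if (*num_planes)		/* CREATE_BUFS: validate caller's sizes */
		return sizes[0] < size ? -EINVAL : 0;

	*num_planes = 1;		/* REQBUFS: report the driver's sizes */
	sizes[0] = size;
	return 0;
}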
INIT_LIST_HEAD(&ctx->dst_queue); ctx->dst_queue_cnt = 0; } if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { cleanup_ref_queue(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, &ctx->src_queue, - &ctx->vq_src); + s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); INIT_LIST_HEAD(&ctx->src_queue); ctx->src_queue_cnt = 0; } @@ -2038,7 +2021,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb) } if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); - s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } static struct vb2_ops s5p_mfc_enc_qops = { @@ -2052,7 +2035,7 @@ static struct vb2_ops s5p_mfc_enc_qops = { .buf_queue = s5p_mfc_buf_queue, }; -struct s5p_mfc_codec_ops *get_enc_codec_ops(void) +const struct s5p_mfc_codec_ops *get_enc_codec_ops(void) { return &encoder_codec_ops; } @@ -2067,7 +2050,7 @@ const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void) return &s5p_mfc_enc_ioctl_ops; } -#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \ +#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2WHICH(x) == V4L2_CTRL_CLASS_MPEG) \ && V4L2_CTRL_DRIVER_PRIV(x)) int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx) diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h index 5118d46b3a9e..d0d42f818832 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h @@ -13,7 +13,7 @@ #ifndef S5P_MFC_ENC_H_ #define S5P_MFC_ENC_H_ -struct s5p_mfc_codec_ops *get_enc_codec_ops(void); +const struct s5p_mfc_codec_ops *get_enc_codec_ops(void); struct vb2_ops *get_enc_queue_ops(void); const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void); struct s5p_mfc_fmt *get_enc_def_fmt(bool src); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h index 77a08b19b46d..b6ac417ab63e 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h @@ -20,254 +20,254 @@ struct s5p_mfc_regs { /* codec common registers */ - volatile void __iomem *risc_on; - volatile void __iomem *risc2host_int; - volatile void __iomem *host2risc_int; - volatile void __iomem *risc_base_address; - volatile void __iomem *mfc_reset; - volatile void __iomem *host2risc_command; - volatile void __iomem *risc2host_command; - volatile void __iomem *mfc_bus_reset_ctrl; - volatile void __iomem *firmware_version; - volatile void __iomem *instance_id; - volatile void __iomem *codec_type; - volatile void __iomem *context_mem_addr; - volatile void __iomem *context_mem_size; - volatile void __iomem *pixel_format; - volatile void __iomem *metadata_enable; - volatile void __iomem *mfc_version; - volatile void __iomem *dbg_info_enable; - volatile void __iomem *dbg_buffer_addr; - volatile void __iomem *dbg_buffer_size; - volatile void __iomem *hed_control; - volatile void __iomem *mfc_timeout_value; - volatile void __iomem *hed_shared_mem_addr; - volatile void __iomem *dis_shared_mem_addr;/* only v7 */ - volatile void __iomem *ret_instance_id; - volatile void __iomem *error_code; - volatile void __iomem *dbg_buffer_output_size; - volatile void __iomem *metadata_status; - volatile void __iomem *metadata_addr_mb_info; - volatile void __iomem *metadata_size_mb_info; - volatile void __iomem *dbg_info_stage_counter; + void __iomem *risc_on; + void __iomem *risc2host_int; + void __iomem *host2risc_int; + void __iomem *risc_base_address; + void __iomem *mfc_reset; + void __iomem *host2risc_command; + 
void __iomem *risc2host_command; + void __iomem *mfc_bus_reset_ctrl; + void __iomem *firmware_version; + void __iomem *instance_id; + void __iomem *codec_type; + void __iomem *context_mem_addr; + void __iomem *context_mem_size; + void __iomem *pixel_format; + void __iomem *metadata_enable; + void __iomem *mfc_version; + void __iomem *dbg_info_enable; + void __iomem *dbg_buffer_addr; + void __iomem *dbg_buffer_size; + void __iomem *hed_control; + void __iomem *mfc_timeout_value; + void __iomem *hed_shared_mem_addr; + void __iomem *dis_shared_mem_addr;/* only v7 */ + void __iomem *ret_instance_id; + void __iomem *error_code; + void __iomem *dbg_buffer_output_size; + void __iomem *metadata_status; + void __iomem *metadata_addr_mb_info; + void __iomem *metadata_size_mb_info; + void __iomem *dbg_info_stage_counter; /* decoder registers */ - volatile void __iomem *d_crc_ctrl; - volatile void __iomem *d_dec_options; - volatile void __iomem *d_display_delay; - volatile void __iomem *d_set_frame_width; - volatile void __iomem *d_set_frame_height; - volatile void __iomem *d_sei_enable; - volatile void __iomem *d_min_num_dpb; - volatile void __iomem *d_min_first_plane_dpb_size; - volatile void __iomem *d_min_second_plane_dpb_size; - volatile void __iomem *d_min_third_plane_dpb_size;/* only v8 */ - volatile void __iomem *d_min_num_mv; - volatile void __iomem *d_mvc_num_views; - volatile void __iomem *d_min_num_dis;/* only v7 */ - volatile void __iomem *d_min_first_dis_size;/* only v7 */ - volatile void __iomem *d_min_second_dis_size;/* only v7 */ - volatile void __iomem *d_min_third_dis_size;/* only v7 */ - volatile void __iomem *d_post_filter_luma_dpb0;/* v7 and v8 */ - volatile void __iomem *d_post_filter_luma_dpb1;/* v7 and v8 */ - volatile void __iomem *d_post_filter_luma_dpb2;/* only v7 */ - volatile void __iomem *d_post_filter_chroma_dpb0;/* v7 and v8 */ - volatile void __iomem *d_post_filter_chroma_dpb1;/* v7 and v8 */ - volatile void __iomem *d_post_filter_chroma_dpb2;/* only v7 */ - volatile void __iomem *d_num_dpb; - volatile void __iomem *d_num_mv; - volatile void __iomem *d_init_buffer_options; - volatile void __iomem *d_first_plane_dpb_stride_size;/* only v8 */ - volatile void __iomem *d_second_plane_dpb_stride_size;/* only v8 */ - volatile void __iomem *d_third_plane_dpb_stride_size;/* only v8 */ - volatile void __iomem *d_first_plane_dpb_size; - volatile void __iomem *d_second_plane_dpb_size; - volatile void __iomem *d_third_plane_dpb_size;/* only v8 */ - volatile void __iomem *d_mv_buffer_size; - volatile void __iomem *d_first_plane_dpb; - volatile void __iomem *d_second_plane_dpb; - volatile void __iomem *d_third_plane_dpb; - volatile void __iomem *d_mv_buffer; - volatile void __iomem *d_scratch_buffer_addr; - volatile void __iomem *d_scratch_buffer_size; - volatile void __iomem *d_metadata_buffer_addr; - volatile void __iomem *d_metadata_buffer_size; - volatile void __iomem *d_nal_start_options;/* v7 and v8 */ - volatile void __iomem *d_cpb_buffer_addr; - volatile void __iomem *d_cpb_buffer_size; - volatile void __iomem *d_available_dpb_flag_upper; - volatile void __iomem *d_available_dpb_flag_lower; - volatile void __iomem *d_cpb_buffer_offset; - volatile void __iomem *d_slice_if_enable; - volatile void __iomem *d_picture_tag; - volatile void __iomem *d_stream_data_size; - volatile void __iomem *d_dynamic_dpb_flag_upper;/* v7 and v8 */ - volatile void __iomem *d_dynamic_dpb_flag_lower;/* v7 and v8 */ - volatile void __iomem *d_display_frame_width; - volatile void __iomem 
*d_display_frame_height; - volatile void __iomem *d_display_status; - volatile void __iomem *d_display_first_plane_addr; - volatile void __iomem *d_display_second_plane_addr; - volatile void __iomem *d_display_third_plane_addr;/* only v8 */ - volatile void __iomem *d_display_frame_type; - volatile void __iomem *d_display_crop_info1; - volatile void __iomem *d_display_crop_info2; - volatile void __iomem *d_display_picture_profile; - volatile void __iomem *d_display_luma_crc;/* v7 and v8 */ - volatile void __iomem *d_display_chroma0_crc;/* v7 and v8 */ - volatile void __iomem *d_display_chroma1_crc;/* only v8 */ - volatile void __iomem *d_display_luma_crc_top;/* only v6 */ - volatile void __iomem *d_display_chroma_crc_top;/* only v6 */ - volatile void __iomem *d_display_luma_crc_bot;/* only v6 */ - volatile void __iomem *d_display_chroma_crc_bot;/* only v6 */ - volatile void __iomem *d_display_aspect_ratio; - volatile void __iomem *d_display_extended_ar; - volatile void __iomem *d_decoded_frame_width; - volatile void __iomem *d_decoded_frame_height; - volatile void __iomem *d_decoded_status; - volatile void __iomem *d_decoded_first_plane_addr; - volatile void __iomem *d_decoded_second_plane_addr; - volatile void __iomem *d_decoded_third_plane_addr;/* only v8 */ - volatile void __iomem *d_decoded_frame_type; - volatile void __iomem *d_decoded_crop_info1; - volatile void __iomem *d_decoded_crop_info2; - volatile void __iomem *d_decoded_picture_profile; - volatile void __iomem *d_decoded_nal_size; - volatile void __iomem *d_decoded_luma_crc; - volatile void __iomem *d_decoded_chroma0_crc; - volatile void __iomem *d_decoded_chroma1_crc;/* only v8 */ - volatile void __iomem *d_ret_picture_tag_top; - volatile void __iomem *d_ret_picture_tag_bot; - volatile void __iomem *d_ret_picture_time_top; - volatile void __iomem *d_ret_picture_time_bot; - volatile void __iomem *d_chroma_format; - volatile void __iomem *d_vc1_info;/* v7 and v8 */ - volatile void __iomem *d_mpeg4_info; - volatile void __iomem *d_h264_info; - volatile void __iomem *d_metadata_addr_concealed_mb; - volatile void __iomem *d_metadata_size_concealed_mb; - volatile void __iomem *d_metadata_addr_vc1_param; - volatile void __iomem *d_metadata_size_vc1_param; - volatile void __iomem *d_metadata_addr_sei_nal; - volatile void __iomem *d_metadata_size_sei_nal; - volatile void __iomem *d_metadata_addr_vui; - volatile void __iomem *d_metadata_size_vui; - volatile void __iomem *d_metadata_addr_mvcvui;/* v7 and v8 */ - volatile void __iomem *d_metadata_size_mvcvui;/* v7 and v8 */ - volatile void __iomem *d_mvc_view_id; - volatile void __iomem *d_frame_pack_sei_avail; - volatile void __iomem *d_frame_pack_arrgment_id; - volatile void __iomem *d_frame_pack_sei_info; - volatile void __iomem *d_frame_pack_grid_pos; - volatile void __iomem *d_display_recovery_sei_info;/* v7 and v8 */ - volatile void __iomem *d_decoded_recovery_sei_info;/* v7 and v8 */ - volatile void __iomem *d_display_first_addr;/* only v7 */ - volatile void __iomem *d_display_second_addr;/* only v7 */ - volatile void __iomem *d_display_third_addr;/* only v7 */ - volatile void __iomem *d_decoded_first_addr;/* only v7 */ - volatile void __iomem *d_decoded_second_addr;/* only v7 */ - volatile void __iomem *d_decoded_third_addr;/* only v7 */ - volatile void __iomem *d_used_dpb_flag_upper;/* v7 and v8 */ - volatile void __iomem *d_used_dpb_flag_lower;/* v7 and v8 */ + void __iomem *d_crc_ctrl; + void __iomem *d_dec_options; + void __iomem *d_display_delay; + void __iomem 
*d_set_frame_width; + void __iomem *d_set_frame_height; + void __iomem *d_sei_enable; + void __iomem *d_min_num_dpb; + void __iomem *d_min_first_plane_dpb_size; + void __iomem *d_min_second_plane_dpb_size; + void __iomem *d_min_third_plane_dpb_size;/* only v8 */ + void __iomem *d_min_num_mv; + void __iomem *d_mvc_num_views; + void __iomem *d_min_num_dis;/* only v7 */ + void __iomem *d_min_first_dis_size;/* only v7 */ + void __iomem *d_min_second_dis_size;/* only v7 */ + void __iomem *d_min_third_dis_size;/* only v7 */ + void __iomem *d_post_filter_luma_dpb0;/* v7 and v8 */ + void __iomem *d_post_filter_luma_dpb1;/* v7 and v8 */ + void __iomem *d_post_filter_luma_dpb2;/* only v7 */ + void __iomem *d_post_filter_chroma_dpb0;/* v7 and v8 */ + void __iomem *d_post_filter_chroma_dpb1;/* v7 and v8 */ + void __iomem *d_post_filter_chroma_dpb2;/* only v7 */ + void __iomem *d_num_dpb; + void __iomem *d_num_mv; + void __iomem *d_init_buffer_options; + void __iomem *d_first_plane_dpb_stride_size;/* only v8 */ + void __iomem *d_second_plane_dpb_stride_size;/* only v8 */ + void __iomem *d_third_plane_dpb_stride_size;/* only v8 */ + void __iomem *d_first_plane_dpb_size; + void __iomem *d_second_plane_dpb_size; + void __iomem *d_third_plane_dpb_size;/* only v8 */ + void __iomem *d_mv_buffer_size; + void __iomem *d_first_plane_dpb; + void __iomem *d_second_plane_dpb; + void __iomem *d_third_plane_dpb; + void __iomem *d_mv_buffer; + void __iomem *d_scratch_buffer_addr; + void __iomem *d_scratch_buffer_size; + void __iomem *d_metadata_buffer_addr; + void __iomem *d_metadata_buffer_size; + void __iomem *d_nal_start_options;/* v7 and v8 */ + void __iomem *d_cpb_buffer_addr; + void __iomem *d_cpb_buffer_size; + void __iomem *d_available_dpb_flag_upper; + void __iomem *d_available_dpb_flag_lower; + void __iomem *d_cpb_buffer_offset; + void __iomem *d_slice_if_enable; + void __iomem *d_picture_tag; + void __iomem *d_stream_data_size; + void __iomem *d_dynamic_dpb_flag_upper;/* v7 and v8 */ + void __iomem *d_dynamic_dpb_flag_lower;/* v7 and v8 */ + void __iomem *d_display_frame_width; + void __iomem *d_display_frame_height; + void __iomem *d_display_status; + void __iomem *d_display_first_plane_addr; + void __iomem *d_display_second_plane_addr; + void __iomem *d_display_third_plane_addr;/* only v8 */ + void __iomem *d_display_frame_type; + void __iomem *d_display_crop_info1; + void __iomem *d_display_crop_info2; + void __iomem *d_display_picture_profile; + void __iomem *d_display_luma_crc;/* v7 and v8 */ + void __iomem *d_display_chroma0_crc;/* v7 and v8 */ + void __iomem *d_display_chroma1_crc;/* only v8 */ + void __iomem *d_display_luma_crc_top;/* only v6 */ + void __iomem *d_display_chroma_crc_top;/* only v6 */ + void __iomem *d_display_luma_crc_bot;/* only v6 */ + void __iomem *d_display_chroma_crc_bot;/* only v6 */ + void __iomem *d_display_aspect_ratio; + void __iomem *d_display_extended_ar; + void __iomem *d_decoded_frame_width; + void __iomem *d_decoded_frame_height; + void __iomem *d_decoded_status; + void __iomem *d_decoded_first_plane_addr; + void __iomem *d_decoded_second_plane_addr; + void __iomem *d_decoded_third_plane_addr;/* only v8 */ + void __iomem *d_decoded_frame_type; + void __iomem *d_decoded_crop_info1; + void __iomem *d_decoded_crop_info2; + void __iomem *d_decoded_picture_profile; + void __iomem *d_decoded_nal_size; + void __iomem *d_decoded_luma_crc; + void __iomem *d_decoded_chroma0_crc; + void __iomem *d_decoded_chroma1_crc;/* only v8 */ + void __iomem *d_ret_picture_tag_top; + void 
__iomem *d_ret_picture_tag_bot; + void __iomem *d_ret_picture_time_top; + void __iomem *d_ret_picture_time_bot; + void __iomem *d_chroma_format; + void __iomem *d_vc1_info;/* v7 and v8 */ + void __iomem *d_mpeg4_info; + void __iomem *d_h264_info; + void __iomem *d_metadata_addr_concealed_mb; + void __iomem *d_metadata_size_concealed_mb; + void __iomem *d_metadata_addr_vc1_param; + void __iomem *d_metadata_size_vc1_param; + void __iomem *d_metadata_addr_sei_nal; + void __iomem *d_metadata_size_sei_nal; + void __iomem *d_metadata_addr_vui; + void __iomem *d_metadata_size_vui; + void __iomem *d_metadata_addr_mvcvui;/* v7 and v8 */ + void __iomem *d_metadata_size_mvcvui;/* v7 and v8 */ + void __iomem *d_mvc_view_id; + void __iomem *d_frame_pack_sei_avail; + void __iomem *d_frame_pack_arrgment_id; + void __iomem *d_frame_pack_sei_info; + void __iomem *d_frame_pack_grid_pos; + void __iomem *d_display_recovery_sei_info;/* v7 and v8 */ + void __iomem *d_decoded_recovery_sei_info;/* v7 and v8 */ + void __iomem *d_display_first_addr;/* only v7 */ + void __iomem *d_display_second_addr;/* only v7 */ + void __iomem *d_display_third_addr;/* only v7 */ + void __iomem *d_decoded_first_addr;/* only v7 */ + void __iomem *d_decoded_second_addr;/* only v7 */ + void __iomem *d_decoded_third_addr;/* only v7 */ + void __iomem *d_used_dpb_flag_upper;/* v7 and v8 */ + void __iomem *d_used_dpb_flag_lower;/* v7 and v8 */ /* encoder registers */ - volatile void __iomem *e_frame_width; - volatile void __iomem *e_frame_height; - volatile void __iomem *e_cropped_frame_width; - volatile void __iomem *e_cropped_frame_height; - volatile void __iomem *e_frame_crop_offset; - volatile void __iomem *e_enc_options; - volatile void __iomem *e_picture_profile; - volatile void __iomem *e_vbv_buffer_size; - volatile void __iomem *e_vbv_init_delay; - volatile void __iomem *e_fixed_picture_qp; - volatile void __iomem *e_rc_config; - volatile void __iomem *e_rc_qp_bound; - volatile void __iomem *e_rc_qp_bound_pb;/* v7 and v8 */ - volatile void __iomem *e_rc_mode; - volatile void __iomem *e_mb_rc_config; - volatile void __iomem *e_padding_ctrl; - volatile void __iomem *e_air_threshold; - volatile void __iomem *e_mv_hor_range; - volatile void __iomem *e_mv_ver_range; - volatile void __iomem *e_num_dpb; - volatile void __iomem *e_luma_dpb; - volatile void __iomem *e_chroma_dpb; - volatile void __iomem *e_me_buffer; - volatile void __iomem *e_scratch_buffer_addr; - volatile void __iomem *e_scratch_buffer_size; - volatile void __iomem *e_tmv_buffer0; - volatile void __iomem *e_tmv_buffer1; - volatile void __iomem *e_ir_buffer_addr;/* v7 and v8 */ - volatile void __iomem *e_source_first_plane_addr; - volatile void __iomem *e_source_second_plane_addr; - volatile void __iomem *e_source_third_plane_addr;/* v7 and v8 */ - volatile void __iomem *e_source_first_plane_stride;/* v7 and v8 */ - volatile void __iomem *e_source_second_plane_stride;/* v7 and v8 */ - volatile void __iomem *e_source_third_plane_stride;/* v7 and v8 */ - volatile void __iomem *e_stream_buffer_addr; - volatile void __iomem *e_stream_buffer_size; - volatile void __iomem *e_roi_buffer_addr; - volatile void __iomem *e_param_change; - volatile void __iomem *e_ir_size; - volatile void __iomem *e_gop_config; - volatile void __iomem *e_mslice_mode; - volatile void __iomem *e_mslice_size_mb; - volatile void __iomem *e_mslice_size_bits; - volatile void __iomem *e_frame_insertion; - volatile void __iomem *e_rc_frame_rate; - volatile void __iomem *e_rc_bit_rate; - volatile void 
__iomem *e_rc_roi_ctrl; - volatile void __iomem *e_picture_tag; - volatile void __iomem *e_bit_count_enable; - volatile void __iomem *e_max_bit_count; - volatile void __iomem *e_min_bit_count; - volatile void __iomem *e_metadata_buffer_addr; - volatile void __iomem *e_metadata_buffer_size; - volatile void __iomem *e_encoded_source_first_plane_addr; - volatile void __iomem *e_encoded_source_second_plane_addr; - volatile void __iomem *e_encoded_source_third_plane_addr;/* v7 and v8 */ - volatile void __iomem *e_stream_size; - volatile void __iomem *e_slice_type; - volatile void __iomem *e_picture_count; - volatile void __iomem *e_ret_picture_tag; - volatile void __iomem *e_stream_buffer_write_pointer; /* only v6 */ - volatile void __iomem *e_recon_luma_dpb_addr; - volatile void __iomem *e_recon_chroma_dpb_addr; - volatile void __iomem *e_metadata_addr_enc_slice; - volatile void __iomem *e_metadata_size_enc_slice; - volatile void __iomem *e_mpeg4_options; - volatile void __iomem *e_mpeg4_hec_period; - volatile void __iomem *e_aspect_ratio; - volatile void __iomem *e_extended_sar; - volatile void __iomem *e_h264_options; - volatile void __iomem *e_h264_options_2;/* v7 and v8 */ - volatile void __iomem *e_h264_lf_alpha_offset; - volatile void __iomem *e_h264_lf_beta_offset; - volatile void __iomem *e_h264_i_period; - volatile void __iomem *e_h264_fmo_slice_grp_map_type; - volatile void __iomem *e_h264_fmo_num_slice_grp_minus1; - volatile void __iomem *e_h264_fmo_slice_grp_change_dir; - volatile void __iomem *e_h264_fmo_slice_grp_change_rate_minus1; - volatile void __iomem *e_h264_fmo_run_length_minus1_0; - volatile void __iomem *e_h264_aso_slice_order_0; - volatile void __iomem *e_h264_chroma_qp_offset; - volatile void __iomem *e_h264_num_t_layer; - volatile void __iomem *e_h264_hierarchical_qp_layer0; - volatile void __iomem *e_h264_frame_packing_sei_info; - volatile void __iomem *e_h264_nal_control;/* v7 and v8 */ - volatile void __iomem *e_mvc_frame_qp_view1; - volatile void __iomem *e_mvc_rc_bit_rate_view1; - volatile void __iomem *e_mvc_rc_qbound_view1; - volatile void __iomem *e_mvc_rc_mode_view1; - volatile void __iomem *e_mvc_inter_view_prediction_on; - volatile void __iomem *e_vp8_options;/* v7 and v8 */ - volatile void __iomem *e_vp8_filter_options;/* v7 and v8 */ - volatile void __iomem *e_vp8_golden_frame_option;/* v7 and v8 */ - volatile void __iomem *e_vp8_num_t_layer;/* v7 and v8 */ - volatile void __iomem *e_vp8_hierarchical_qp_layer0;/* v7 and v8 */ - volatile void __iomem *e_vp8_hierarchical_qp_layer1;/* v7 and v8 */ - volatile void __iomem *e_vp8_hierarchical_qp_layer2;/* v7 and v8 */ + void __iomem *e_frame_width; + void __iomem *e_frame_height; + void __iomem *e_cropped_frame_width; + void __iomem *e_cropped_frame_height; + void __iomem *e_frame_crop_offset; + void __iomem *e_enc_options; + void __iomem *e_picture_profile; + void __iomem *e_vbv_buffer_size; + void __iomem *e_vbv_init_delay; + void __iomem *e_fixed_picture_qp; + void __iomem *e_rc_config; + void __iomem *e_rc_qp_bound; + void __iomem *e_rc_qp_bound_pb;/* v7 and v8 */ + void __iomem *e_rc_mode; + void __iomem *e_mb_rc_config; + void __iomem *e_padding_ctrl; + void __iomem *e_air_threshold; + void __iomem *e_mv_hor_range; + void __iomem *e_mv_ver_range; + void __iomem *e_num_dpb; + void __iomem *e_luma_dpb; + void __iomem *e_chroma_dpb; + void __iomem *e_me_buffer; + void __iomem *e_scratch_buffer_addr; + void __iomem *e_scratch_buffer_size; + void __iomem *e_tmv_buffer0; + void __iomem *e_tmv_buffer1; + void 
__iomem *e_ir_buffer_addr;/* v7 and v8 */ + void __iomem *e_source_first_plane_addr; + void __iomem *e_source_second_plane_addr; + void __iomem *e_source_third_plane_addr;/* v7 and v8 */ + void __iomem *e_source_first_plane_stride;/* v7 and v8 */ + void __iomem *e_source_second_plane_stride;/* v7 and v8 */ + void __iomem *e_source_third_plane_stride;/* v7 and v8 */ + void __iomem *e_stream_buffer_addr; + void __iomem *e_stream_buffer_size; + void __iomem *e_roi_buffer_addr; + void __iomem *e_param_change; + void __iomem *e_ir_size; + void __iomem *e_gop_config; + void __iomem *e_mslice_mode; + void __iomem *e_mslice_size_mb; + void __iomem *e_mslice_size_bits; + void __iomem *e_frame_insertion; + void __iomem *e_rc_frame_rate; + void __iomem *e_rc_bit_rate; + void __iomem *e_rc_roi_ctrl; + void __iomem *e_picture_tag; + void __iomem *e_bit_count_enable; + void __iomem *e_max_bit_count; + void __iomem *e_min_bit_count; + void __iomem *e_metadata_buffer_addr; + void __iomem *e_metadata_buffer_size; + void __iomem *e_encoded_source_first_plane_addr; + void __iomem *e_encoded_source_second_plane_addr; + void __iomem *e_encoded_source_third_plane_addr;/* v7 and v8 */ + void __iomem *e_stream_size; + void __iomem *e_slice_type; + void __iomem *e_picture_count; + void __iomem *e_ret_picture_tag; + void __iomem *e_stream_buffer_write_pointer; /* only v6 */ + void __iomem *e_recon_luma_dpb_addr; + void __iomem *e_recon_chroma_dpb_addr; + void __iomem *e_metadata_addr_enc_slice; + void __iomem *e_metadata_size_enc_slice; + void __iomem *e_mpeg4_options; + void __iomem *e_mpeg4_hec_period; + void __iomem *e_aspect_ratio; + void __iomem *e_extended_sar; + void __iomem *e_h264_options; + void __iomem *e_h264_options_2;/* v7 and v8 */ + void __iomem *e_h264_lf_alpha_offset; + void __iomem *e_h264_lf_beta_offset; + void __iomem *e_h264_i_period; + void __iomem *e_h264_fmo_slice_grp_map_type; + void __iomem *e_h264_fmo_num_slice_grp_minus1; + void __iomem *e_h264_fmo_slice_grp_change_dir; + void __iomem *e_h264_fmo_slice_grp_change_rate_minus1; + void __iomem *e_h264_fmo_run_length_minus1_0; + void __iomem *e_h264_aso_slice_order_0; + void __iomem *e_h264_chroma_qp_offset; + void __iomem *e_h264_num_t_layer; + void __iomem *e_h264_hierarchical_qp_layer0; + void __iomem *e_h264_frame_packing_sei_info; + void __iomem *e_h264_nal_control;/* v7 and v8 */ + void __iomem *e_mvc_frame_qp_view1; + void __iomem *e_mvc_rc_bit_rate_view1; + void __iomem *e_mvc_rc_qbound_view1; + void __iomem *e_mvc_rc_mode_view1; + void __iomem *e_mvc_inter_view_prediction_on; + void __iomem *e_vp8_options;/* v7 and v8 */ + void __iomem *e_vp8_filter_options;/* v7 and v8 */ + void __iomem *e_vp8_golden_frame_option;/* v7 and v8 */ + void __iomem *e_vp8_num_t_layer;/* v7 and v8 */ + void __iomem *e_vp8_hierarchical_qp_layer0;/* v7 and v8 */ + void __iomem *e_vp8_hierarchical_qp_layer1;/* v7 and v8 */ + void __iomem *e_vp8_hierarchical_qp_layer2;/* v7 and v8 */ }; struct s5p_mfc_hw_ops { @@ -281,28 +281,14 @@ struct s5p_mfc_hw_ops { void (*release_dev_context_buffer)(struct s5p_mfc_dev *dev); void (*dec_calc_dpb_size)(struct s5p_mfc_ctx *ctx); void (*enc_calc_src_size)(struct s5p_mfc_ctx *ctx); - int (*set_dec_stream_buffer)(struct s5p_mfc_ctx *ctx, - int buf_addr, unsigned int start_num_byte, - unsigned int buf_size); - int (*set_dec_frame_buffer)(struct s5p_mfc_ctx *ctx); int (*set_enc_stream_buffer)(struct s5p_mfc_ctx *ctx, unsigned long addr, unsigned int size); void (*set_enc_frame_buffer)(struct s5p_mfc_ctx *ctx, unsigned long 
y_addr, unsigned long c_addr); void (*get_enc_frame_buffer)(struct s5p_mfc_ctx *ctx, unsigned long *y_addr, unsigned long *c_addr); - int (*set_enc_ref_buffer)(struct s5p_mfc_ctx *ctx); - int (*init_decode)(struct s5p_mfc_ctx *ctx); - int (*init_encode)(struct s5p_mfc_ctx *ctx); - int (*encode_one_frame)(struct s5p_mfc_ctx *ctx); void (*try_run)(struct s5p_mfc_dev *dev); - void (*cleanup_queue)(struct list_head *lh, - struct vb2_queue *vq); void (*clear_int_flags)(struct s5p_mfc_dev *dev); - void (*write_info)(struct s5p_mfc_ctx *ctx, unsigned int data, - unsigned int ofs); - unsigned int (*read_info)(struct s5p_mfc_ctx *ctx, - unsigned long ofs); int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev); int (*get_dec_y_adr)(struct s5p_mfc_dev *dev); int (*get_dspl_status)(struct s5p_mfc_dev *dev); @@ -313,7 +299,6 @@ struct s5p_mfc_hw_ops { int (*get_int_reason)(struct s5p_mfc_dev *dev); int (*get_int_err)(struct s5p_mfc_dev *dev); int (*err_dec)(unsigned int err); - int (*err_dspl)(unsigned int err); int (*get_img_width)(struct s5p_mfc_dev *dev); int (*get_img_height)(struct s5p_mfc_dev *dev); int (*get_dpb_count)(struct s5p_mfc_dev *dev); @@ -322,10 +307,6 @@ struct s5p_mfc_hw_ops { int (*get_enc_strm_size)(struct s5p_mfc_dev *dev); int (*get_enc_slice_type)(struct s5p_mfc_dev *dev); int (*get_enc_dpb_count)(struct s5p_mfc_dev *dev); - int (*get_enc_pic_count)(struct s5p_mfc_dev *dev); - int (*get_sei_avail_status)(struct s5p_mfc_ctx *ctx); - int (*get_mvc_num_views)(struct s5p_mfc_dev *dev); - int (*get_mvc_view_id)(struct s5p_mfc_dev *dev); unsigned int (*get_pic_type_top)(struct s5p_mfc_ctx *ctx); unsigned int (*get_pic_type_bot)(struct s5p_mfc_ctx *ctx); unsigned int (*get_crop_info_h)(struct s5p_mfc_ctx *ctx); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 873c933bc7d4..81e1e4ce6c24 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1153,27 +1153,6 @@ static int s5p_mfc_encode_one_frame_v5(struct s5p_mfc_ctx *ctx) return 0; } -static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev) -{ - unsigned long flags; - int new_ctx; - int cnt; - - spin_lock_irqsave(&dev->condlock, flags); - new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS; - cnt = 0; - while (!test_bit(new_ctx, &dev->ctx_work_bits)) { - new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS; - if (++cnt > MFC_NUM_CONTEXTS) { - /* No contexts to run */ - spin_unlock_irqrestore(&dev->condlock, flags); - return -EAGAIN; - } - } - spin_unlock_irqrestore(&dev->condlock, flags); - return new_ctx; -} - static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; @@ -1187,7 +1166,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *temp_vb; - unsigned long flags; if (ctx->state == MFCINST_FINISHING) { last_frame = MFC_DEC_LAST_FRAME; @@ -1197,11 +1175,9 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) return 0; } - spin_lock_irqsave(&dev->irqlock, flags); /* Frames are being decoded */ if (list_empty(&ctx->src_queue)) { mfc_debug(2, "No src buffers\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } /* Get the next source buffer */ @@ -1210,7 +1186,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) s5p_mfc_set_dec_stream_buffer_v5(ctx, vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), ctx->consumed_stream, 
temp_vb->b->vb2_buf.planes[0].bytesused); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) { last_frame = MFC_DEC_LAST_FRAME; @@ -1224,21 +1199,17 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *dst_mb; struct s5p_mfc_buf *src_mb; unsigned long src_y_addr, src_c_addr, dst_addr; unsigned int dst_size; - spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { mfc_debug(2, "no src buffers\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } if (list_empty(&ctx->dst_queue)) { mfc_debug(2, "no dst buffers\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } if (list_empty(&ctx->src_queue)) { @@ -1270,7 +1241,6 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; mfc_debug(2, "encoding buffer with index=%d state=%d\n", src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state); @@ -1281,11 +1251,9 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *temp_vb; /* Initializing decoding - parsing header */ - spin_lock_irqsave(&dev->irqlock, flags); mfc_debug(2, "Preparing to init decoding\n"); temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_set_dec_desc_buffer(ctx); @@ -1294,7 +1262,6 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) s5p_mfc_set_dec_stream_buffer_v5(ctx, vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0, temp_vb->b->vb2_buf.planes[0].bytesused); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_init_decode_v5(ctx); } @@ -1302,18 +1269,15 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *dst_mb; unsigned long dst_addr; unsigned int dst_size; s5p_mfc_set_enc_ref_buffer_v5(ctx); - spin_lock_irqsave(&dev->irqlock, flags); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_init_encode_v5(ctx); } @@ -1321,7 +1285,6 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *temp_vb; int ret; @@ -1335,11 +1298,9 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) "before starting processing\n"); return -EAGAIN; } - spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { mfc_err("Header has been deallocated in the middle of" " initialization\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EIO; } temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); @@ -1348,7 +1309,6 @@ 
static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) s5p_mfc_set_dec_stream_buffer_v5(ctx, vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0, temp_vb->b->vb2_buf.planes[0].bytesused); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; ret = s5p_mfc_set_dec_frame_buffer_v5(ctx); if (ret) { @@ -1472,21 +1432,6 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev) } } - -static void s5p_mfc_cleanup_queue_v5(struct list_head *lh, struct vb2_queue *vq) -{ - struct s5p_mfc_buf *b; - int i; - - while (!list_empty(lh)) { - b = list_entry(lh->next, struct s5p_mfc_buf, list); - for (i = 0; i < b->b->vb2_buf.num_planes; i++) - vb2_set_plane_payload(&b->b->vb2_buf, i, 0); - vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR); - list_del(&b->list); - } -} - static void s5p_mfc_clear_int_flags_v5(struct s5p_mfc_dev *dev) { mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT); @@ -1590,11 +1535,6 @@ static int s5p_mfc_err_dec_v5(unsigned int err) return (err & S5P_FIMV_ERR_DEC_MASK) >> S5P_FIMV_ERR_DEC_SHIFT; } -static int s5p_mfc_err_dspl_v5(unsigned int err) -{ - return (err & S5P_FIMV_ERR_DSPL_MASK) >> S5P_FIMV_ERR_DSPL_SHIFT; -} - static int s5p_mfc_get_img_width_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_HRESOL); @@ -1636,26 +1576,6 @@ static int s5p_mfc_get_enc_dpb_count_v5(struct s5p_mfc_dev *dev) return -1; } -static int s5p_mfc_get_enc_pic_count_v5(struct s5p_mfc_dev *dev) -{ - return mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT); -} - -static int s5p_mfc_get_sei_avail_status_v5(struct s5p_mfc_ctx *ctx) -{ - return s5p_mfc_read_info_v5(ctx, FRAME_PACK_SEI_AVAIL); -} - -static int s5p_mfc_get_mvc_num_views_v5(struct s5p_mfc_dev *dev) -{ - return -1; -} - -static int s5p_mfc_get_mvc_view_id_v5(struct s5p_mfc_dev *dev) -{ - return -1; -} - static unsigned int s5p_mfc_get_pic_type_top_v5(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v5(ctx, PIC_TIME_TOP); @@ -1688,20 +1608,11 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = { .release_dev_context_buffer = s5p_mfc_release_dev_context_buffer_v5, .dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v5, .enc_calc_src_size = s5p_mfc_enc_calc_src_size_v5, - .set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v5, - .set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v5, .set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v5, .set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v5, .get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v5, - .set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v5, - .init_decode = s5p_mfc_init_decode_v5, - .init_encode = s5p_mfc_init_encode_v5, - .encode_one_frame = s5p_mfc_encode_one_frame_v5, .try_run = s5p_mfc_try_run_v5, - .cleanup_queue = s5p_mfc_cleanup_queue_v5, .clear_int_flags = s5p_mfc_clear_int_flags_v5, - .write_info = s5p_mfc_write_info_v5, - .read_info = s5p_mfc_read_info_v5, .get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v5, .get_dec_y_adr = s5p_mfc_get_dec_y_adr_v5, .get_dspl_status = s5p_mfc_get_dspl_status_v5, @@ -1712,7 +1623,6 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = { .get_int_reason = s5p_mfc_get_int_reason_v5, .get_int_err = s5p_mfc_get_int_err_v5, .err_dec = s5p_mfc_err_dec_v5, - .err_dspl = s5p_mfc_err_dspl_v5, .get_img_width = s5p_mfc_get_img_width_v5, .get_img_height = s5p_mfc_get_img_height_v5, .get_dpb_count = s5p_mfc_get_dpb_count_v5, @@ -1721,10 +1631,6 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = { .get_enc_strm_size = s5p_mfc_get_enc_strm_size_v5, .get_enc_slice_type = s5p_mfc_get_enc_slice_type_v5, .get_enc_dpb_count = 
s5p_mfc_get_enc_dpb_count_v5, - .get_enc_pic_count = s5p_mfc_get_enc_pic_count_v5, - .get_sei_avail_status = s5p_mfc_get_sei_avail_status_v5, - .get_mvc_num_views = s5p_mfc_get_mvc_num_views_v5, - .get_mvc_view_id = s5p_mfc_get_mvc_view_id_v5, .get_pic_type_top = s5p_mfc_get_pic_type_top_v5, .get_pic_type_bot = s5p_mfc_get_pic_type_bot_v5, .get_crop_info_h = s5p_mfc_get_crop_info_h_v5, diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index b95845347348..d6f207e859ab 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -505,7 +505,7 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx) } writel(ctx->inst_no, mfc_regs->instance_id); - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_INIT_BUFS_V6, NULL); mfc_debug(2, "After setting buffers.\n"); @@ -603,7 +603,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) } writel(ctx->inst_no, mfc_regs->instance_id); - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_INIT_BUFS_V6, NULL); mfc_debug_leave(); @@ -1378,7 +1378,7 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx) writel(ctx->sei_fp_parse & 0x1, mfc_regs->d_sei_enable); writel(ctx->inst_no, mfc_regs->instance_id); - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_SEQ_HEADER_V6, NULL); mfc_debug_leave(); @@ -1393,7 +1393,7 @@ static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush) if (flush) { dev->curr_ctx = ctx->num; writel(ctx->inst_no, mfc_regs->instance_id); - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_H2R_CMD_FLUSH_V6, NULL); } } @@ -1413,11 +1413,11 @@ static int s5p_mfc_decode_one_frame_v6(struct s5p_mfc_ctx *ctx, * is the last frame or not. 
*/ switch (last_frame) { case 0: - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_FRAME_START_V6, NULL); break; case 1: - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_LAST_FRAME_V6, NULL); break; default: @@ -1455,7 +1455,7 @@ static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx) } writel(ctx->inst_no, mfc_regs->instance_id); - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_SEQ_HEADER_V6, NULL); return 0; @@ -1500,37 +1500,13 @@ static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx) cmd = S5P_FIMV_CH_LAST_FRAME_V6; writel(ctx->inst_no, mfc_regs->instance_id); - s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, cmd, NULL); + s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, cmd, NULL); mfc_debug(2, "--\n"); return 0; } -static inline int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev) -{ - unsigned long flags; - int new_ctx; - int cnt; - - spin_lock_irqsave(&dev->condlock, flags); - mfc_debug(2, "Previous context: %d (bits %08lx)\n", dev->curr_ctx, - dev->ctx_work_bits); - new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS; - cnt = 0; - while (!test_bit(new_ctx, &dev->ctx_work_bits)) { - new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS; - cnt++; - if (cnt > MFC_NUM_CONTEXTS) { - /* No contexts to run */ - spin_unlock_irqrestore(&dev->condlock, flags); - return -EAGAIN; - } - } - spin_unlock_irqrestore(&dev->condlock, flags); - return new_ctx; -} - static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; @@ -1544,7 +1520,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *temp_vb; - unsigned long flags; int last_frame = 0; if (ctx->state == MFCINST_FINISHING) { @@ -1556,11 +1531,9 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) return 0; } - spin_lock_irqsave(&dev->irqlock, flags); /* Frames are being decoded */ if (list_empty(&ctx->src_queue)) { mfc_debug(2, "No src buffers.\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } /* Get the next source buffer */ @@ -1570,7 +1543,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) { @@ -1586,7 +1558,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *dst_mb; struct s5p_mfc_buf *src_mb; unsigned long src_y_addr, src_c_addr, dst_addr; @@ -1595,17 +1566,13 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) */ unsigned int dst_size; - spin_lock_irqsave(&dev->irqlock, flags); - if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { mfc_debug(2, "no src buffers.\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } if (list_empty(&ctx->dst_queue)) { mfc_debug(2, "no dst buffers.\n"); - spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } @@ -1639,8 +1606,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); 
- spin_unlock_irqrestore(&dev->irqlock, flags); - dev->curr_ctx = ctx->num; s5p_mfc_encode_one_frame_v6(ctx); @@ -1650,18 +1615,15 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *temp_vb; /* Initializing decoding - parsing header */ - spin_lock_irqsave(&dev->irqlock, flags); mfc_debug(2, "Preparing to init decoding.\n"); temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); mfc_debug(2, "Header size: %d\n", temp_vb->b->vb2_buf.planes[0].bytesused); s5p_mfc_set_dec_stream_buffer_v6(ctx, vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0, temp_vb->b->vb2_buf.planes[0].bytesused); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_init_decode_v6(ctx); } @@ -1669,18 +1631,14 @@ static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - unsigned long flags; struct s5p_mfc_buf *dst_mb; unsigned long dst_addr; unsigned int dst_size; - spin_lock_irqsave(&dev->irqlock, flags); - dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); - spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_init_encode_v6(ctx); } @@ -1846,21 +1804,6 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) } } - -static void s5p_mfc_cleanup_queue_v6(struct list_head *lh, struct vb2_queue *vq) -{ - struct s5p_mfc_buf *b; - int i; - - while (!list_empty(lh)) { - b = list_entry(lh->next, struct s5p_mfc_buf, list); - for (i = 0; i < b->b->vb2_buf.num_planes; i++) - vb2_set_plane_payload(&b->b->vb2_buf, i, 0); - vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR); - list_del(&b->list); - } -} - static void s5p_mfc_clear_int_flags_v6(struct s5p_mfc_dev *dev) { const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs; @@ -1868,14 +1811,6 @@ static void s5p_mfc_clear_int_flags_v6(struct s5p_mfc_dev *dev) writel(0, mfc_regs->risc2host_int); } -static void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data, - unsigned int ofs) -{ - s5p_mfc_clock_on(); - writel(data, (void __iomem *)((unsigned long)ofs)); - s5p_mfc_clock_off(); -} - static unsigned int s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned long ofs) { @@ -1942,11 +1877,6 @@ static int s5p_mfc_err_dec_v6(unsigned int err) return (err & S5P_FIMV_ERR_DEC_MASK_V6) >> S5P_FIMV_ERR_DEC_SHIFT_V6; } -static int s5p_mfc_err_dspl_v6(unsigned int err) -{ - return (err & S5P_FIMV_ERR_DSPL_MASK_V6) >> S5P_FIMV_ERR_DSPL_SHIFT_V6; -} - static int s5p_mfc_get_img_width_v6(struct s5p_mfc_dev *dev) { return readl(dev->mfc_regs->d_display_frame_width); @@ -1987,27 +1917,6 @@ static int s5p_mfc_get_enc_slice_type_v6(struct s5p_mfc_dev *dev) return readl(dev->mfc_regs->e_slice_type); } -static int s5p_mfc_get_enc_pic_count_v6(struct s5p_mfc_dev *dev) -{ - return readl(dev->mfc_regs->e_picture_count); -} - -static int s5p_mfc_get_sei_avail_status_v6(struct s5p_mfc_ctx *ctx) -{ - struct s5p_mfc_dev *dev = ctx->dev; - return readl(dev->mfc_regs->d_frame_pack_sei_avail); -} - -static int s5p_mfc_get_mvc_num_views_v6(struct s5p_mfc_dev *dev) -{ - return readl(dev->mfc_regs->d_mvc_num_views); -} - -static int s5p_mfc_get_mvc_view_id_v6(struct 
s5p_mfc_dev *dev) -{ - return readl(dev->mfc_regs->d_mvc_view_id); -} - static unsigned int s5p_mfc_get_pic_type_top_v6(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v6(ctx, @@ -2282,20 +2191,11 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = { s5p_mfc_release_dev_context_buffer_v6, .dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v6, .enc_calc_src_size = s5p_mfc_enc_calc_src_size_v6, - .set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v6, - .set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v6, .set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v6, .set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v6, .get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v6, - .set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v6, - .init_decode = s5p_mfc_init_decode_v6, - .init_encode = s5p_mfc_init_encode_v6, - .encode_one_frame = s5p_mfc_encode_one_frame_v6, .try_run = s5p_mfc_try_run_v6, - .cleanup_queue = s5p_mfc_cleanup_queue_v6, .clear_int_flags = s5p_mfc_clear_int_flags_v6, - .write_info = s5p_mfc_write_info_v6, - .read_info = s5p_mfc_read_info_v6, .get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v6, .get_dec_y_adr = s5p_mfc_get_dec_y_adr_v6, .get_dspl_status = s5p_mfc_get_dspl_status_v6, @@ -2306,7 +2206,6 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = { .get_int_reason = s5p_mfc_get_int_reason_v6, .get_int_err = s5p_mfc_get_int_err_v6, .err_dec = s5p_mfc_err_dec_v6, - .err_dspl = s5p_mfc_err_dspl_v6, .get_img_width = s5p_mfc_get_img_width_v6, .get_img_height = s5p_mfc_get_img_height_v6, .get_dpb_count = s5p_mfc_get_dpb_count_v6, @@ -2315,10 +2214,6 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = { .get_enc_strm_size = s5p_mfc_get_enc_strm_size_v6, .get_enc_slice_type = s5p_mfc_get_enc_slice_type_v6, .get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v6, - .get_enc_pic_count = s5p_mfc_get_enc_pic_count_v6, - .get_sei_avail_status = s5p_mfc_get_sei_avail_status_v6, - .get_mvc_num_views = s5p_mfc_get_mvc_num_views_v6, - .get_mvc_view_id = s5p_mfc_get_mvc_view_id_v6, .get_pic_type_top = s5p_mfc_get_pic_type_top_v6, .get_pic_type_bot = s5p_mfc_get_pic_type_bot_v6, .get_crop_info_h = s5p_mfc_get_crop_info_h_v6, diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c index 79940757b34f..e71b13e40f59 100644 --- a/drivers/media/platform/s5p-tv/hdmi_drv.c +++ b/drivers/media/platform/s5p-tv/hdmi_drv.c @@ -33,7 +33,7 @@ #include <linux/regulator/consumer.h> #include <linux/v4l2-dv-timings.h> -#include <media/s5p_hdmi.h> +#include <linux/platform_data/media/s5p_hdmi.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> @@ -627,7 +627,7 @@ static int hdmi_s_dv_timings(struct v4l2_subdev *sd, for (i = 0; i < ARRAY_SIZE(hdmi_timings); i++) if (v4l2_match_dv_timings(&hdmi_timings[i].dv_timings, - timings, 0)) + timings, 0, false)) break; if (i == ARRAY_SIZE(hdmi_timings)) { dev_err(dev, "timings not supported\n"); diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c index dc1c679e136c..d9e7f030294c 100644 --- a/drivers/media/platform/s5p-tv/mixer_video.c +++ b/drivers/media/platform/s5p-tv/mixer_video.c @@ -881,7 +881,7 @@ static const struct v4l2_file_operations mxr_fops = { .unlocked_ioctl = video_ioctl2, }; -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c 
b/drivers/media/platform/s5p-tv/sii9234_drv.c index 8d171310af8f..0a97f9ab4f76 100644 --- a/drivers/media/platform/s5p-tv/sii9234_drv.c +++ b/drivers/media/platform/s5p-tv/sii9234_drv.c @@ -23,7 +23,7 @@ #include <linux/regulator/machine.h> #include <linux/slab.h> -#include <media/sii9234.h> +#include <linux/platform_data/media/sii9234.h> #include <media/v4l2-subdev.h> MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>"); diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index d6ab33e7060a..82b5d69b87fa 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -865,32 +865,14 @@ static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = { /* ========== Queue operations ========== */ static int sh_veu_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *f = parg; struct sh_veu_dev *veu = vb2_get_drv_priv(vq); - struct sh_veu_vfmt *vfmt; - unsigned int size, count = *nbuffers; - - if (f) { - const struct v4l2_pix_format *pix = &f->fmt.pix; - const struct sh_veu_format *fmt = sh_veu_find_fmt(f); - struct v4l2_format ftmp = *f; - - if (fmt->fourcc != pix->pixelformat) - return -EINVAL; - sh_veu_try_fmt(&ftmp, fmt); - if (ftmp.fmt.pix.width != pix->width || - ftmp.fmt.pix.height != pix->height) - return -EINVAL; - size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth : - pix->width * pix->height * fmt->depth / fmt->ydepth; - } else { - vfmt = sh_veu_get_vfmt(veu, vq->type); - size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth; - } + struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type); + unsigned int count = *nbuffers; + unsigned int size = vfmt->bytesperline * vfmt->frame.height * + vfmt->fmt->depth / vfmt->fmt->ydepth; if (count < 2) *nbuffers = count = 2; @@ -900,6 +882,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq, *nbuffers = count; } + if (*nplanes) { + alloc_ctxs[0] = veu->alloc_ctx; + return sizes[0] < size ? 
-EINVAL : 0; + } + *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = veu->alloc_ctx; @@ -1107,7 +1094,7 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id) if (!src || !dst) return IRQ_NONE; - dst->timestamp = src->timestamp; + dst->vb2_buf.timestamp = src->vb2_buf.timestamp; dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c index 2231f8922df3..115740498274 100644 --- a/drivers/media/platform/sh_vou.c +++ b/drivers/media/platform/sh_vou.c @@ -22,7 +22,7 @@ #include <linux/videodev2.h> #include <linux/module.h> -#include <media/sh_vou.h> +#include <media/drv-intf/sh_vou.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> @@ -243,22 +243,21 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev) } /* Locking: caller holds fop_lock mutex */ -static int sh_vou_queue_setup(struct vb2_queue *vq, const void *parg, +static int sh_vou_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq); struct v4l2_pix_format *pix = &vou_dev->pix; int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8; dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__); - if (fmt && fmt->fmt.pix.sizeimage < pix->height * bytes_per_line) - return -EINVAL; - *nplanes = 1; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : pix->height * bytes_per_line; alloc_ctxs[0] = vou_dev->alloc_ctx; + if (*nplanes) + return sizes[0] < pix->height * bytes_per_line ? -EINVAL : 0; + *nplanes = 1; + sizes[0] = pix->height * bytes_per_line; return 0; } @@ -1071,7 +1070,7 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id) list_del(&vb->list); - v4l2_get_timestamp(&vb->vb.timestamp); + vb->vb.vb2_buf.timestamp = ktime_get_ns(); vb->vb.sequence = vou_dev->sequence++; vb->vb.field = V4L2_FIELD_INTERLACED; vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE); diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c index 454f68f0cdad..c398b285180c 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.c +++ b/drivers/media/platform/soc_camera/atmel-isi.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-of.h> #include <media/videobuf2-dma-contig.h> @@ -79,6 +79,7 @@ struct atmel_isi { dma_addr_t fb_descriptors_phys; struct list_head dma_desc_head; struct isi_dma_desc dma_desc[MAX_BUFFER_NUM]; + bool enable_preview_path; struct completion complete; /* ISI peripherial clock */ @@ -103,13 +104,55 @@ static u32 isi_readl(struct atmel_isi *isi, u32 reg) return readl(isi->regs + reg); } +static u32 setup_cfg2_yuv_swap(struct atmel_isi *isi, + const struct soc_camera_format_xlate *xlate) +{ + if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUYV) { + /* all convert to YUYV */ + switch (xlate->code) { + case MEDIA_BUS_FMT_VYUY8_2X8: + return ISI_CFG2_YCC_SWAP_MODE_3; + case MEDIA_BUS_FMT_UYVY8_2X8: + return ISI_CFG2_YCC_SWAP_MODE_2; + case MEDIA_BUS_FMT_YVYU8_2X8: + return ISI_CFG2_YCC_SWAP_MODE_1; + } + } else if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_RGB565) { + /* + * Preview path is enabled, it will convert UYVY to RGB format. + * But if sensor output format is not UYVY, we need to set + * YCC_SWAP_MODE to convert it as UYVY. 
+ */ + switch (xlate->code) { + case MEDIA_BUS_FMT_VYUY8_2X8: + return ISI_CFG2_YCC_SWAP_MODE_1; + case MEDIA_BUS_FMT_YUYV8_2X8: + return ISI_CFG2_YCC_SWAP_MODE_2; + case MEDIA_BUS_FMT_YVYU8_2X8: + return ISI_CFG2_YCC_SWAP_MODE_3; + } + } + + /* + * By default, no swap for the codec path of Atmel ISI. So codec + * output is same as sensor's output. + * For instance, if sensor's output is YUYV, then codec outputs YUYV. + * And if sensor's output is UYVY, then codec outputs UYVY. + */ + return ISI_CFG2_YCC_SWAP_DEFAULT; +} + static void configure_geometry(struct atmel_isi *isi, u32 width, - u32 height, u32 code) + u32 height, const struct soc_camera_format_xlate *xlate) { - u32 cfg2; + u32 cfg2, psize; + u32 fourcc = xlate->host_fmt->fourcc; + + isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 || + fourcc == V4L2_PIX_FMT_RGB32; /* According to sensor's output format to set cfg2 */ - switch (code) { + switch (xlate->code) { default: /* Grey */ case MEDIA_BUS_FMT_Y8_1X8: @@ -117,16 +160,11 @@ static void configure_geometry(struct atmel_isi *isi, u32 width, break; /* YUV */ case MEDIA_BUS_FMT_VYUY8_2X8: - cfg2 = ISI_CFG2_YCC_SWAP_MODE_3 | ISI_CFG2_COL_SPACE_YCbCr; - break; case MEDIA_BUS_FMT_UYVY8_2X8: - cfg2 = ISI_CFG2_YCC_SWAP_MODE_2 | ISI_CFG2_COL_SPACE_YCbCr; - break; case MEDIA_BUS_FMT_YVYU8_2X8: - cfg2 = ISI_CFG2_YCC_SWAP_MODE_1 | ISI_CFG2_COL_SPACE_YCbCr; - break; case MEDIA_BUS_FMT_YUYV8_2X8: - cfg2 = ISI_CFG2_YCC_SWAP_DEFAULT | ISI_CFG2_COL_SPACE_YCbCr; + cfg2 = ISI_CFG2_COL_SPACE_YCbCr | + setup_cfg2_yuv_swap(isi, xlate); break; /* RGB, TODO */ } @@ -139,6 +177,16 @@ static void configure_geometry(struct atmel_isi *isi, u32 width, cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET) & ISI_CFG2_IM_VSIZE_MASK; isi_writel(isi, ISI_CFG2, cfg2); + + /* No down sampling, preview size equal to sensor output size */ + psize = ((width - 1) << ISI_PSIZE_PREV_HSIZE_OFFSET) & + ISI_PSIZE_PREV_HSIZE_MASK; + psize |= ((height - 1) << ISI_PSIZE_PREV_VSIZE_OFFSET) & + ISI_PSIZE_PREV_VSIZE_MASK; + isi_writel(isi, ISI_PSIZE, psize); + isi_writel(isi, ISI_PDECF, ISI_PDECF_NO_SAMPLING); + + return; } static bool is_supported(struct soc_camera_device *icd, @@ -151,8 +199,9 @@ static bool is_supported(struct soc_camera_device *icd, case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_VYUY: + /* RGB */ + case V4L2_PIX_FMT_RGB565: return true; - /* RGB, TODO */ default: return false; } @@ -165,7 +214,7 @@ static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi) struct frame_buffer *buf = isi->active; list_del_init(&buf->list); - v4l2_get_timestamp(&vbuf->timestamp); + vbuf->vb2_buf.timestamp = ktime_get_ns(); vbuf->sequence = isi->sequence++; vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); } @@ -176,11 +225,19 @@ static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi) /* start next dma frame. 
*/ isi->active = list_entry(isi->video_buffer_list.next, struct frame_buffer, list); - isi_writel(isi, ISI_DMA_C_DSCR, - (u32)isi->active->p_dma_desc->fbd_phys); - isi_writel(isi, ISI_DMA_C_CTRL, - ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); - isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); + if (!isi->enable_preview_path) { + isi_writel(isi, ISI_DMA_C_DSCR, + (u32)isi->active->p_dma_desc->fbd_phys); + isi_writel(isi, ISI_DMA_C_CTRL, + ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); + isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); + } else { + isi_writel(isi, ISI_DMA_P_DSCR, + (u32)isi->active->p_dma_desc->fbd_phys); + isi_writel(isi, ISI_DMA_P_CTRL, + ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); + isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH); + } } return IRQ_HANDLED; } @@ -207,7 +264,8 @@ static irqreturn_t isi_interrupt(int irq, void *dev_id) isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS); ret = IRQ_HANDLED; } else { - if (likely(pending & ISI_SR_CXFR_DONE)) + if (likely(pending & ISI_SR_CXFR_DONE) || + likely(pending & ISI_SR_PXFR_DONE)) ret = atmel_isi_handle_streaming(isi); } @@ -245,7 +303,7 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset) /* ------------------------------------------------------------------ Videobuf operations ------------------------------------------------------------------*/ -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -352,21 +410,35 @@ static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer) ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE); /* Check if already in a frame */ - if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) { - dev_err(isi->soc_host.icd->parent, "Already in frame handling.\n"); - return; - } + if (!isi->enable_preview_path) { + if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) { + dev_err(isi->soc_host.icd->parent, "Already in frame handling.\n"); + return; + } - isi_writel(isi, ISI_DMA_C_DSCR, (u32)buffer->p_dma_desc->fbd_phys); - isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); - isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); + isi_writel(isi, ISI_DMA_C_DSCR, + (u32)buffer->p_dma_desc->fbd_phys); + isi_writel(isi, ISI_DMA_C_CTRL, + ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); + isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); + } else { + isi_writel(isi, ISI_DMA_P_DSCR, + (u32)buffer->p_dma_desc->fbd_phys); + isi_writel(isi, ISI_DMA_P_CTRL, + ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); + isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH); + } cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK; /* Enable linked list */ cfg1 |= isi->pdata.frate | ISI_CFG1_DISCR; - /* Enable codec path and ISI */ - ctrl = ISI_CTRL_CDC | ISI_CTRL_EN; + /* Enable ISI */ + ctrl = ISI_CTRL_EN; + + if (!isi->enable_preview_path) + ctrl |= ISI_CTRL_CDC; + isi_writel(isi, ISI_CTRL, ctrl); isi_writel(isi, ISI_CFG1, cfg1); } @@ -411,7 +483,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count) isi_writel(isi, ISI_INTDIS, (u32)~0UL); configure_geometry(isi, icd->user_width, icd->user_height, - icd->current_fmt->code); + icd->current_fmt); spin_lock_irq(&isi->lock); /* Clear any pending interrupt */ @@ -443,15 +515,17 @@ static void stop_streaming(struct vb2_queue *vq) } spin_unlock_irq(&isi->lock); - timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ; - /* Wait until the end of the current frame. 
*/ - while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) && - time_before(jiffies, timeout)) - msleep(1); + if (!isi->enable_preview_path) { + timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ; + /* Wait until the end of the current frame. */ + while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) && + time_before(jiffies, timeout)) + msleep(1); - if (time_after(jiffies, timeout)) - dev_err(icd->parent, - "Timeout waiting for finishing codec request\n"); + if (time_after(jiffies, timeout)) + dev_err(icd->parent, + "Timeout waiting for finishing codec request\n"); + } /* Disable interrupts */ isi_writel(isi, ISI_INTDIS, @@ -617,6 +691,14 @@ static const struct soc_mbus_pixelfmt isi_camera_formats[] = { .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, + { + .fourcc = V4L2_PIX_FMT_RGB565, + .name = "RGB565", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + .layout = SOC_MBUS_LAYOUT_PACKED, + }, }; /* This will be corrected as we get more formats */ @@ -673,7 +755,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int formats = 0, ret; + int formats = 0, ret, i, n; /* sensor format */ struct v4l2_subdev_mbus_code_enum code = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, @@ -707,11 +789,11 @@ static int isi_camera_get_formats(struct soc_camera_device *icd, case MEDIA_BUS_FMT_VYUY8_2X8: case MEDIA_BUS_FMT_YUYV8_2X8: case MEDIA_BUS_FMT_YVYU8_2X8: - formats++; - if (xlate) { - xlate->host_fmt = &isi_camera_formats[0]; + n = ARRAY_SIZE(isi_camera_formats); + formats += n; + for (i = 0; xlate && i < n; i++, xlate++) { + xlate->host_fmt = &isi_camera_formats[i]; xlate->code = code.code; - xlate++; dev_dbg(icd->parent, "Providing format %s using code %d\n", isi_camera_formats[0].name, code.code); } diff --git a/drivers/media/platform/soc_camera/atmel-isi.h b/drivers/media/platform/soc_camera/atmel-isi.h index 5acc771d2edc..0acb32a2b65c 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.h +++ b/drivers/media/platform/soc_camera/atmel-isi.h @@ -79,6 +79,16 @@ #define ISI_CFG2_IM_VSIZE_MASK (0x7FF << ISI_CFG2_IM_VSIZE_OFFSET) #define ISI_CFG2_IM_HSIZE_MASK (0x7FF << ISI_CFG2_IM_HSIZE_OFFSET) +/* Bitfields in PSIZE */ +#define ISI_PSIZE_PREV_VSIZE_OFFSET 0 +#define ISI_PSIZE_PREV_HSIZE_OFFSET 16 +#define ISI_PSIZE_PREV_VSIZE_MASK (0x3FF << ISI_PSIZE_PREV_VSIZE_OFFSET) +#define ISI_PSIZE_PREV_HSIZE_MASK (0x3FF << ISI_PSIZE_PREV_HSIZE_OFFSET) + +/* Bitfields in PDECF */ +#define ISI_PDECF_DEC_FACTOR_MASK (0xFF << 0) +#define ISI_PDECF_NO_SAMPLING (16) + /* Bitfields in CTRL */ /* Also using in SR(ISI_V2) */ #define ISI_CTRL_EN (1 << 0) diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c index 1f28d21a3c9a..48dd5b7851b5 100644 --- a/drivers/media/platform/soc_camera/mx2_camera.c +++ b/drivers/media/platform/soc_camera/mx2_camera.c @@ -35,11 +35,11 @@ #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <linux/videodev2.h> -#include <linux/platform_data/camera-mx2.h> +#include <linux/platform_data/media/camera-mx2.h> #include <asm/dma.h> @@ -469,21 +469,15 @@ static void mx2_camera_clock_stop(struct soc_camera_host *ici) * Videobuf operations */ static int mx2_videobuf_setup(struct vb2_queue *vq, - const void *parg, unsigned int *count, unsigned int 
*num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct mx2_camera_dev *pcdev = ici->priv; dev_dbg(icd->parent, "count=%d, size=%d\n", *count, sizes[0]); - /* TODO: support for VIDIOC_CREATE_BUFS not ready */ - if (fmt != NULL) - return -ENOTTY; - alloc_ctxs[0] = pcdev->alloc_ctx; sizes[0] = icd->sizeimage; @@ -1351,7 +1345,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev, vb2_get_plane_payload(vb, 0)); list_del_init(&buf->internal.queue); - v4l2_get_timestamp(&vbuf->timestamp); + vb->timestamp = ktime_get_ns(); vbuf->sequence = pcdev->frame_count; if (err) vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c index 49c3a257a916..169ed1150226 100644 --- a/drivers/media/platform/soc_camera/mx3_camera.c +++ b/drivers/media/platform/soc_camera/mx3_camera.c @@ -23,9 +23,9 @@ #include <media/v4l2-dev.h> #include <media/videobuf2-dma-contig.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> -#include <linux/platform_data/camera-mx3.h> +#include <linux/platform_data/media/camera-mx3.h> #include <linux/platform_data/dma-imx.h> #define MX3_CAM_DRV_NAME "mx3-camera" @@ -155,7 +155,7 @@ static void mx3_cam_dma_done(void *arg) struct mx3_camera_buffer *buf = to_mx3_vb(vb); list_del_init(&buf->queue); - v4l2_get_timestamp(&vb->timestamp); + vb->vb2_buf.timestamp = ktime_get_ns(); vb->field = mx3_cam->field; vb->sequence = mx3_cam->sequence++; vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE); @@ -185,11 +185,9 @@ static void mx3_cam_dma_done(void *arg) * Calculate the __buffer__ (not data) size and number of buffers. */ static int mx3_videobuf_setup(struct vb2_queue *vq, - const void *parg, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct mx3_camera_dev *mx3_cam = ici->priv; @@ -197,33 +195,6 @@ static int mx3_videobuf_setup(struct vb2_queue *vq, if (!mx3_cam->idmac_channel[0]) return -EINVAL; - if (fmt) { - const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd, - fmt->fmt.pix.pixelformat); - unsigned int bytes_per_line; - int ret; - - if (!xlate) - return -EINVAL; - - ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width, - xlate->host_fmt); - if (ret < 0) - return ret; - - bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret); - - ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line, - fmt->fmt.pix.height); - if (ret < 0) - return ret; - - sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret); - } else { - /* Called from VIDIOC_REQBUFS or in compatibility mode */ - sizes[0] = icd->sizeimage; - } - alloc_ctxs[0] = mx3_cam->alloc_ctx; if (!vq->num_buffers) @@ -232,9 +203,14 @@ static int mx3_videobuf_setup(struct vb2_queue *vq, if (!*count) *count = 2; + /* Called from VIDIOC_REQBUFS or in compatibility mode */ + if (!*num_planes) + sizes[0] = icd->sizeimage; + else if (sizes[0] < icd->sizeimage) + return -EINVAL; + /* If *num_planes != 0, we have already verified *count. 
*/ - if (!*num_planes && - sizes[0] * *count + mx3_cam->buf_total > MAX_VIDEO_MEM * 1024 * 1024) + if (sizes[0] * *count + mx3_cam->buf_total > MAX_VIDEO_MEM * 1024 * 1024) *count = (MAX_VIDEO_MEM * 1024 * 1024 - mx3_cam->buf_total) / sizes[0]; diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index ba8dcd11ae0e..bd721e35474a 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c @@ -28,9 +28,9 @@ #include <linux/platform_device.h> #include <linux/slab.h> -#include <media/omap1_camera.h> +#include <linux/platform_data/media/omap1_camera.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/videobuf-dma-contig.h> #include <media/videobuf-dma-sg.h> diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c index fcb942de0c7f..415f3bda60bf 100644 --- a/drivers/media/platform/soc_camera/pxa_camera.c +++ b/drivers/media/platform/soc_camera/pxa_camera.c @@ -33,13 +33,13 @@ #include <media/v4l2-dev.h> #include <media/videobuf-dma-sg.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-of.h> #include <linux/videodev2.h> #include <mach/dma.h> -#include <linux/platform_data/camera-pxa.h> +#include <linux/platform_data/media/camera-pxa.h> #define PXA_CAM_VERSION "0.0.6" #define PXA_CAM_DRV_NAME "pxa27x-camera" diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index efe57b23fac1..b7fd695b9ed5 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -21,14 +21,13 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/platform_data/camera-rcar.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> @@ -138,6 +137,11 @@ #define TIMEOUT_MS 100 +#define RCAR_VIN_HSYNC_ACTIVE_LOW (1 << 0) +#define RCAR_VIN_VSYNC_ACTIVE_LOW (1 << 1) +#define RCAR_VIN_BT601 (1 << 2) +#define RCAR_VIN_BT656 (1 << 3) + enum chip_id { RCAR_GEN2, RCAR_H1, @@ -527,46 +531,14 @@ struct rcar_vin_cam { * required */ static int rcar_vin_videobuf_setup(struct vb2_queue *vq, - const void *parg, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct rcar_vin_priv *priv = ici->priv; - if (fmt) { - const struct soc_camera_format_xlate *xlate; - unsigned int bytes_per_line; - int ret; - - if (fmt->fmt.pix.sizeimage < icd->sizeimage) - return -EINVAL; - - xlate = soc_camera_xlate_by_fourcc(icd, - fmt->fmt.pix.pixelformat); - if (!xlate) - return -EINVAL; - ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width, - xlate->host_fmt); - if (ret < 0) - return ret; - - bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret); - - ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line, - fmt->fmt.pix.height); - if (ret < 0) - return ret; - - sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret); - } else { - /* Called from 
VIDIOC_REQBUFS or in compatibility mode */ - sizes[0] = icd->sizeimage; - } - alloc_ctxs[0] = priv->alloc_ctx; if (!vq->num_buffers) @@ -576,14 +548,18 @@ static int rcar_vin_videobuf_setup(struct vb2_queue *vq, *count = 2; priv->vb_count = *count; - *num_planes = 1; - /* Number of hardware slots */ if (is_continuous_transfer(priv)) priv->nr_hw_slots = MAX_BUFFER_NUM; else priv->nr_hw_slots = 1; + if (*num_planes) + return sizes[0] < icd->sizeimage ? -EINVAL : 0; + + sizes[0] = icd->sizeimage; + *num_planes = 1; + dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]); return 0; @@ -912,7 +888,7 @@ static irqreturn_t rcar_vin_irq(int irq, void *data) priv->queue_buf[slot]->field = priv->field; priv->queue_buf[slot]->sequence = priv->sequence++; - v4l2_get_timestamp(&priv->queue_buf[slot]->timestamp); + priv->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&priv->queue_buf[slot]->vb2_buf, VB2_BUF_STATE_DONE); priv->queue_buf[slot] = NULL; @@ -1853,63 +1829,43 @@ static const struct of_device_id rcar_vin_of_table[] = { MODULE_DEVICE_TABLE(of, rcar_vin_of_table); #endif -static struct platform_device_id rcar_vin_id_table[] = { - { "r8a7779-vin", RCAR_H1 }, - { "r8a7778-vin", RCAR_M1 }, - { "uPD35004-vin", RCAR_E1 }, - {}, -}; -MODULE_DEVICE_TABLE(platform, rcar_vin_id_table); - static int rcar_vin_probe(struct platform_device *pdev) { const struct of_device_id *match = NULL; struct rcar_vin_priv *priv; + struct v4l2_of_endpoint ep; + struct device_node *np; struct resource *mem; - struct rcar_vin_platform_data *pdata; unsigned int pdata_flags; int irq, ret; - if (pdev->dev.of_node) { - struct v4l2_of_endpoint ep; - struct device_node *np; + match = of_match_device(of_match_ptr(rcar_vin_of_table), &pdev->dev); - match = of_match_device(of_match_ptr(rcar_vin_of_table), - &pdev->dev); - - np = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); - if (!np) { - dev_err(&pdev->dev, "could not find endpoint\n"); - return -EINVAL; - } + np = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (!np) { + dev_err(&pdev->dev, "could not find endpoint\n"); + return -EINVAL; + } - ret = v4l2_of_parse_endpoint(np, &ep); - if (ret) { - dev_err(&pdev->dev, "could not parse endpoint\n"); - return ret; - } + ret = v4l2_of_parse_endpoint(np, &ep); + if (ret) { + dev_err(&pdev->dev, "could not parse endpoint\n"); + return ret; + } - if (ep.bus_type == V4L2_MBUS_BT656) - pdata_flags = RCAR_VIN_BT656; - else { - pdata_flags = 0; - if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) - pdata_flags |= RCAR_VIN_HSYNC_ACTIVE_LOW; - if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) - pdata_flags |= RCAR_VIN_VSYNC_ACTIVE_LOW; - } + if (ep.bus_type == V4L2_MBUS_BT656) + pdata_flags = RCAR_VIN_BT656; + else { + pdata_flags = 0; + if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) + pdata_flags |= RCAR_VIN_HSYNC_ACTIVE_LOW; + if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) + pdata_flags |= RCAR_VIN_VSYNC_ACTIVE_LOW; + } - of_node_put(np); + of_node_put(np); - dev_dbg(&pdev->dev, "pdata_flags = %08x\n", pdata_flags); - } else { - pdata = pdev->dev.platform_data; - if (!pdata || !pdata->flags) { - dev_err(&pdev->dev, "platform data not set\n"); - return -EINVAL; - } - pdata_flags = pdata->flags; - } + dev_dbg(&pdev->dev, "pdata_flags = %08x\n", pdata_flags); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (mem == NULL) @@ -1992,7 +1948,6 @@ static struct platform_driver rcar_vin_driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(rcar_vin_of_table), }, 
- .id_table = rcar_vin_id_table, }; module_platform_driver(rcar_vin_driver); diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 67a669d826b8..90c87f2b4ec0 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -40,11 +40,11 @@ #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/soc_camera.h> -#include <media/sh_mobile_ceu.h> -#include <media/sh_mobile_csi2.h> +#include <media/drv-intf/sh_mobile_ceu.h> +#include <media/drv-intf/sh_mobile_csi2.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-mediabus.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include "soc_scale_crop.h" @@ -210,43 +210,14 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev) * for the current frame format if required */ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, - const void *parg, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; - if (fmt) { - const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd, - fmt->fmt.pix.pixelformat); - unsigned int bytes_per_line; - int ret; - - if (!xlate) - return -EINVAL; - - ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width, - xlate->host_fmt); - if (ret < 0) - return ret; - - bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret); - - ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line, - fmt->fmt.pix.height); - if (ret < 0) - return ret; - - sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret); - } else { - /* Called from VIDIOC_REQBUFS or in compatibility mode */ - sizes[0] = icd->sizeimage; - } - alloc_ctxs[0] = pcdev->alloc_ctx; if (!vq->num_buffers) @@ -255,8 +226,14 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, if (!*count) *count = 2; + /* Called from VIDIOC_REQBUFS or in compatibility mode */ + if (!*num_planes) + sizes[0] = icd->sizeimage; + else if (sizes[0] < icd->sizeimage) + return -EINVAL; + /* If *num_planes != 0, we have already verified *count. 
*/ - if (pcdev->video_limit && !*num_planes) { + if (pcdev->video_limit) { size_t size = PAGE_ALIGN(sizes[0]) * *count; if (size + pcdev->buf_total > pcdev->video_limit) @@ -533,7 +510,7 @@ static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) pcdev->active = NULL; ret = sh_mobile_ceu_capture(pcdev); - v4l2_get_timestamp(&vbuf->timestamp); + vbuf->vb2_buf.timestamp = ktime_get_ns(); if (!ret) { vbuf->field = pcdev->field; vbuf->sequence = pcdev->sequence++; diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c index 12d3626ecf22..09b18365a4b1 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c +++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c @@ -18,10 +18,10 @@ #include <linux/videodev2.h> #include <linux/module.h> -#include <media/sh_mobile_ceu.h> -#include <media/sh_mobile_csi2.h> +#include <media/drv-intf/sh_mobile_ceu.h> +#include <media/drv-intf/sh_mobile_csi2.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index dc98122e78dc..cc84c6d6a701 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -30,7 +30,7 @@ #include <linux/vmalloc.h> #include <media/soc_camera.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> #include <media/v4l2-async.h> #include <media/v4l2-clk.h> #include <media/v4l2-common.h> @@ -1360,7 +1360,7 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd, struct soc_camera_host_desc *shd = &sdesc->host_desc; struct i2c_adapter *adap; struct v4l2_subdev *subdev; - char clk_name[V4L2_SUBDEV_NAME_SIZE]; + char clk_name[V4L2_CLK_NAME_SIZE]; int ret; /* First find out how we link the main client */ @@ -1391,8 +1391,8 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd, ssdd->sd_pdata.regulators = NULL; shd->board_info->platform_data = ssdd; - snprintf(clk_name, sizeof(clk_name), "%d-%04x", - shd->i2c_adapter_id, shd->board_info->addr); + v4l2_clk_name_i2c(clk_name, sizeof(clk_name), + shd->i2c_adapter_id, shd->board_info->addr); icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); if (IS_ERR(icd->clk)) { @@ -1526,7 +1526,7 @@ static int scan_async_group(struct soc_camera_host *ici, struct soc_camera_async_client *sasc; struct soc_camera_device *icd; struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,}; - char clk_name[V4L2_SUBDEV_NAME_SIZE]; + char clk_name[V4L2_CLK_NAME_SIZE]; unsigned int i; int ret; @@ -1572,8 +1572,9 @@ static int scan_async_group(struct soc_camera_host *ici, icd->sasc = sasc; icd->parent = ici->v4l2_dev.dev; - snprintf(clk_name, sizeof(clk_name), "%d-%04x", - sasd->asd.match.i2c.adapter_id, sasd->asd.match.i2c.address); + v4l2_clk_name_i2c(clk_name, sizeof(clk_name), + sasd->asd.match.i2c.adapter_id, + sasd->asd.match.i2c.address); icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); if (IS_ERR(icd->clk)) { @@ -1631,7 +1632,7 @@ static int soc_of_bind(struct soc_camera_host *ici, struct soc_camera_async_client *sasc; struct soc_of_info *info; struct i2c_client *client; - char clk_name[V4L2_SUBDEV_NAME_SIZE + 32]; + char clk_name[V4L2_CLK_NAME_SIZE]; int ret; /* allocate a new subdev and add match info to it */ @@ -1674,11 +1675,11 @@ static int soc_of_bind(struct 
soc_camera_host *ici, client = of_find_i2c_device_by_node(remote); if (client) - snprintf(clk_name, sizeof(clk_name), "%d-%04x", - client->adapter->nr, client->addr); + v4l2_clk_name_i2c(clk_name, sizeof(clk_name), + client->adapter->nr, client->addr); else - snprintf(clk_name, sizeof(clk_name), "of-%s", - of_node_full_name(remote)); + v4l2_clk_name_of(clk_name, sizeof(clk_name), + of_node_full_name(remote)); icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); if (IS_ERR(icd->clk)) { diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c index cc8eb0758219..a51d2a42998c 100644 --- a/drivers/media/platform/soc_camera/soc_camera_platform.c +++ b/drivers/media/platform/soc_camera/soc_camera_platform.c @@ -18,7 +18,7 @@ #include <linux/videodev2.h> #include <media/v4l2-subdev.h> #include <media/soc_camera.h> -#include <media/soc_camera_platform.h> +#include <linux/platform_data/media/soc_camera_platform.h> struct soc_camera_platform_priv { struct v4l2_subdev subdev; diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c index 1dbcd426683c..e3e665e1c503 100644 --- a/drivers/media/platform/soc_camera/soc_mediabus.c +++ b/drivers/media/platform/soc_camera/soc_mediabus.c @@ -13,7 +13,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> -#include <media/soc_mediabus.h> +#include <media/drv-intf/soc_mediabus.h> static const struct soc_mbus_lookup mbus_fmt[] = { { diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index a0d267e017f6..d12a419c044a 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -191,7 +191,7 @@ static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state) dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (src_vb && dst_vb) { - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; dst_vb->timecode = src_vb->timecode; dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->flags |= src_vb->flags & @@ -297,7 +297,7 @@ static int bdisp_get_bufs(struct bdisp_ctx *ctx) if (ret) return ret; - dst_vb->timestamp = src_vb->timestamp; + dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; return 0; } @@ -438,11 +438,9 @@ static void bdisp_ctrls_delete(struct bdisp_ctx *ctx) } static int bdisp_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nb_buf, unsigned int *nb_planes, unsigned int sizes[], void *allocators[]) { - const struct v4l2_format *fmt = parg; struct bdisp_ctx *ctx = vb2_get_drv_priv(vq); struct bdisp_frame *frame = ctx_get_frame(ctx, vq->type); @@ -455,13 +453,13 @@ static int bdisp_queue_setup(struct vb2_queue *vq, dev_err(ctx->bdisp_dev->dev, "Invalid format\n"); return -EINVAL; } + allocators[0] = ctx->bdisp_dev->alloc_ctx; - if (fmt && fmt->fmt.pix.sizeimage < frame->sizeimage) - return -EINVAL; + if (*nb_planes) + return sizes[0] < frame->sizeimage ? -EINVAL : 0; *nb_planes = 1; - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : frame->sizeimage; - allocators[0] = ctx->bdisp_dev->alloc_ctx; + sizes[0] = frame->sizeimage; return 0; } diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c index 95223ab71e19..2dfbe8ab5214 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c @@ -209,18 +209,18 @@ void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe, tsin = fei->channel_data[n]; - if (tsin && tsin->frontend) { - dvb_unregister_frontend(tsin->frontend); - dvb_frontend_detach(tsin->frontend); - } + if (tsin) { + if (tsin->frontend) { + dvb_unregister_frontend(tsin->frontend); + dvb_frontend_detach(tsin->frontend); + } - if (tsin && tsin->i2c_adapter) i2c_put_adapter(tsin->i2c_adapter); - if (tsin && tsin->i2c_client) { - if (tsin->i2c_client->dev.driver->owner) + if (tsin->i2c_client) { module_put(tsin->i2c_client->dev.driver->owner); - i2c_unregister_device(tsin->i2c_client); + i2c_unregister_device(tsin->i2c_client); + } } } diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 8490a65ae1c6..78e3cb9a628f 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -823,7 +823,7 @@ static int c8sectpfe_probe(struct platform_device *pdev) } of_node_put(i2c_bus); - tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0); + tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0); ret = gpio_is_valid(tsin->rst_gpio); if (!ret) { diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index de24effd984f..1fa00c2cf3d7 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -1288,7 +1288,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) d_vb = ctx->dst_vb; d_vb->flags = s_vb->flags; - d_vb->timestamp = s_vb->timestamp; + d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp; if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE) d_vb->timecode = s_vb->timecode; @@ -1796,7 +1796,6 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = { * Queue operations */ static int vpe_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index 5820e45b3a9f..113c9f3c0b3e 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c @@ -31,7 +31,7 @@ #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include <media/videobuf-dma-contig.h> -#include <media/timb_video.h> +#include <linux/platform_data/media/timb_video.h> #define DRIVER_NAME "timb-video" diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c index 32e4ff46daf3..1254f7e4d732 100644 --- a/drivers/media/platform/via-camera.c +++ b/drivers/media/platform/via-camera.c @@ -19,7 +19,7 @@ #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-image-sizes.h> -#include <media/ov7670.h> +#include <media/i2c/ov7670.h> #include <media/videobuf-dma-sg.h> #include <linux/delay.h> #include <linux/dma-mapping.h> diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index e18fb9f9ed2f..418113c99801 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -235,7 +235,7 @@ static int 
device_process(struct vim2m_ctx *ctx, out_vb->sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++; in_vb->sequence = q_data->sequence++; - out_vb->timestamp = in_vb->timestamp; + out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp; if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE) out_vb->timecode = in_vb->timecode; @@ -710,11 +710,9 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = { */ static int vim2m_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct vim2m_ctx *ctx = vb2_get_drv_priv(vq); struct vim2m_q_data *q_data; unsigned int size, count = *nbuffers; @@ -723,17 +721,14 @@ static int vim2m_queue_setup(struct vb2_queue *vq, size = q_data->width * q_data->height * q_data->fmt->depth >> 3; - if (fmt) { - if (fmt->fmt.pix.sizeimage < size) - return -EINVAL; - size = fmt->fmt.pix.sizeimage; - } - while (size * count > MEM2MEM_VID_MEM_LIMIT) (count)--; + *nbuffers = count; + + if (*nplanes) + return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; - *nbuffers = count; sizes[0] = size; /* diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h index 55b304a705d5..751c1ba391e9 100644 --- a/drivers/media/platform/vivid/vivid-core.h +++ b/drivers/media/platform/vivid/vivid-core.h @@ -264,6 +264,7 @@ struct vivid_dev { bool vflip; bool vbi_cap_interlaced; bool loop_video; + bool reduced_fps; /* Framebuffer */ unsigned long video_pbase; @@ -285,7 +286,7 @@ struct vivid_dev { bool dqbuf_error; bool seq_wrap; bool time_wrap; - __kernel_time_t time_wrap_offset; + u64 time_wrap_offset; unsigned perc_dropped_buffers; enum vivid_signal_mode std_signal_mode; unsigned query_std_last; diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index f41ac0b01fec..b98089c95ef5 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -78,6 +78,7 @@ #define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 39) #define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 40) #define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 41) +#define VIVID_CID_REDUCED_FPS (VIVID_CID_VIVID_BASE + 42) #define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60) #define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61) @@ -424,6 +425,10 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) dev->sensor_vflip = ctrl->val; tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip); break; + case VIVID_CID_REDUCED_FPS: + dev->reduced_fps = ctrl->val; + vivid_update_format_cap(dev, true); + break; case VIVID_CID_HAS_CROP_CAP: dev->has_crop_cap = ctrl->val; vivid_update_format_cap(dev, true); @@ -601,6 +606,15 @@ static const struct v4l2_ctrl_config vivid_ctrl_vflip = { .step = 1, }; +static const struct v4l2_ctrl_config vivid_ctrl_reduced_fps = { + .ops = &vivid_vid_cap_ctrl_ops, + .id = VIVID_CID_REDUCED_FPS, + .name = "Reduced Framerate", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .max = 1, + .step = 1, +}; + static const struct v4l2_ctrl_config vivid_ctrl_has_crop_cap = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HAS_CROP_CAP, @@ -940,7 +954,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_out = { static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_streaming); - struct timeval tv; + u64 rem; switch (ctrl->id) { case VIVID_CID_DQBUF_ERROR: @@ 
-979,8 +993,16 @@ static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl) dev->time_wrap_offset = 0; break; } - v4l2_get_timestamp(&tv); - dev->time_wrap_offset = -tv.tv_sec - 16; + /* + * We want to set the time 16 seconds before the 32 bit tv_sec + * value of struct timeval would wrap around. So first we + * calculate ktime_get_ns() % ((1 << 32) * NSEC_PER_SEC), and + * then we set the offset to ((1 << 32) - 16) * NSEC_PER_SEC). + */ + div64_u64_rem(ktime_get_ns(), + 0x100000000ULL * NSEC_PER_SEC, &rem); + dev->time_wrap_offset = + (0x100000000ULL - 16) * NSEC_PER_SEC - rem; break; } return 0; @@ -1340,11 +1362,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap, v4l2_ctrl_handler_init(hdl_vid_cap, 55); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vid_out, 26); - v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL); + if (!no_error_inj) + v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vbi_cap, 21); v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vbi_out, 19); - v4l2_ctrl_new_custom(hdl_vbi_out, &vivid_ctrl_class, NULL); + if (!no_error_inj) + v4l2_ctrl_new_custom(hdl_vbi_out, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_radio_rx, 17); v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_radio_tx, 17); @@ -1414,6 +1438,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap, v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vflip, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_sav, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_eav, NULL); + v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_reduced_fps, NULL); if (show_ccs_cap) { dev->ctrl_has_crop_cap = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_has_crop_cap, NULL); diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index 83cc6d3b4784..9034281944a4 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -441,7 +441,7 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf) * "Start of Exposure". */ if (dev->tstamp_src_is_soe) - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); if (dev->field_cap == V4L2_FIELD_ALTERNATE) { /* * 60 Hz standards start with the bottom field, 50 Hz standards @@ -558,8 +558,8 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf) * the timestamp now. */ if (!dev->tstamp_src_is_soe) - v4l2_get_timestamp(&buf->vb.timestamp); - buf->vb.timestamp.tv_sec += dev->time_wrap_offset; + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + buf->vb.vb2_buf.timestamp += dev->time_wrap_offset; } /* diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c index c2c46dcdbe95..98eed5889bc1 100644 --- a/drivers/media/platform/vivid/vivid-kthread-out.c +++ b/drivers/media/platform/vivid/vivid-kthread-out.c @@ -95,8 +95,8 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev) */ vid_out_buf->vb.sequence /= 2; } - v4l2_get_timestamp(&vid_out_buf->vb.timestamp); - vid_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset; + vid_out_buf->vb.vb2_buf.timestamp = + ktime_get_ns() + dev->time_wrap_offset; vb2_buffer_done(&vid_out_buf->vb.vb2_buf, dev->dqbuf_error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vid_out buffer %d done\n", @@ -108,8 +108,8 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev) vivid_sliced_vbi_out_process(dev, vbi_out_buf); vbi_out_buf->vb.sequence = dev->vbi_out_seq_count; - v4l2_get_timestamp(&vbi_out_buf->vb.timestamp); - vbi_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset; + vbi_out_buf->vb.vb2_buf.timestamp = + ktime_get_ns() + dev->time_wrap_offset; vb2_buffer_done(&vbi_out_buf->vb.vb2_buf, dev->dqbuf_error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vbi_out buffer %d done\n", diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c index 082c401764ce..3d1604cb982f 100644 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c @@ -117,8 +117,8 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev) if (sdr_cap_buf) { sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count; vivid_sdr_cap_process(dev, sdr_cap_buf); - v4l2_get_timestamp(&sdr_cap_buf->vb.timestamp); - sdr_cap_buf->vb.timestamp.tv_sec += dev->time_wrap_offset; + sdr_cap_buf->vb.vb2_buf.timestamp = + ktime_get_ns() + dev->time_wrap_offset; vb2_buffer_done(&sdr_cap_buf->vb.vb2_buf, dev->dqbuf_error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dev->dqbuf_error = false; @@ -213,7 +213,7 @@ static int vivid_thread_sdr_cap(void *data) return 0; } -static int sdr_cap_queue_setup(struct vb2_queue *vq, const void *parg, +static int sdr_cap_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c index e903d023e9df..cda45a582bfe 100644 --- a/drivers/media/platform/vivid/vivid-vbi-cap.c +++ b/drivers/media/platform/vivid/vivid-vbi-cap.c @@ -108,8 +108,7 @@ void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf) if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf); - v4l2_get_timestamp(&buf->vb.timestamp); - buf->vb.timestamp.tv_sec += dev->time_wrap_offset; + buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset; } @@ -133,11 +132,10 @@ void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, vbuf[i] = dev->vbi_gen.data[i]; } - v4l2_get_timestamp(&buf->vb.timestamp); - buf->vb.timestamp.tv_sec += dev->time_wrap_offset; + buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset; } -static int vbi_cap_queue_setup(struct vb2_queue *vq, const void *parg, +static int vbi_cap_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c index 75c5709f938e..3c5a469e6f49 100644 --- a/drivers/media/platform/vivid/vivid-vbi-out.c +++ b/drivers/media/platform/vivid/vivid-vbi-out.c @@ -27,7 +27,7 @@ #include "vivid-vbi-out.h" #include "vivid-vbi-cap.h" -static int vbi_out_queue_setup(struct vb2_queue *vq, const void *parg, +static int vbi_out_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index ef5412311b2f..b84f081c1b92 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -95,11 +95,10 @@ static const struct 
v4l2_discrete_probe webcam_probe = { VIVID_WEBCAM_SIZES }; -static int vid_cap_queue_setup(struct vb2_queue *vq, const void *parg, +static int vid_cap_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct vivid_dev *dev = vb2_get_drv_priv(vq); unsigned buffers = tpg_g_buffers(&dev->tpg); unsigned h = dev->fmt_cap_rect.height; @@ -122,27 +121,16 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, const void *parg, dev->queue_setup_error = false; return -EINVAL; } - if (fmt) { - const struct v4l2_pix_format_mplane *mp; - struct v4l2_format mp_fmt; - const struct vivid_fmt *vfmt; - - if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) { - fmt_sp2mp(fmt, &mp_fmt); - fmt = &mp_fmt; - } - mp = &fmt->fmt.pix_mp; + if (*nplanes) { /* - * Check if the number of planes in the specified format match + * Check if the number of requested planes match * the number of buffers in the current format. You can't mix that. */ - if (mp->num_planes != buffers) + if (*nplanes != buffers) return -EINVAL; - vfmt = vivid_get_format(dev, mp->pixelformat); for (p = 0; p < buffers; p++) { - sizes[p] = mp->plane_fmt[p].sizeimage; if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + - vfmt->data_offset[p]) + dev->fmt_cap->data_offset[p]) return -EINVAL; } } else { @@ -405,6 +393,7 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) { struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt; unsigned size; + u64 pixelclock; switch (dev->input_type[dev->input]) { case WEBCAM: @@ -434,8 +423,15 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) dev->src_rect.width = bt->width; dev->src_rect.height = bt->height; size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); + if (dev->reduced_fps && can_reduce_fps(bt)) { + pixelclock = div_u64(bt->pixelclock * 1000, 1001); + bt->flags |= V4L2_DV_FL_REDUCED_FPS; + } else { + pixelclock = bt->pixelclock; + bt->flags &= ~V4L2_DV_FL_REDUCED_FPS; + } dev->timeperframe_vid_cap = (struct v4l2_fract) { - size / 100, (u32)bt->pixelclock / 100 + size / 100, (u32)pixelclock / 100 }; if (bt->interlaced) dev->field_cap = V4L2_FIELD_ALTERNATE; @@ -1662,7 +1658,7 @@ int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, !valid_cvt_gtf_timings(timings)) return -EINVAL; - if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0)) + if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0, false)) return 0; if (vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index b77acb6a7013..64e4d66482c1 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -31,11 +31,10 @@ #include "vivid-kthread-out.h" #include "vivid-vid-out.h" -static int vid_out_queue_setup(struct vb2_queue *vq, const void *parg, +static int vid_out_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct vivid_dev *dev = vb2_get_drv_priv(vq); const struct vivid_fmt *vfmt = dev->fmt_out; unsigned planes = vfmt->buffers; @@ -64,26 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const void *parg, return -EINVAL; } - if (fmt) { - const struct v4l2_pix_format_mplane *mp; - struct v4l2_format mp_fmt; - - if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) { - fmt_sp2mp(fmt, &mp_fmt); - fmt = &mp_fmt; - } - mp = 
&fmt->fmt.pix_mp; + if (*nplanes) { /* - * Check if the number of planes in the specified format match + * Check if the number of requested planes match * the number of planes in the current format. You can't mix that. */ - if (mp->num_planes != planes) + if (*nplanes != planes) return -EINVAL; - sizes[0] = mp->plane_fmt[0].sizeimage; if (sizes[0] < size) return -EINVAL; for (p = 1; p < planes; p++) { - sizes[p] = mp->plane_fmt[p].sizeimage; if (sizes[p] < dev->bytesperline_out[p] * h) return -EINVAL; } @@ -224,6 +213,7 @@ void vivid_update_format_out(struct vivid_dev *dev) { struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt; unsigned size, p; + u64 pixelclock; switch (dev->output_type[dev->output]) { case SVID: @@ -245,8 +235,14 @@ void vivid_update_format_out(struct vivid_dev *dev) dev->sink_rect.width = bt->width; dev->sink_rect.height = bt->height; size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); + + if (can_reduce_fps(bt) && (bt->flags & V4L2_DV_FL_REDUCED_FPS)) + pixelclock = div_u64(bt->pixelclock * 1000, 1001); + else + pixelclock = bt->pixelclock; + dev->timeperframe_vid_out = (struct v4l2_fract) { - size / 100, (u32)bt->pixelclock / 100 + size / 100, (u32)pixelclock / 100 }; if (bt->interlaced) dev->field_out = V4L2_FIELD_ALTERNATE; @@ -1149,7 +1145,7 @@ int vivid_vid_out_s_dv_timings(struct file *file, void *_fh, 0, NULL, NULL) && !valid_cvt_gtf_timings(timings)) return -EINVAL; - if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0)) + if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0, true)) return 0; if (vb2_is_busy(&dev->vb_vid_out_q)) return -EBUSY; diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 5ce88e1f5d71..45eb65fa23db 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -274,35 +274,6 @@ static int __vsp1_video_try_format(struct vsp1_video *video, return 0; } -static bool -vsp1_video_format_adjust(struct vsp1_video *video, - const struct v4l2_pix_format_mplane *format, - struct v4l2_pix_format_mplane *adjust) -{ - unsigned int i; - - *adjust = *format; - __vsp1_video_try_format(video, adjust, NULL); - - if (format->width != adjust->width || - format->height != adjust->height || - format->pixelformat != adjust->pixelformat || - format->num_planes != adjust->num_planes) - return false; - - for (i = 0; i < format->num_planes; ++i) { - if (format->plane_fmt[i].bytesperline != - adjust->plane_fmt[i].bytesperline) - return false; - - adjust->plane_fmt[i].sizeimage = - max(adjust->plane_fmt[i].sizeimage, - format->plane_fmt[i].sizeimage); - } - - return true; -} - /* ----------------------------------------------------------------------------- * Pipeline Management */ @@ -611,7 +582,7 @@ vsp1_video_complete_buffer(struct vsp1_video *video) spin_unlock_irqrestore(&video->irqlock, flags); done->buf.sequence = video->sequence++; - v4l2_get_timestamp(&done->buf.timestamp); + done->buf.vb2_buf.timestamp = ktime_get_ns(); for (i = 0; i < done->buf.vb2_buf.num_planes; ++i) vb2_set_plane_payload(&done->buf.vb2_buf, i, done->length[i]); vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE); @@ -787,26 +758,24 @@ void vsp1_pipelines_resume(struct vsp1_device *vsp1) */ static int -vsp1_video_queue_setup(struct vb2_queue *vq, const void *parg, +vsp1_video_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct vsp1_video *video = 
vb2_get_drv_priv(vq); - const struct v4l2_pix_format_mplane *format; - struct v4l2_pix_format_mplane pix_mp; + const struct v4l2_pix_format_mplane *format = &video->format; unsigned int i; - if (fmt) { - /* Make sure the format is valid and adjust the sizeimage field - * if needed. - */ - if (!vsp1_video_format_adjust(video, &fmt->fmt.pix_mp, &pix_mp)) + if (*nplanes) { + if (*nplanes != format->num_planes) return -EINVAL; - format = &pix_mp; - } else { - format = &video->format; + for (i = 0; i < *nplanes; i++) { + if (sizes[i] < format->plane_fmt[i].sizeimage) + return -EINVAL; + alloc_ctxs[i] = video->alloc_ctx; + } + return 0; } *nplanes = format->num_planes; diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c index d11cc7072cd5..722758f33924 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.c +++ b/drivers/media/platform/xilinx/xilinx-dma.c @@ -303,27 +303,25 @@ static void xvip_dma_complete(void *param) buf->buf.field = V4L2_FIELD_NONE; buf->buf.sequence = dma->sequence++; - v4l2_get_timestamp(&buf->buf.timestamp); + buf->buf.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage); vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); } static int -xvip_dma_queue_setup(struct vb2_queue *vq, const void *parg, +xvip_dma_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct xvip_dma *dma = vb2_get_drv_priv(vq); + alloc_ctxs[0] = dma->alloc_ctx; /* Make sure the image size is large enough. */ - if (fmt && fmt->fmt.pix.sizeimage < dma->format.sizeimage) - return -EINVAL; + if (*nplanes) + return sizes[0] < dma->format.sizeimage ? -EINVAL : 0; *nplanes = 1; - - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : dma->format.sizeimage; - alloc_ctxs[0] = dma->alloc_ctx; + sizes[0] = dma->format.sizeimage; return 0; } diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c index b5f7d5ecb7f6..8bd7e3736019 100644 --- a/drivers/media/platform/xilinx/xilinx-tpg.c +++ b/drivers/media/platform/xilinx/xilinx-tpg.c @@ -731,6 +731,7 @@ static int xtpg_parse_of(struct xtpg_device *xtpg) format = xvip_of_get_format(port); if (IS_ERR(format)) { dev_err(dev, "invalid format in DT"); + of_node_put(port); return PTR_ERR(format); } @@ -739,6 +740,7 @@ static int xtpg_parse_of(struct xtpg_device *xtpg) xtpg->vip_format = format; } else if (xtpg->vip_format != format) { dev_err(dev, "in/out format mismatch in DT"); + of_node_put(port); return -EINVAL; } diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c index 7b7cb9c28d2c..b9bf24fefa5a 100644 --- a/drivers/media/platform/xilinx/xilinx-vipp.c +++ b/drivers/media/platform/xilinx/xilinx-vipp.c @@ -476,8 +476,10 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev) for_each_child_of_node(ports, port) { ret = xvip_graph_dma_init_one(xdev, port); - if (ret < 0) + if (ret < 0) { + of_node_put(port); return ret; + } } return 0; diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 5236035f0f2a..70fd8e80198a 100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c @@ -42,7 +42,7 @@ #include <linux/videodev2.h> #include <linux/io.h> #include <linux/slab.h> -#include <media/tea575x.h> +#include <media/drv-intf/tea575x.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> @@ -108,7 +108,7 @@ static void maxiradio_tea575x_set_direction(struct snd_tea575x *tea, bool output { } -static struct snd_tea575x_ops maxiradio_tea_ops = { +static const struct snd_tea575x_ops maxiradio_tea_ops = { .set_pins = maxiradio_tea575x_set_pins, .get_pins = maxiradio_tea575x_get_pins, .set_direction = maxiradio_tea575x_set_direction, diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c index b8d61cbc18cb..dc81d422b394 100644 --- a/drivers/media/radio/radio-sf16fmr2.c +++ b/drivers/media/radio/radio-sf16fmr2.c @@ -14,7 +14,7 @@ #include <linux/io.h> /* outb, outb_p */ #include <linux/isa.h> #include <linux/pnp.h> -#include <media/tea575x.h> +#include <media/drv-intf/tea575x.h> MODULE_AUTHOR("Ondrej Zary"); MODULE_DESCRIPTION("MediaForte SF16-FMR2 and SF16-FMD2 FM radio card driver"); @@ -82,7 +82,7 @@ static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output) { } -static struct snd_tea575x_ops fmr2_tea_ops = { +static const struct snd_tea575x_ops fmr2_tea_ops = { .set_pins = fmr2_tea575x_set_pins, .get_pins = fmr2_tea575x_get_pins, .set_direction = fmr2_tea575x_set_direction, diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index 050b3bb96fec..85667a95f003 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -33,7 +33,7 @@ #include <linux/usb.h> #include <linux/workqueue.h> #include <media/v4l2-device.h> -#include <media/tea575x.h> +#include <media/drv-intf/tea575x.h> #if defined(CONFIG_LEDS_CLASS) || \ (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) @@ -150,7 +150,7 @@ static u32 shark_read_val(struct snd_tea575x *tea) return val; } -static struct snd_tea575x_ops shark_tea_ops = { +static const struct 
snd_tea575x_ops shark_tea_ops = { .write_val = shark_write_val, .read_val = shark_read_val, }; diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index 8654e0dc5c95..0e65a85d52c6 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -137,7 +137,7 @@ static int shark_read_reg(struct radio_tea5777 *tea, u32 *reg_ret) return 0; } -static struct radio_tea5777_ops shark_tea_ops = { +static const struct radio_tea5777_ops shark_tea_ops = { .write_reg = shark_write_reg, .read_reg = shark_read_reg, }; diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 9cbb8cdf0ac0..859f0c08ee05 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -31,7 +31,7 @@ #include <media/v4l2-event.h> #include <media/v4l2-device.h> -#include <media/si476x.h> +#include <media/drv-intf/si476x.h> #include <linux/mfd/si476x-core.h> #define FM_FREQ_RANGE_LOW 64000000 diff --git a/drivers/media/radio/radio-tea5777.h b/drivers/media/radio/radio-tea5777.h index 4ea43a90a151..4bd942526a1b 100644 --- a/drivers/media/radio/radio-tea5777.h +++ b/drivers/media/radio/radio-tea5777.h @@ -76,7 +76,7 @@ struct radio_tea5777 { u32 read_reg; u64 write_reg; struct mutex mutex; - struct radio_tea5777_ops *ops; + const struct radio_tea5777_ops *ops; void *private_data; u8 card[32]; u8 bus_info[32]; diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c index 04baafe5e901..a82eb9678d6c 100644 --- a/drivers/media/radio/radio-timb.c +++ b/drivers/media/radio/radio-timb.c @@ -26,7 +26,7 @@ #include <linux/slab.h> #include <linux/i2c.h> #include <linux/module.h> -#include <media/timb_radio.h> +#include <linux/platform_data/media/timb_radio.h> #define DRIVER_NAME "timb-radio" diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c index a77319dcba05..5146be2a1a50 100644 --- a/drivers/media/radio/si4713/radio-usb-si4713.c +++ b/drivers/media/radio/si4713/radio-usb-si4713.c @@ -31,7 +31,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/si4713.h> +#include <linux/platform_data/media/si4713.h> #include "si4713.h" diff --git a/drivers/media/radio/si4713/si4713.h b/drivers/media/radio/si4713/si4713.h index 8a376e142188..29d0e1f104d2 100644 --- a/drivers/media/radio/si4713/si4713.h +++ b/drivers/media/radio/si4713/si4713.h @@ -20,7 +20,7 @@ #include <linux/gpio/consumer.h> #include <media/v4l2-subdev.h> #include <media/v4l2-ctrls.h> -#include <media/si4713.h> +#include <linux/platform_data/media/si4713.h> #define SI4713_PRODUCT_NUMBER 0x0D diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c index 43d1ea53cb66..3e08475af579 100644 --- a/drivers/media/radio/tea575x.c +++ b/drivers/media/radio/tea575x.c @@ -31,7 +31,7 @@ #include <media/v4l2-fh.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/tea575x.h> +#include <media/drv-intf/tea575x.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Routines for control of TEA5757/5759 Philips AM/FM radio tuner chips"); diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index b6e13116c6f5..bd4d68500085 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -101,7 +101,8 @@ config IR_SHARP_DECODER ---help--- Enable this option if you have an infrared remote control which - uses the Sharp protocol, and you need software decoding 
support. + uses the Sharp protocol (Sharp, Denon), and you need software + decoding support. config IR_MCE_KBD_DECODER tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol" diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index 7dbc9ca6d885..5b63b1f15cb1 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -21,7 +21,7 @@ #include <linux/platform_device.h> #include <linux/irq.h> #include <media/rc-core.h> -#include <media/gpio-ir-recv.h> +#include <linux/platform_data/media/gpio-ir-recv.h> #define GPIO_IR_DRIVER_NAME "gpio-rc-recv" #define GPIO_IR_DEVICE_NAME "gpio_ir_recv" @@ -30,6 +30,7 @@ struct gpio_rc_dev { struct rc_dev *rcdev; int gpio_nr; bool active_low; + struct timer_list flush_timer; }; #ifdef CONFIG_OF @@ -93,12 +94,26 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id) if (rc < 0) goto err_get_value; + mod_timer(&gpio_dev->flush_timer, + jiffies + nsecs_to_jiffies(gpio_dev->rcdev->timeout)); + ir_raw_event_handle(gpio_dev->rcdev); err_get_value: return IRQ_HANDLED; } +static void flush_timer(unsigned long arg) +{ + struct gpio_rc_dev *gpio_dev = (struct gpio_rc_dev *)arg; + DEFINE_IR_RAW_EVENT(ev); + + ev.timeout = true; + ev.duration = gpio_dev->rcdev->timeout; + ir_raw_event_store(gpio_dev->rcdev, &ev); + ir_raw_event_handle(gpio_dev->rcdev); +} + static int gpio_ir_recv_probe(struct platform_device *pdev) { struct gpio_rc_dev *gpio_dev; @@ -144,6 +159,9 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) rcdev->input_id.version = 0x0100; rcdev->dev.parent = &pdev->dev; rcdev->driver_name = GPIO_IR_DRIVER_NAME; + rcdev->min_timeout = 0; + rcdev->timeout = IR_DEFAULT_TIMEOUT; + rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; if (pdata->allowed_protos) rcdev->allowed_protocols = pdata->allowed_protos; else @@ -154,6 +172,9 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) gpio_dev->gpio_nr = pdata->gpio_nr; gpio_dev->active_low = pdata->active_low; + setup_timer(&gpio_dev->flush_timer, flush_timer, + (unsigned long)gpio_dev); + rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv"); if (rc < 0) goto err_gpio_request; @@ -196,6 +217,7 @@ static int gpio_ir_recv_remove(struct platform_device *pdev) struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev); free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev); + del_timer_sync(&gpio_dev->flush_timer); rc_unregister_device(gpio_dev->rcdev); gpio_free(gpio_dev->gpio_nr); kfree(gpio_dev); diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c index 30bcf188d377..182402f7b4d1 100644 --- a/drivers/media/rc/ir-jvc-decoder.c +++ b/drivers/media/rc/ir-jvc-decoder.c @@ -47,9 +47,6 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct jvc_dec *data = &dev->raw->jvc; - if (!(dev->enabled_protocols & RC_BIT_JVC)) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index a32659fcd266..5effc65d2947 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -415,6 +415,7 @@ static int ir_lirc_unregister(struct rc_dev *dev) lirc_unregister_driver(lirc->drv->minor); lirc_buffer_free(lirc->drv->rbuf); + kfree(lirc->drv->rbuf); kfree(lirc->drv); return 0; diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c index 9f3c9b59f30c..d80986251ee0 100644 --- a/drivers/media/rc/ir-mce_kbd-decoder.c +++ 
b/drivers/media/rc/ir-mce_kbd-decoder.c @@ -216,9 +216,6 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev) u32 scancode; unsigned long delay; - if (!(dev->enabled_protocols & RC_BIT_MCE_KBD)) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c index 7b81fec0820f..bea0d1eedee0 100644 --- a/drivers/media/rc/ir-nec-decoder.c +++ b/drivers/media/rc/ir-nec-decoder.c @@ -52,9 +52,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) u8 address, not_address, command, not_command; bool send_32bits = false; - if (!(dev->enabled_protocols & RC_BIT_NEC)) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c index 84fa6e9b59a1..6ffe776abf6b 100644 --- a/drivers/media/rc/ir-rc5-decoder.c +++ b/drivers/media/rc/ir-rc5-decoder.c @@ -53,9 +53,6 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) u32 scancode; enum rc_type protocol; - if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ))) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c index d16bc67af732..e0e2edefa651 100644 --- a/drivers/media/rc/ir-rc6-decoder.c +++ b/drivers/media/rc/ir-rc6-decoder.c @@ -90,11 +90,6 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) u8 toggle; enum rc_type protocol; - if (!(dev->enabled_protocols & - (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | - RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE))) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c index b1e19a26208d..4e1711a40466 100644 --- a/drivers/media/rc/ir-rx51.c +++ b/drivers/media/rc/ir-rx51.c @@ -31,7 +31,7 @@ #include <media/lirc.h> #include <media/lirc_dev.h> -#include <media/ir-rx51.h> +#include <linux/platform_data/media/ir-rx51.h> #define LIRC_RX51_DRIVER_FEATURES (LIRC_CAN_SET_SEND_DUTY_CYCLE | \ LIRC_CAN_SET_SEND_CARRIER | \ diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c index ad1dc6ae21fc..7331e5e7c497 100644 --- a/drivers/media/rc/ir-sanyo-decoder.c +++ b/drivers/media/rc/ir-sanyo-decoder.c @@ -58,9 +58,6 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev) u32 scancode; u8 address, command, not_command; - if (!(dev->enabled_protocols & RC_BIT_SANYO)) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) { IR_dprintk(1, "SANYO event reset received. 
reset to state 0\n"); diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c index b7acdbae8159..317677f06f2c 100644 --- a/drivers/media/rc/ir-sharp-decoder.c +++ b/drivers/media/rc/ir-sharp-decoder.c @@ -48,9 +48,6 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev) struct sharp_dec *data = &dev->raw->sharp; u32 msg, echo, address, command, scancode; - if (!(dev->enabled_protocols & RC_BIT_SHARP)) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; @@ -118,7 +115,9 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev) if (data->count == SHARP_NBITS) { /* exp,chk bits should be 1,0 */ - if ((data->bits & 0x3) != 0x2) + if ((data->bits & 0x3) != 0x2 && + /* DENON variant, both chk bits 0 */ + (data->bits & 0x3) != 0x0) break; data->state = STATE_ECHO_SPACE; } else { diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c index 58ef06f35175..baa972c76e0e 100644 --- a/drivers/media/rc/ir-sony-decoder.c +++ b/drivers/media/rc/ir-sony-decoder.c @@ -46,10 +46,6 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev) u32 scancode; u8 device, subdevice, function; - if (!(dev->enabled_protocols & - (RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20))) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c index 1017d4816e8d..18596190bbb8 100644 --- a/drivers/media/rc/ir-xmp-decoder.c +++ b/drivers/media/rc/ir-xmp-decoder.c @@ -43,9 +43,6 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct xmp_dec *data = &dev->raw->xmp; - if (!(dev->enabled_protocols & RC_BIT_XMP)) - return 0; - if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index 85af7a869167..18adf580f502 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -39,6 +39,18 @@ #include "nuvoton-cir.h" +static const struct nvt_chip nvt_chips[] = { + { "w83667hg", NVT_W83667HG }, + { "NCT6775F", NVT_6775F }, + { "NCT6776F", NVT_6776F }, + { "NCT6779D", NVT_6779D }, +}; + +static inline bool is_w83667hg(struct nvt_dev *nvt) +{ + return nvt->chip_ver == NVT_W83667HG; +} + /* write val to config reg */ static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg) { @@ -224,74 +236,60 @@ static void cir_wake_dump_regs(struct nvt_dev *nvt) pr_cont("\n"); } +static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nvt_chips); i++) + if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) { + nvt->chip_ver = nvt_chips[i].chip_ver; + return nvt_chips[i].name; + } + + return NULL; +} + + /* detect hardware features */ -static int nvt_hw_detect(struct nvt_dev *nvt) +static void nvt_hw_detect(struct nvt_dev *nvt) { - unsigned long flags; - u8 chip_major, chip_minor; - char chip_id[12]; - bool chip_unknown = false; + const char *chip_name; + int chip_id; nvt_efm_enable(nvt); /* Check if we're wired for the alternate EFER setup */ - chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); - if (chip_major == 0xff) { + nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); + if (nvt->chip_major == 0xff) { nvt->cr_efir = CR_EFIR2; nvt->cr_efdr = CR_EFDR2; nvt_efm_enable(nvt); - chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); + nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); } - chip_minor = nvt_cr_read(nvt, 
CR_CHIP_ID_LO); - - /* these are the known working chip revisions... */ - switch (chip_major) { - case CHIP_ID_HIGH_667: - strcpy(chip_id, "w83667hg\0"); - if (chip_minor != CHIP_ID_LOW_667) - chip_unknown = true; - break; - case CHIP_ID_HIGH_677B: - strcpy(chip_id, "w83677hg\0"); - if (chip_minor != CHIP_ID_LOW_677B2 && - chip_minor != CHIP_ID_LOW_677B3) - chip_unknown = true; - break; - case CHIP_ID_HIGH_677C: - strcpy(chip_id, "w83677hg-c\0"); - if (chip_minor != CHIP_ID_LOW_677C) - chip_unknown = true; - break; - default: - strcpy(chip_id, "w836x7hg\0"); - chip_unknown = true; - break; - } + nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO); + + chip_id = nvt->chip_major << 8 | nvt->chip_minor; + chip_name = nvt_find_chip(nvt, chip_id); /* warn, but still let the driver load, if we don't know this chip */ - if (chip_unknown) - nvt_pr(KERN_WARNING, "%s: unknown chip, id: 0x%02x 0x%02x, " - "it may not work...", chip_id, chip_major, chip_minor); + if (!chip_name) + dev_warn(&nvt->pdev->dev, + "unknown chip, id: 0x%02x 0x%02x, it may not work...", + nvt->chip_major, nvt->chip_minor); else - nvt_dbg("%s: chip id: 0x%02x 0x%02x", - chip_id, chip_major, chip_minor); + dev_info(&nvt->pdev->dev, + "found %s or compatible: chip id: 0x%02x 0x%02x", + chip_name, nvt->chip_major, nvt->chip_minor); nvt_efm_disable(nvt); - - spin_lock_irqsave(&nvt->nvt_lock, flags); - nvt->chip_major = chip_major; - nvt->chip_minor = chip_minor; - spin_unlock_irqrestore(&nvt->nvt_lock, flags); - - return 0; } static void nvt_cir_ldev_init(struct nvt_dev *nvt) { u8 val, psreg, psmask, psval; - if (nvt->chip_major == CHIP_ID_HIGH_667) { + if (is_w83667hg(nvt)) { psreg = CR_MULTIFUNC_PIN_SEL; psmask = MULTIFUNC_PIN_SEL_MASK; psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB; @@ -485,8 +483,9 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt) duration *= SAMPLE_PERIOD; if (!count || !duration) { - nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)", - count, duration); + dev_notice(&nvt->pdev->dev, + "Unable to determine carrier! (c:%u, d:%u)", + count, duration); return 0; } @@ -661,7 +660,7 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt) static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt) { - nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!"); + dev_warn(&nvt->pdev->dev, "RX FIFO overrun detected, flushing data!"); nvt->pkts = 0; nvt_clear_cir_fifo(nvt); @@ -719,7 +718,7 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt) static void nvt_cir_log_irqs(u8 status, u8 iren) { - nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s", + nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s", status, iren, status & CIR_IRSTS_RDR ? " RDR" : "", status & CIR_IRSTS_RTR ? 
" RTR" : "", @@ -779,7 +778,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) if (!status) { nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__); nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); - return IRQ_RETVAL(IRQ_NONE); + return IRQ_NONE; } /* ack/clear all irq flags we've got */ @@ -790,11 +789,10 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) iren = nvt_cir_reg_read(nvt, CIR_IREN); if (!iren) { nvt_dbg_verbose("%s exiting, CIR not enabled", __func__); - return IRQ_RETVAL(IRQ_NONE); + return IRQ_NONE; } - if (debug) - nvt_cir_log_irqs(status, iren); + nvt_cir_log_irqs(status, iren); if (status & CIR_IRSTS_RTR) { /* FIXME: add code for study/learn mode */ @@ -853,7 +851,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) } nvt_dbg_verbose("%s done", __func__); - return IRQ_RETVAL(IRQ_HANDLED); + return IRQ_HANDLED; } /* Interrupt service routine for CIR Wake */ @@ -867,7 +865,7 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS); if (!status) - return IRQ_RETVAL(IRQ_NONE); + return IRQ_NONE; if (status & CIR_WAKE_IRSTS_IR_PENDING) nvt_clear_cir_wake_fifo(nvt); @@ -879,7 +877,7 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN); if (!iren) { nvt_dbg_wake("%s exiting, wake not enabled", __func__); - return IRQ_RETVAL(IRQ_HANDLED); + return IRQ_HANDLED; } if ((status & CIR_WAKE_IRSTS_PE) && @@ -896,7 +894,7 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) } nvt_dbg_wake("%s done", __func__); - return IRQ_RETVAL(IRQ_HANDLED); + return IRQ_HANDLED; } static void nvt_enable_cir(struct nvt_dev *nvt) @@ -974,7 +972,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) struct rc_dev *rdev; int ret = -ENOMEM; - nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL); + nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL); if (!nvt) return ret; @@ -1026,9 +1024,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) init_waitqueue_head(&nvt->tx.queue); - ret = nvt_hw_detect(nvt); - if (ret) - goto exit_free_dev_rdev; + nvt_hw_detect(nvt); /* Initialize CIR & CIR Wake Logical Devices */ nvt_efm_enable(nvt); @@ -1074,25 +1070,26 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) ret = -EBUSY; /* now claim resources */ - if (!request_region(nvt->cir_addr, + if (!devm_request_region(&pdev->dev, nvt->cir_addr, CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) goto exit_unregister_device; - if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, - NVT_DRIVER_NAME, (void *)nvt)) - goto exit_release_cir_addr; + if (devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr, + IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt)) + goto exit_unregister_device; - if (!request_region(nvt->cir_wake_addr, + if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr, CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) - goto exit_free_irq; + goto exit_unregister_device; - if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, - NVT_DRIVER_NAME, (void *)nvt)) - goto exit_release_cir_wake_addr; + if (devm_request_irq(&pdev->dev, nvt->cir_wake_irq, + nvt_cir_wake_isr, IRQF_SHARED, + NVT_DRIVER_NAME, (void *)nvt)) + goto exit_unregister_device; device_init_wakeup(&pdev->dev, true); - nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n"); + dev_notice(&pdev->dev, "driver has been successfully loaded\n"); if (debug) { cir_dump_regs(nvt); cir_wake_dump_regs(nvt); @@ -1100,18 +1097,11 @@ static int 
nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) return 0; -exit_release_cir_wake_addr: - release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH); -exit_free_irq: - free_irq(nvt->cir_irq, nvt); -exit_release_cir_addr: - release_region(nvt->cir_addr, CIR_IOREG_LENGTH); exit_unregister_device: rc_unregister_device(rdev); rdev = NULL; exit_free_dev_rdev: rc_free_device(rdev); - kfree(nvt); return ret; } @@ -1129,15 +1119,7 @@ static void nvt_remove(struct pnp_dev *pdev) nvt_enable_wake(nvt); spin_unlock_irqrestore(&nvt->nvt_lock, flags); - /* free resources */ - free_irq(nvt->cir_irq, nvt); - free_irq(nvt->cir_wake_irq, nvt); - release_region(nvt->cir_addr, CIR_IOREG_LENGTH); - release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH); - rc_unregister_device(nvt->rdev); - - kfree(nvt); } static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state) diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index e1cf23c3875b..0ad15d34e9c9 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -35,9 +35,6 @@ static int debug; -#define nvt_pr(level, text, ...) \ - printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__) - #define nvt_dbg(text, ...) \ if (debug) \ printk(KERN_DEBUG \ @@ -64,6 +61,21 @@ static int debug; #define TX_BUF_LEN 256 #define RX_BUF_LEN 32 +#define SIO_ID_MASK 0xfff0 + +enum nvt_chip_ver { + NVT_UNKNOWN = 0, + NVT_W83667HG = 0xa510, + NVT_6775F = 0xb470, + NVT_6776F = 0xc330, + NVT_6779D = 0xc560 +}; + +struct nvt_chip { + const char *name; + enum nvt_chip_ver chip_ver; +}; + struct nvt_dev { struct pnp_dev *pdev; struct rc_dev *rdev; @@ -93,6 +105,7 @@ struct nvt_dev { int cir_irq; int cir_wake_irq; + enum nvt_chip_ver chip_ver; /* hardware id */ u8 chip_major; u8 chip_minor; @@ -326,15 +339,6 @@ struct nvt_dev { #define EFER_EFM_ENABLE 0x87 #define EFER_EFM_DISABLE 0xaa -/* Chip IDs found in CR_CHIP_ID_{HI,LO} */ -#define CHIP_ID_HIGH_667 0xa5 -#define CHIP_ID_HIGH_677B 0xb4 -#define CHIP_ID_HIGH_677C 0xc3 -#define CHIP_ID_LOW_667 0x13 -#define CHIP_ID_LOW_677B2 0x72 -#define CHIP_ID_LOW_677B3 0x73 -#define CHIP_ID_LOW_677C 0x33 - /* Config regs we need to care about */ #define CR_SOFTWARE_RESET 0x02 #define CR_LOGICAL_DEV_SEL 0x07 diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index b68d4f762734..7359f3d03b64 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -167,75 +167,4 @@ void ir_raw_init(void); * loads the compiled decoders for their usage with IR raw events */ -/* from ir-nec-decoder.c */ -#ifdef CONFIG_IR_NEC_DECODER_MODULE -#define load_nec_decode() request_module_nowait("ir-nec-decoder") -#else -static inline void load_nec_decode(void) { } -#endif - -/* from ir-rc5-decoder.c */ -#ifdef CONFIG_IR_RC5_DECODER_MODULE -#define load_rc5_decode() request_module_nowait("ir-rc5-decoder") -#else -static inline void load_rc5_decode(void) { } -#endif - -/* from ir-rc6-decoder.c */ -#ifdef CONFIG_IR_RC6_DECODER_MODULE -#define load_rc6_decode() request_module_nowait("ir-rc6-decoder") -#else -static inline void load_rc6_decode(void) { } -#endif - -/* from ir-jvc-decoder.c */ -#ifdef CONFIG_IR_JVC_DECODER_MODULE -#define load_jvc_decode() request_module_nowait("ir-jvc-decoder") -#else -static inline void load_jvc_decode(void) { } -#endif - -/* from ir-sony-decoder.c */ -#ifdef CONFIG_IR_SONY_DECODER_MODULE -#define load_sony_decode() request_module_nowait("ir-sony-decoder") -#else -static inline void load_sony_decode(void) { } -#endif - -/* from 
ir-sanyo-decoder.c */ -#ifdef CONFIG_IR_SANYO_DECODER_MODULE -#define load_sanyo_decode() request_module_nowait("ir-sanyo-decoder") -#else -static inline void load_sanyo_decode(void) { } -#endif - -/* from ir-sharp-decoder.c */ -#ifdef CONFIG_IR_SHARP_DECODER_MODULE -#define load_sharp_decode() request_module_nowait("ir-sharp-decoder") -#else -static inline void load_sharp_decode(void) { } -#endif - -/* from ir-mce_kbd-decoder.c */ -#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE -#define load_mce_kbd_decode() request_module_nowait("ir-mce_kbd-decoder") -#else -static inline void load_mce_kbd_decode(void) { } -#endif - -/* from ir-lirc-codec.c */ -#ifdef CONFIG_IR_LIRC_CODEC_MODULE -#define load_lirc_codec() request_module_nowait("ir-lirc-codec") -#else -static inline void load_lirc_codec(void) { } -#endif - -/* from ir-xmp-decoder.c */ -#ifdef CONFIG_IR_XMP_DECODER_MODULE -#define load_xmp_decode() request_module_nowait("ir-xmp-decoder") -#else -static inline void load_xmp_decode(void) { } -#endif - - #endif /* _RC_CORE_PRIV */ diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index ad260520a9d4..c69807fe2fef 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -59,7 +59,9 @@ static int ir_raw_event_thread(void *data) mutex_lock(&ir_raw_handler_lock); list_for_each_entry(handler, &ir_raw_handler_list, list) - handler->decode(raw->dev, ev); + if (raw->dev->enabled_protocols & handler->protocols || + !handler->protocols) + handler->decode(raw->dev, ev); raw->prev_ev = ev; mutex_unlock(&ir_raw_handler_lock); } @@ -246,6 +248,14 @@ static int change_protocol(struct rc_dev *dev, u64 *rc_type) return 0; } +static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols) +{ + mutex_lock(&dev->lock); + dev->enabled_protocols &= ~protocols; + dev->enabled_wakeup_protocols &= ~protocols; + mutex_unlock(&dev->lock); +} + /* * Used to (un)register raw event clients */ @@ -337,33 +347,16 @@ EXPORT_SYMBOL(ir_raw_handler_register); void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler) { struct ir_raw_event_ctrl *raw; + u64 protocols = ir_raw_handler->protocols; mutex_lock(&ir_raw_handler_lock); list_del(&ir_raw_handler->list); - if (ir_raw_handler->raw_unregister) - list_for_each_entry(raw, &ir_raw_client_list, list) + list_for_each_entry(raw, &ir_raw_client_list, list) { + ir_raw_disable_protocols(raw->dev, protocols); + if (ir_raw_handler->raw_unregister) ir_raw_handler->raw_unregister(raw->dev); - available_protocols &= ~ir_raw_handler->protocols; + } + available_protocols &= ~protocols; mutex_unlock(&ir_raw_handler_lock); } EXPORT_SYMBOL(ir_raw_handler_unregister); - -void ir_raw_init(void) -{ - /* Load the decoder modules */ - - load_nec_decode(); - load_rc5_decode(); - load_rc6_decode(); - load_jvc_decode(); - load_sony_decode(); - load_sanyo_decode(); - load_sharp_decode(); - load_mce_kbd_decode(); - load_lirc_codec(); - load_xmp_decode(); - - /* If needed, we may later add some init code. 
In this case, - it is needed to change the CONFIG_MODULE test at rc-core.h - */ -} diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 3f0f71adabb4..1042fa331a07 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -61,7 +61,7 @@ struct rc_map *rc_map_get(const char *name) struct rc_map_list *map; map = seek_rc_map(name); -#ifdef MODULE +#ifdef CONFIG_MODULES if (!map) { int rc = request_module("%s", name); if (rc < 0) { @@ -777,30 +777,31 @@ static struct class rc_class = { * used by the sysfs protocols file. Note that the order * of the entries is relevant. */ -static struct { +static const struct { u64 type; - char *name; + const char *name; + const char *module_name; } proto_names[] = { - { RC_BIT_NONE, "none" }, - { RC_BIT_OTHER, "other" }, - { RC_BIT_UNKNOWN, "unknown" }, + { RC_BIT_NONE, "none", NULL }, + { RC_BIT_OTHER, "other", NULL }, + { RC_BIT_UNKNOWN, "unknown", NULL }, { RC_BIT_RC5 | - RC_BIT_RC5X, "rc-5" }, - { RC_BIT_NEC, "nec" }, + RC_BIT_RC5X, "rc-5", "ir-rc5-decoder" }, + { RC_BIT_NEC, "nec", "ir-nec-decoder" }, { RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | - RC_BIT_RC6_MCE, "rc-6" }, - { RC_BIT_JVC, "jvc" }, + RC_BIT_RC6_MCE, "rc-6", "ir-rc6-decoder" }, + { RC_BIT_JVC, "jvc", "ir-jvc-decoder" }, { RC_BIT_SONY12 | RC_BIT_SONY15 | - RC_BIT_SONY20, "sony" }, - { RC_BIT_RC5_SZ, "rc-5-sz" }, - { RC_BIT_SANYO, "sanyo" }, - { RC_BIT_SHARP, "sharp" }, - { RC_BIT_MCE_KBD, "mce_kbd" }, - { RC_BIT_XMP, "xmp" }, + RC_BIT_SONY20, "sony", "ir-sony-decoder" }, + { RC_BIT_RC5_SZ, "rc-5-sz", "ir-rc5-decoder" }, + { RC_BIT_SANYO, "sanyo", "ir-sanyo-decoder" }, + { RC_BIT_SHARP, "sharp", "ir-sharp-decoder" }, + { RC_BIT_MCE_KBD, "mce_kbd", "ir-mce_kbd-decoder" }, + { RC_BIT_XMP, "xmp", "ir-xmp-decoder" }, }; /** @@ -979,6 +980,48 @@ static int parse_protocol_change(u64 *protocols, const char *buf) return count; } +static void ir_raw_load_modules(u64 *protocols) + +{ + u64 available; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(proto_names); i++) { + if (proto_names[i].type == RC_BIT_NONE || + proto_names[i].type & (RC_BIT_OTHER | RC_BIT_UNKNOWN)) + continue; + + available = ir_raw_get_allowed_protocols(); + if (!(*protocols & proto_names[i].type & ~available)) + continue; + + if (!proto_names[i].module_name) { + pr_err("Can't enable IR protocol %s\n", + proto_names[i].name); + *protocols &= ~proto_names[i].type; + continue; + } + + ret = request_module("%s", proto_names[i].module_name); + if (ret < 0) { + pr_err("Couldn't load IR protocol module %s\n", + proto_names[i].module_name); + *protocols &= ~proto_names[i].type; + continue; + } + msleep(20); + available = ir_raw_get_allowed_protocols(); + if (!(*protocols & proto_names[i].type & ~available)) + continue; + + pr_err("Loaded IR protocol module %s, \ + but protocol %s still not available\n", + proto_names[i].module_name, + proto_names[i].name); + *protocols &= ~proto_names[i].type; + } +} + /** * store_protocols() - changes the current/wakeup IR protocol(s) * @device: the device descriptor @@ -1045,6 +1088,9 @@ static ssize_t store_protocols(struct device *device, goto out; } + if (dev->driver_type == RC_DRIVER_IR_RAW) + ir_raw_load_modules(&new_protocols); + if (new_protocols != old_protocols) { *current_protocols = new_protocols; IR_dprintk(1, "Protocols changed to 0x%llx\n", @@ -1420,17 +1466,13 @@ int rc_register_device(struct rc_dev *dev) dev->input_dev->rep[REP_PERIOD] = 125; path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); - printk(KERN_INFO "%s: %s 
as %s\n", - dev_name(&dev->dev), - dev->input_name ? dev->input_name : "Unspecified device", - path ? path : "N/A"); + dev_info(&dev->dev, "%s as %s\n", + dev->input_name ?: "Unspecified device", path ?: "N/A"); kfree(path); if (dev->driver_type == RC_DRIVER_IR_RAW) { - /* Load raw decoders, if they aren't already */ if (!raw_init) { - IR_dprintk(1, "Loading raw decoders\n"); - ir_raw_init(); + request_module_nowait("ir-lirc-codec"); raw_init = true; } /* calls ir_register_device so unlock mutex here*/ diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c index 37d040158dff..1fa0c9d1c508 100644 --- a/drivers/media/rc/st_rc.c +++ b/drivers/media/rc/st_rc.c @@ -16,6 +16,7 @@ #include <linux/reset.h> #include <media/rc-core.h> #include <linux/pinctrl/consumer.h> +#include <linux/pm_wakeirq.h> struct st_rc_device { struct device *dev; @@ -190,6 +191,9 @@ static void st_rc_hardware_init(struct st_rc_device *dev) static int st_rc_remove(struct platform_device *pdev) { struct st_rc_device *rc_dev = platform_get_drvdata(pdev); + + dev_pm_clear_wake_irq(&pdev->dev); + device_init_wakeup(&pdev->dev, false); clk_disable_unprepare(rc_dev->sys_clock); rc_unregister_device(rc_dev->rdev); return 0; @@ -298,22 +302,22 @@ static int st_rc_probe(struct platform_device *pdev) rdev->map_name = RC_MAP_LIRC; rdev->input_name = "ST Remote Control Receiver"; - /* enable wake via this device */ - device_set_wakeup_capable(dev, true); - device_set_wakeup_enable(dev, true); - ret = rc_register_device(rdev); if (ret < 0) goto clkerr; rc_dev->rdev = rdev; if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt, - IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) { + 0, IR_ST_NAME, rc_dev) < 0) { dev_err(dev, "IRQ %d register failed\n", rc_dev->irq); ret = -EINVAL; goto rcerr; } + /* enable wake via this device */ + device_init_wakeup(dev, true); + dev_pm_set_wake_irq(dev, rc_dev->irq); + /** * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW * lircd expects a long space first before a signal train to sync. diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index 5a17cb88ff27..815243c65bc3 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c @@ -34,6 +34,7 @@ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/ktime.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> @@ -96,8 +97,8 @@ struct streamzap_ir { /* sum of signal lengths received since signal start */ unsigned long sum; /* start time of signal; necessary for gap tracking */ - struct timeval signal_last; - struct timeval signal_start; + ktime_t signal_last; + ktime_t signal_start; bool timeout_enabled; char name[128]; @@ -136,20 +137,18 @@ static void sz_push_full_pulse(struct streamzap_ir *sz, DEFINE_IR_RAW_EVENT(rawir); if (sz->idle) { - long deltv; + int delta; sz->signal_last = sz->signal_start; - do_gettimeofday(&sz->signal_start); + sz->signal_start = ktime_get_real(); - deltv = sz->signal_start.tv_sec - sz->signal_last.tv_sec; + delta = ktime_us_delta(sz->signal_start, sz->signal_last); rawir.pulse = false; - if (deltv > 15) { + if (delta > (15 * USEC_PER_SEC)) { /* really long time */ rawir.duration = IR_MAX_DURATION; } else { - rawir.duration = (int)(deltv * 1000000 + - sz->signal_start.tv_usec - - sz->signal_last.tv_usec); + rawir.duration = delta; rawir.duration -= sz->sum; rawir.duration = US_TO_NS(rawir.duration); rawir.duration = (rawir.duration > IR_MAX_DURATION) ? 
@@ -428,7 +427,7 @@ static int streamzap_probe(struct usb_interface *intf, sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION); #endif - do_gettimeofday(&sz->signal_start); + sz->signal_start = ktime_get_real(); /* Complete final initialisations */ usb_fill_int_urb(sz->urb_in, usbdev, pipe, sz->buf_in, diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c index 7830aef3db45..40f77685cc4a 100644 --- a/drivers/media/rc/sunxi-cir.c +++ b/drivers/media/rc/sunxi-cir.c @@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev) if (!ir) return -ENOMEM; + spin_lock_init(&ir->ir_lock); + if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir")) ir->fifo_size = 64; else diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c index 95ed46f2cd26..353b178becf6 100644 --- a/drivers/media/tuners/max2165.c +++ b/drivers/media/tuners/max2165.c @@ -385,7 +385,7 @@ static const struct dvb_tuner_ops max2165_tuner_ops = { .info = { .name = "Maxim MAX2165", .frequency_min = 470000000, - .frequency_max = 780000000, + .frequency_max = 862000000, .frequency_step = 50000, }, diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c index 9e9c5eb4cb66..6457ac91ef09 100644 --- a/drivers/media/tuners/mt2063.c +++ b/drivers/media/tuners/mt2063.c @@ -225,7 +225,6 @@ struct mt2063_state { const struct mt2063_config *config; struct dvb_tuner_ops ops; struct dvb_frontend *frontend; - struct tuner_state status; u32 frequency; u32 srate; diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index ce157edd45fa..0e1ca2b00e61 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe) len = fw->data[fw->size - remaining]; if (len > SI2157_ARGLEN) { dev_err(&client->dev, "Bad firmware length\n"); + ret = -EINVAL; goto err_release_firmware; } memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c index fcbb49757614..0d4ac5947f3a 100644 --- a/drivers/media/usb/airspy/airspy.c +++ b/drivers/media/usb/airspy/airspy.c @@ -134,7 +134,7 @@ struct airspy { int urbs_submitted; /* USB control message buffer */ - #define BUF_SIZE 24 + #define BUF_SIZE 128 u8 buf[BUF_SIZE]; /* Current configuration */ @@ -316,7 +316,7 @@ static void airspy_urb_complete(struct urb *urb) len = airspy_convert_stream(s, ptr, urb->transfer_buffer, urb->actual_length); vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len); - v4l2_get_timestamp(&fbuf->vb.timestamp); + fbuf->vb.vb2_buf.timestamp = ktime_get_ns(); fbuf->vb.sequence = s->sequence++; vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE); } @@ -488,7 +488,7 @@ static void airspy_disconnect(struct usb_interface *intf) /* Videobuf2 operations */ static int airspy_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, + unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct airspy *s = vb2_get_drv_priv(vq); diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c index 07d08c49f4d4..5a28ce3a1d49 100644 --- a/drivers/media/usb/as102/as102_fw.c +++ b/drivers/media/usb/as102/as102_fw.c @@ -198,6 +198,7 @@ int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap) pr_info("%s: firmware: %s loaded with success\n", DRIVER_NAME, fw1); release_firmware(firmware); + firmware = NULL; /* wait for boot to complete */ mdelay(100); diff --git 
a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c index 130c8b49bf7f..b4efc103ae57 100644 --- a/drivers/media/usb/au0828/au0828-vbi.c +++ b/drivers/media/usb/au0828/au0828-vbi.c @@ -30,23 +30,17 @@ /* ------------------------------------------------------------------ */ -static int vbi_queue_setup(struct vb2_queue *vq, const void *parg, +static int vbi_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct au0828_dev *dev = vb2_get_drv_priv(vq); - unsigned long img_size = dev->vbi_width * dev->vbi_height * 2; - unsigned long size; - - size = fmt ? (fmt->fmt.vbi.samples_per_line * - (fmt->fmt.vbi.count[0] + fmt->fmt.vbi.count[1])) : img_size; - if (size < img_size) - return -EINVAL; + unsigned long size = dev->vbi_width * dev->vbi_height * 2; + if (*nplanes) + return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; sizes[0] = size; - return 0; } diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 45c622e234f7..0a725a161dd6 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -314,7 +314,7 @@ static inline void buffer_filled(struct au0828_dev *dev, vb->sequence = dev->vbi_frame_count++; vb->field = V4L2_FIELD_INTERLACED; - v4l2_get_timestamp(&vb->timestamp); + vb->vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE); } @@ -638,19 +638,15 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb) return rc; } -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct au0828_dev *dev = vb2_get_drv_priv(vq); - unsigned long img_size = dev->height * dev->bytesperline; - unsigned long size; - - size = fmt ? fmt->fmt.pix.sizeimage : img_size; - if (size < img_size) - return -EINVAL; + unsigned long size = dev->height * dev->bytesperline; + if (*nplanes) + return sizes[0] < size ? 
-EINVAL : 0; *nplanes = 1; sizes[0] = size; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 351a78a84c3d..c1aa1ab2ece9 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -890,8 +890,7 @@ static void cpia2_usb_disconnect(struct usb_interface *intf) DBG("Wakeup waiting processes\n"); cam->curbuff->status = FRAME_READY; cam->curbuff->length = 0; - if (waitqueue_active(&cam->wq_stream)) - wake_up_interruptible(&cam->wq_stream); + wake_up_interruptible(&cam->wq_stream); } DBG("Releasing interface\n"); diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index 47a98a2014a5..48643b94e694 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -37,7 +37,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #include <media/tuner.h> #define CX231xx_FIRM_IMAGE_SIZE 376836 @@ -1492,6 +1492,27 @@ static struct videobuf_queue_ops cx231xx_qops = { /* ------------------------------------------------------------------ */ +static int vidioc_cropcap(struct file *file, void *priv, + struct v4l2_cropcap *cc) +{ + struct cx231xx_fh *fh = priv; + struct cx231xx *dev = fh->dev; + bool is_50hz = dev->encodernorm.id & V4L2_STD_625_50; + + if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + cc->bounds.left = 0; + cc->bounds.top = 0; + cc->bounds.width = dev->ts1.width; + cc->bounds.height = dev->ts1.height; + cc->defrect = cc->bounds; + cc->pixelaspect.numerator = is_50hz ? 54 : 11; + cc->pixelaspect.denominator = is_50hz ? 59 : 10; + + return 0; +} + static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm) { struct cx231xx_fh *fh = file->private_data; @@ -1834,6 +1855,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { .vidioc_g_input = cx231xx_g_input, .vidioc_s_input = cx231xx_s_input, .vidioc_s_ctrl = vidioc_s_ctrl, + .vidioc_cropcap = vidioc_cropcap, .vidioc_querycap = cx231xx_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, @@ -1901,7 +1923,7 @@ static int cx231xx_s_audio_sampling_freq(struct cx2341x_handler *cxhdl, u32 idx) return 0; } -static struct cx2341x_handler_ops cx231xx_ops = { +static const struct cx2341x_handler_ops cx231xx_ops = { /* needed for the video clock freq */ .s_audio_sampling_freq = cx231xx_s_audio_sampling_freq, /* needed for setting up the video resolution */ diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 4a117a58c39a..89dc695c696e 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -30,7 +30,7 @@ #include <media/tveeprom.h> #include <media/v4l2-common.h> -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include "dvb-usb-ids.h" #include "xc5000.h" #include "tda18271.h" @@ -352,7 +352,7 @@ struct cx231xx_board cx231xx_boards[] = { .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_1, - .demod_i2c_master = I2C_2, + .demod_i2c_master = I2C_1_MUX_1, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_NTSC, @@ -713,7 +713,7 @@ struct cx231xx_board cx231xx_boards[] = { .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, - .demod_i2c_master = I2C_2, + .demod_i2c_master = I2C_1_MUX_3, 
.has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_PAL, @@ -752,7 +752,7 @@ struct cx231xx_board cx231xx_boards[] = { .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, - .demod_i2c_master = I2C_2, + .demod_i2c_master = I2C_1_MUX_3, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_PAL, @@ -791,7 +791,7 @@ struct cx231xx_board cx231xx_boards[] = { .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, - .demod_i2c_master = I2C_2, + .demod_i2c_master = I2C_1_MUX_3, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_NTSC, diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c index a2fd49b6be83..f497888d94bf 100644 --- a/drivers/media/usb/cx231xx/cx231xx-core.c +++ b/drivers/media/usb/cx231xx/cx231xx-core.c @@ -914,6 +914,7 @@ EXPORT_SYMBOL_GPL(cx231xx_uninit_isoc); */ void cx231xx_uninit_bulk(struct cx231xx *dev) { + struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; struct urb *urb; int i; @@ -931,7 +932,7 @@ void cx231xx_uninit_bulk(struct cx231xx *dev) if (dev->video_mode.bulk_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, - dev->video_mode.isoc_ctl. + dev->video_mode.bulk_ctl. transfer_buffer[i], urb->transfer_dma); } @@ -943,10 +944,12 @@ void cx231xx_uninit_bulk(struct cx231xx *dev) kfree(dev->video_mode.bulk_ctl.urb); kfree(dev->video_mode.bulk_ctl.transfer_buffer); + kfree(dma_q->p_left_data); dev->video_mode.bulk_ctl.urb = NULL; dev->video_mode.bulk_ctl.transfer_buffer = NULL; dev->video_mode.bulk_ctl.num_bufs = 0; + dma_q->p_left_data = NULL; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); @@ -1196,6 +1199,16 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets, sb_size, cx231xx_bulk_irq_callback, dma_q); } + /* clear halt */ + rc = usb_clear_halt(dev->udev, dev->video_mode.bulk_ctl.urb[0]->pipe); + if (rc < 0) { + dev_err(dev->dev, + "failed to clear USB bulk endpoint stall/halt condition (error=%i)\n", + rc); + cx231xx_uninit_bulk(dev); + return rc; + } + init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c index 66ee161fc7ba..e3594b9fab4a 100644 --- a/drivers/media/usb/cx231xx/cx231xx-dvb.c +++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c @@ -725,7 +725,7 @@ static int dvb_init(struct cx231xx *dev) dev->dvb->frontend = dvb_attach(lgdt3305_attach, &hcw_lgdt3305_config, - tuner_i2c); + demod_i2c); if (dev->dvb->frontend == NULL) { dev_err(dev->dev, @@ -746,7 +746,7 @@ static int dvb_init(struct cx231xx *dev) dev->dvb->frontend = dvb_attach(si2165_attach, &hauppauge_930C_HD_1113xx_si2165_config, - tuner_i2c + demod_i2c ); if (dev->dvb->frontend == NULL) { @@ -779,7 +779,7 @@ static int dvb_init(struct cx231xx *dev) dev->dvb->frontend = dvb_attach(si2165_attach, &pctv_quatro_stick_1114xx_si2165_config, - tuner_i2c + demod_i2c ); if (dev->dvb->frontend == NULL) { @@ -835,7 +835,7 @@ static int dvb_init(struct cx231xx *dev) dev->dvb->frontend = dvb_attach(lgdt3306a_attach, &hauppauge_955q_lgdt3306a_config, - tuner_i2c + demod_i2c ); if (dev->dvb->frontend == NULL) { diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c index a08014d20a5c..15bb573b78ac 100644 --- a/drivers/media/usb/cx231xx/cx231xx-vbi.c +++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c @@ -32,7 +32,7 @@ #include <media/v4l2-common.h> #include 
<media/v4l2-ioctl.h> -#include <media/msp3400.h> +#include <media/drv-intf/msp3400.h> #include <media/tuner.h> #include "cx231xx-vbi.h" diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index d0d8f08e37c8..a70850fe6235 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -36,7 +36,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> -#include <media/msp3400.h> +#include <media/drv-intf/msp3400.h> #include <media/tuner.h> #include "dvb_frontend.h" @@ -1444,6 +1444,7 @@ static int vidioc_cropcap(struct file *file, void *priv, { struct cx231xx_fh *fh = priv; struct cx231xx *dev = fh->dev; + bool is_50hz = dev->norm & V4L2_STD_625_50; if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; @@ -1453,8 +1454,8 @@ static int vidioc_cropcap(struct file *file, void *priv, cc->bounds.width = dev->width; cc->bounds.height = dev->height; cc->defrect = cc->bounds; - cc->pixelaspect.numerator = 54; /* 4:3 FIXME: remove magic numbers */ - cc->pixelaspect.denominator = 59; + cc->pixelaspect.numerator = is_50hz ? 54 : 11; + cc->pixelaspect.denominator = is_50hz ? 59 : 10; return 0; } diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h index 54790fbe8fdc..ec6d3f5bc36d 100644 --- a/drivers/media/usb/cx231xx/cx231xx.h +++ b/drivers/media/usb/cx231xx/cx231xx.h @@ -30,14 +30,14 @@ #include <linux/mutex.h> #include <linux/usb.h> -#include <media/cx2341x.h> +#include <media/drv-intf/cx2341x.h> #include <media/videobuf-vmalloc.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fh.h> #include <media/rc-core.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> #include <media/videobuf-dvb.h> #include "cx231xx-reg.h" diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig index 9facc92c8dea..3dc8ef004f8b 100644 --- a/drivers/media/usb/dvb-usb-v2/Kconfig +++ b/drivers/media/usb/dvb-usb-v2/Kconfig @@ -9,7 +9,7 @@ config DVB_USB_V2 <file:Documentation/dvb/README.dvb-usb>. For a complete list of supported USB devices see the LinuxTV DVB Wiki: - <http://www.linuxtv.org/wiki/index.php/DVB_USB> + <https://linuxtv.org/wiki/index.php/DVB_USB> Say Y if you own a USB DVB device. 
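The vb2 queue_setup conversions scattered through these hunks (au0828, em28xx, usbtv, uvc and others) all end up with the same shape once the old parg argument is gone: compute the driver's minimum buffer size, and if *nplanes is already set by the caller only validate the offered plane size, otherwise report a single plane of the computed size. The following is a minimal sketch of that post-change callback against the vb2 prototype shown in these hunks; my_dev and my_queue_setup are hypothetical names, not part of the patch.

/* Sketch only: mirrors the queue_setup pattern used in the hunks above/below. */
static int my_queue_setup(struct vb2_queue *vq,
			  unsigned int *nbuffers, unsigned int *nplanes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	struct my_dev *dev = vb2_get_drv_priv(vq);	/* hypothetical driver state */
	unsigned long size = dev->width * dev->height * dev->depth / 8;

	/* keep at least two buffers queued, as the usbtv hunk does */
	if (vq->num_buffers + *nbuffers < 2)
		*nbuffers = 2 - vq->num_buffers;

	/* caller (VIDIOC_CREATE_BUFS) already chose a layout: only validate it */
	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;
	return 0;
}

When *nplanes is non-zero the format has already been negotiated by the caller, so the callback no longer needs the removed v4l2_format pointer; it only has to reject planes smaller than the driver's minimum.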
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c index ea3753653368..84f6de6fa07d 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c @@ -35,7 +35,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."); struct mxl111sf_demod_state { struct mxl111sf_state *mxl_state; - struct mxl111sf_demod_config *cfg; + const struct mxl111sf_demod_config *cfg; struct dvb_frontend fe; }; @@ -579,7 +579,7 @@ static struct dvb_frontend_ops mxl111sf_demod_ops = { }; struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state, - struct mxl111sf_demod_config *cfg) + const struct mxl111sf_demod_config *cfg) { struct mxl111sf_demod_state *state = NULL; diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h index 0bd83e52669c..7065aca81252 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h @@ -35,11 +35,11 @@ struct mxl111sf_demod_config { #if IS_ENABLED(CONFIG_DVB_USB_MXL111SF) extern struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state, - struct mxl111sf_demod_config *cfg); + const struct mxl111sf_demod_config *cfg); #else static inline struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state, - struct mxl111sf_demod_config *cfg) + const struct mxl111sf_demod_config *cfg) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); return NULL; diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index bec12b0e076b..1710f9038d75 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -288,9 +288,9 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe) err = mxl1x1sf_set_device_mode(state, adap_state->device_mode); mxl_fail(err); - mxl111sf_enable_usb_output(state); + err = mxl111sf_enable_usb_output(state); mxl_fail(err); - mxl1x1sf_top_master_ctrl(state, 1); + err = mxl1x1sf_top_master_ctrl(state, 1); mxl_fail(err); if ((MXL111SF_GPIO_MOD_DVBT != adap_state->gpio_mode) && @@ -731,7 +731,7 @@ fail: return ret; } -static struct mxl111sf_demod_config mxl_demod_config = { +static const struct mxl111sf_demod_config mxl_demod_config = { .read_reg = mxl111sf_read_reg, .write_reg = mxl111sf_write_reg, .program_regs = mxl111sf_ctrl_program_regs, diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 5a503a6bb8c5..eb5787a3191e 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -181,11 +181,17 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { /* method 1 - integrated demod */ - req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); - req.index = CMD_DEMOD_RD | dev->page; - req.size = msg[1].len; - req.data = &msg[1].buf[0]; - ret = rtl28xxu_ctrl_msg(d, &req); + if (msg[0].buf[0] == 0x00) { + /* return demod page from driver cache */ + msg[1].buf[0] = dev->page; + ret = 0; + } else { + req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); + req.index = CMD_DEMOD_RD | dev->page; + req.size = msg[1].len; + req.data = &msg[1].buf[0]; + ret = rtl28xxu_ctrl_msg(d, &req); + } } else if (msg[0].len < 2) { /* method 2 - old I2C */ req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); diff --git a/drivers/media/usb/dvb-usb/Kconfig 
b/drivers/media/usb/dvb-usb/Kconfig index 128eee61570d..f03b0b70c901 100644 --- a/drivers/media/usb/dvb-usb/Kconfig +++ b/drivers/media/usb/dvb-usb/Kconfig @@ -9,7 +9,7 @@ config DVB_USB <file:Documentation/dvb/README.dvb-usb>. For a complete list of supported USB devices see the LinuxTV DVB Wiki: - <http://www.linuxtv.org/wiki/index.php/DVB_USB> + <https://linuxtv.org/wiki/index.php/DVB_USB> Say Y if you own a USB DVB device. diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c index ed0b3a87983e..b58acd3fcd99 100644 --- a/drivers/media/usb/em28xx/em28xx-camera.c +++ b/drivers/media/usb/em28xx/em28xx-camera.c @@ -21,7 +21,7 @@ #include <linux/i2c.h> #include <media/soc_camera.h> -#include <media/mt9v011.h> +#include <media/i2c/mt9v011.h> #include <media/v4l2-clk.h> #include <media/v4l2-common.h> @@ -322,7 +322,7 @@ int em28xx_detect_sensor(struct em28xx *dev) int em28xx_init_camera(struct em28xx *dev) { - char clk_name[V4L2_SUBDEV_NAME_SIZE]; + char clk_name[V4L2_CLK_NAME_SIZE]; struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus]; struct i2c_adapter *adap = &dev->i2c_adap[dev->def_i2c_bus]; struct em28xx_v4l2 *v4l2 = dev->v4l2; diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 394004607059..a1b6ef5894a6 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -30,10 +30,10 @@ #include <linux/i2c.h> #include <linux/usb.h> #include <media/tuner.h> -#include <media/msp3400.h> -#include <media/saa7115.h> -#include <media/tvp5150.h> -#include <media/tvaudio.h> +#include <media/drv-intf/msp3400.h> +#include <media/i2c/saa7115.h> +#include <media/i2c/tvp5150.h> +#include <media/i2c/tvaudio.h> #include <media/i2c-addr.h> #include <media/tveeprom.h> #include <media/v4l2-common.h> @@ -1051,8 +1051,12 @@ struct em28xx_board em28xx_boards[] = { }, [EM2870_BOARD_TERRATEC_XS_MT2060] = { .name = "Terratec Cinergy T XS (MT2060)", - .valid = EM28XX_BOARD_NOT_VALIDATED, + .xclk = EM28XX_XCLK_IR_RC5_MODE | + EM28XX_XCLK_FREQUENCY_12MHZ, + .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .tuner_type = TUNER_ABSENT, /* MT2060 */ + .has_dvb = 1, + .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_KWORLD_350U] = { .name = "Kworld 350 U DVB-T", @@ -2368,7 +2372,7 @@ struct usb_device_id em28xx_id_table[] = { { USB_DEVICE(0x0ccd, 0x0042), .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x0043), - .driver_info = EM2870_BOARD_TERRATEC_XS }, + .driver_info = EM2870_BOARD_TERRATEC_XS_MT2060 }, { USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS }, { USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 
2 */ @@ -2471,6 +2475,8 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x025f), .driver_info = EM28178_BOARD_PCTV_292E }, + { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD */ + .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x0413, 0x6f07), .driver_info = EM2861_BOARD_LEADTEK_VC100 }, { USB_DEVICE(0xeb1a, 0x8179), diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index 357be76c7a55..bf5c24467c65 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -38,6 +38,7 @@ #include "lgdt3305.h" #include "zl10353.h" #include "s5h1409.h" +#include "mt2060.h" #include "mt352.h" #include "mt352_priv.h" /* FIXME */ #include "tda1002x.h" @@ -815,6 +816,10 @@ static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = { .parallel_ts = 1, }; +static struct mt2060_config em28xx_mt2060_config = { + .i2c_address = 0x60, +}; + static struct qt1010_config em28xx_qt1010_config = { .i2c_address = 0x62 }; @@ -1142,6 +1147,16 @@ static int em28xx_dvb_init(struct em28xx *dev) goto out_free; } break; + case EM2870_BOARD_TERRATEC_XS_MT2060: + dvb->fe[0] = dvb_attach(zl10353_attach, + &em28xx_zl10353_no_i2c_gate_dev, + &dev->i2c_adap[dev->def_i2c_bus]); + if (dvb->fe[0] != NULL) { + dvb_attach(mt2060_attach, dvb->fe[0], + &dev->i2c_adap[dev->def_i2c_bus], + &em28xx_mt2060_config, 1220); + } + break; case EM2870_BOARD_KWORLD_355U: dvb->fe[0] = dvb_attach(zl10353_attach, &em28xx_zl10353_no_i2c_gate_dev, diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c index e23c285b3108..fe94c9225dd7 100644 --- a/drivers/media/usb/em28xx/em28xx-vbi.c +++ b/drivers/media/usb/em28xx/em28xx-vbi.c @@ -31,26 +31,22 @@ /* ------------------------------------------------------------------ */ -static int vbi_queue_setup(struct vb2_queue *vq, const void *parg, +static int vbi_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct em28xx *dev = vb2_get_drv_priv(vq); struct em28xx_v4l2 *v4l2 = dev->v4l2; - unsigned long size; + unsigned long size = v4l2->vbi_width * v4l2->vbi_height * 2; - if (fmt) - size = fmt->fmt.pix.sizeimage; - else - size = v4l2->vbi_width * v4l2->vbi_height * 2; - - if (0 == *nbuffers) - *nbuffers = 32; if (*nbuffers < 2) *nbuffers = 2; - if (*nbuffers > 32) - *nbuffers = 32; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } *nplanes = 1; sizes[0] = size; diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 6a3cf342e087..0e86ff423c49 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -43,7 +43,7 @@ #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> #include <media/v4l2-clk.h> -#include <media/msp3400.h> +#include <media/drv-intf/msp3400.h> #include <media/tuner.h> #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ @@ -438,7 +438,7 @@ static inline void finish_buffer(struct em28xx *dev, buf->vb.field = V4L2_FIELD_NONE; else buf->vb.field = V4L2_FIELD_INTERLACED; - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } @@ -871,30 +871,19 @@ static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type) Videobuf2 operations 
------------------------------------------------------------------*/ -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct em28xx *dev = vb2_get_drv_priv(vq); struct em28xx_v4l2 *v4l2 = dev->v4l2; - unsigned long size; - - if (fmt) - size = fmt->fmt.pix.sizeimage; - else - size = + unsigned long size = (v4l2->width * v4l2->height * v4l2->format->depth + 7) >> 3; - if (size == 0) - return -EINVAL; - - if (0 == *nbuffers) - *nbuffers = 32; - + if (*nplanes) + return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; sizes[0] = size; - return 0; } diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 76bf8ba372b3..8ff066c977d9 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -40,7 +40,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fh.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> #include <media/rc-core.h> #include "tuner-xc2028.h" #include "xc5000.h" diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c index ae1cfa792c58..05b1126f263e 100644 --- a/drivers/media/usb/go7007/go7007-driver.c +++ b/drivers/media/usb/go7007/go7007-driver.c @@ -466,7 +466,7 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf else go7007_set_motion_regions(go, vb, 0); - v4l2_get_timestamp(&vb->vb.timestamp); + vb->vb.vb2_buf.timestamp = ktime_get_ns(); vb_tmp = vb; spin_lock(&go->spinlock); list_del(&vb->list); diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index 4857c467e76c..3dbf14c85c5c 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -23,9 +23,9 @@ #include <linux/usb.h> #include <linux/i2c.h> #include <asm/byteorder.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include <media/tuner.h> -#include <media/uda1342.h> +#include <media/i2c/uda1342.h> #include "go7007-priv.h" @@ -1289,7 +1289,7 @@ static int go7007_usb_probe(struct usb_interface *intf, /* Allocate the URBs and buffers for receiving the audio stream */ if ((board->flags & GO7007_USB_EZUSB) && - (board->flags & GO7007_BOARD_HAS_AUDIO)) { + (board->main_info.flags & GO7007_BOARD_HAS_AUDIO)) { for (i = 0; i < 8; ++i) { usb->audio_urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (usb->audio_urbs[i] == NULL) diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c index f3d187db9368..358c1c186d03 100644 --- a/drivers/media/usb/go7007/go7007-v4l2.c +++ b/drivers/media/usb/go7007/go7007-v4l2.c @@ -30,7 +30,7 @@ #include <media/v4l2-subdev.h> #include <media/v4l2-event.h> #include <media/videobuf2-vmalloc.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include "go7007-priv.h" @@ -369,7 +369,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, } static int go7007_queue_setup(struct vb2_queue *q, - const void *parg, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index 146071b8e116..bfff1d1c70ab 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, struct v4l2_fract 
*tpf = &cp->timeperframe; struct sd *sd = (struct sd *) gspca_dev; - /* Set requested framerate */ - sd->frame_rate = tpf->denominator / tpf->numerator; + if (tpf->numerator == 0 || tpf->denominator == 0) + /* Set default framerate */ + sd->frame_rate = 30; + else + /* Set requested framerate */ + sd->frame_rate = tpf->denominator / tpf->numerator; + if (gspca_dev->streaming) set_frame_rate(gspca_dev); diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c index c70ff406b07a..c028a5c2438e 100644 --- a/drivers/media/usb/gspca/topro.c +++ b/drivers/media/usb/gspca/topro.c @@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, struct v4l2_fract *tpf = &cp->timeperframe; int fr, i; - sd->framerate = tpf->denominator / tpf->numerator; + if (tpf->numerator == 0 || tpf->denominator == 0) + sd->framerate = 30; + else + sd->framerate = tpf->denominator / tpf->numerator; + if (gspca_dev->streaming) setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c index e05bfec90f46..9e700caf0d66 100644 --- a/drivers/media/usb/hackrf/hackrf.c +++ b/drivers/media/usb/hackrf/hackrf.c @@ -24,6 +24,15 @@ #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> +/* + * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too + * strong signal or transmitting to bad antenna. + * Set RF gain control to 'grabbed' state by default for sure. + */ +static bool hackrf_enable_rf_gain_ctrl; +module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644); +MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)"); + /* HackRF USB API commands (from HackRF Library) */ enum { CMD_SET_TRANSCEIVER_MODE = 0x01, @@ -517,7 +526,7 @@ static void hackrf_urb_complete_in(struct urb *urb) urb->transfer_buffer, len); vb2_set_plane_payload(&buffer->vb.vb2_buf, 0, len); buffer->vb.sequence = dev->sequence++; - v4l2_get_timestamp(&buffer->vb.timestamp); + buffer->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE); exit_usb_submit_urb: usb_submit_urb(urb, GFP_ATOMIC); @@ -562,7 +571,7 @@ static void hackrf_urb_complete_out(struct urb *urb) vb2_plane_vaddr(&buffer->vb.vb2_buf, 0), len); urb->actual_length = len; buffer->vb.sequence = dev->sequence++; - v4l2_get_timestamp(&buffer->vb.timestamp); + buffer->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE); exit_usb_submit_urb: usb_submit_urb(urb, GFP_ATOMIC); @@ -750,7 +759,7 @@ static void hackrf_return_all_buffers(struct vb2_queue *vq, } static int hackrf_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, + unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); @@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf, dev_err(dev->dev, "Could not initialize controls\n"); goto err_v4l2_ctrl_handler_free_rx; } + v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl); v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); /* Register controls for transmitter */ @@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf, dev_err(dev->dev, "Could not initialize controls\n"); goto err_v4l2_ctrl_handler_free_tx; } + v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl); v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); /* Register 
the v4l2_device structure */ @@ -1530,7 +1541,7 @@ err_v4l2_ctrl_handler_free_rx: err_kfree: kfree(dev); err: - dev_dbg(dev->dev, "failed=%d\n", ret); + dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index d8d8c0f519fc..7dee22deebf3 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -642,7 +642,7 @@ static int vidioc_s_dv_timings(struct file *file, void *_fh, if (dev->status != STATUS_IDLE) return -EBUSY; for (i = 0; i < ARRAY_SIZE(hdpvr_dv_timings); i++) - if (v4l2_match_dv_timings(timings, hdpvr_dv_timings + i, 0)) + if (v4l2_match_dv_timings(timings, hdpvr_dv_timings + i, 0, false)) break; if (i == ARRAY_SIZE(hdpvr_dv_timings)) return -EINVAL; diff --git a/drivers/media/usb/hdpvr/hdpvr.h b/drivers/media/usb/hdpvr/hdpvr.h index a3194304182d..78e815441f95 100644 --- a/drivers/media/usb/hdpvr/hdpvr.h +++ b/drivers/media/usb/hdpvr/hdpvr.h @@ -17,7 +17,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> #define HDPVR_MAX 8 #define HDPVR_I2C_MAX_SIZE 128 diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index e06a21a4fbd9..c104315fdc17 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -616,7 +616,6 @@ static int msi2500_querycap(struct file *file, void *fh, /* Videobuf2 operations */ static int msi2500_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.c b/drivers/media/usb/pvrusb2/pvrusb2-audio.c index 45276c628482..5f953d837bf1 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-audio.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.c @@ -23,7 +23,7 @@ #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" #include <linux/videodev2.h> -#include <media/msp3400.h> +#include <media/drv-intf/msp3400.h> #include <media/v4l2-common.h> diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c index 1a81aa70509b..7d675fae1846 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c @@ -32,7 +32,7 @@ #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" -#include <media/cx25840.h> +#include <media/drv-intf/cx25840.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <linux/errno.h> diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h index 1f9c02801cee..60141b16d731 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h @@ -39,8 +39,8 @@ #include "pvrusb2-hdw.h" #include "pvrusb2-io.h" #include <media/v4l2-device.h> -#include <media/cx2341x.h> -#include <media/ir-kbd-i2c.h> +#include <media/drv-intf/cx2341x.h> +#include <media/i2c/ir-kbd-i2c.h> #include "pvrusb2-devattr.h" /* Legal values for PVR2_CID_HSM */ diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c index 4baa9d632a4e..14321d0a1833 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c @@ -20,7 +20,7 @@ #include <linux/i2c.h> #include <linux/module.h> -#include <media/ir-kbd-i2c.h> +#include <media/i2c/ir-kbd-i2c.h> #include 
"pvrusb2-i2c-core.h" #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index 1c5f85bf7ed4..81f788b7b242 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -628,6 +628,7 @@ static int pvr2_g_ext_ctrls(struct file *file, void *priv, struct pvr2_v4l2_fh *fh = file->private_data; struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; struct v4l2_ext_control *ctrl; + struct pvr2_ctrl *cptr; unsigned int idx; int val; int ret; @@ -635,8 +636,15 @@ static int pvr2_g_ext_ctrls(struct file *file, void *priv, ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; - ret = pvr2_ctrl_get_value( - pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id), &val); + cptr = pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id); + if (cptr) { + if (ctls->which == V4L2_CTRL_WHICH_DEF_VAL) + pvr2_ctrl_get_def(cptr, &val); + else + ret = pvr2_ctrl_get_value(cptr, &val); + } else + ret = -EINVAL; + if (ret) { ctls->error_idx = idx; return ret; @@ -658,6 +666,10 @@ static int pvr2_s_ext_ctrls(struct file *file, void *priv, unsigned int idx; int ret; + /* Default value cannot be changed */ + if (ctls->which == V4L2_CTRL_WHICH_DEF_VAL) + return -EINVAL; + ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c index 139b39740534..105123ab36aa 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c @@ -35,7 +35,7 @@ #include "pvrusb2-debug.h" #include <linux/videodev2.h> #include <media/v4l2-common.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include <linux/errno.h> struct routing_scheme { diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index b79c36fd8cd2..086cf1c7bd7d 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -316,8 +316,7 @@ static void pwc_isoc_handler(struct urb *urb) struct pwc_frame_buf *fbuf = pdev->fill_buf; if (pdev->vsync == 1) { - v4l2_get_timestamp( - &fbuf->vb.timestamp); + fbuf->vb.vb2_buf.timestamp = ktime_get_ns(); pdev->vsync = 2; } @@ -571,7 +570,7 @@ static void pwc_video_release(struct v4l2_device *v) /***************************************************************************/ /* Videobuf2 operations */ -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c index e7acb12ad21d..9acdaa3716fb 100644 --- a/drivers/media/usb/s2255/s2255drv.c +++ b/drivers/media/usb/s2255/s2255drv.c @@ -574,7 +574,7 @@ static void s2255_got_frame(struct s2255_vc *vc, int jpgsize) buf = list_entry(vc->buf_list.next, struct s2255_buffer, list); list_del(&buf->list); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.field = vc->field; buf->vb.sequence = vc->frame_count; spin_unlock_irqrestore(&vc->qlock, flags); @@ -660,7 +660,7 @@ static void s2255_fillbuff(struct s2255_vc *vc, Videobuf operations ------------------------------------------------------------------*/ -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int 
sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index 1b6836f15370..bc029478065a 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -34,7 +34,7 @@ #include <linux/usb.h> #include <linux/mm.h> #include <linux/vmalloc.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include "stk1160.h" #include "stk1160-reg.h" diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index 0bd34f1e7fa9..77131fd614a5 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -33,7 +33,7 @@ #include <media/v4l2-event.h> #include <media/videobuf2-vmalloc.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include "stk1160.h" #include "stk1160-reg.h" @@ -664,7 +664,7 @@ static const struct v4l2_ioctl_ops stk1160_ioctl_ops = { /* * Videobuf2 operations */ -static int queue_setup(struct vb2_queue *vq, const void *parg, +static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c index 75654e676e80..46191d5262eb 100644 --- a/drivers/media/usb/stk1160/stk1160-video.c +++ b/drivers/media/usb/stk1160/stk1160-video.c @@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev) buf->vb.sequence = dev->sequence++; buf->vb.field = V4L2_FIELD_INTERLACED; buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused; - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c index 2e8c3afe4ec4..8902ee36bc94 100644 --- a/drivers/media/usb/tm6000/tm6000-cards.c +++ b/drivers/media/usb/tm6000/tm6000-cards.c @@ -26,7 +26,7 @@ #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/tuner.h> -#include <media/tvaudio.h> +#include <media/i2c/tvaudio.h> #include <media/i2c-addr.h> #include <media/rc-map.h> diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index a5de46f04247..4e36e24cb3a6 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -1606,7 +1606,7 @@ static int fe_send_command(struct dvb_frontend* fe, const u8 command, return ttusb_dec_send_command(dec, command, param_length, params, result_length, cmd_result); } -static struct ttusbdecfe_config fe_config = { +static const struct ttusbdecfe_config fe_config = { .send_command = fe_send_command }; diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index e645c9df2d94..4ebb33943f9a 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -322,7 +322,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk) buf->vb.field = V4L2_FIELD_INTERLACED; buf->vb.sequence = usbtv->sequence++; - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); @@ -599,19 +599,18 @@ static struct v4l2_file_operations usbtv_fops = { }; static int usbtv_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int 
*nbuffers, + unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct usbtv *usbtv = vb2_get_drv_priv(vq); unsigned size = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32); if (vq->num_buffers + *nbuffers < 2) *nbuffers = 2 - vq->num_buffers; + if (*nplanes) + return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; - if (fmt && fmt->fmt.pix.sizeimage < size) - return -EINVAL; - sizes[0] = fmt ? fmt->fmt.pix.sizeimage : size; + sizes[0] = size; return 0; } diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c index dc3b4d5155c5..1ea04e75fb36 100644 --- a/drivers/media/usb/usbvision/usbvision-core.c +++ b/drivers/media/usb/usbvision/usbvision-core.c @@ -37,7 +37,7 @@ #include <linux/videodev2.h> #include <linux/i2c.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include <media/v4l2-common.h> #include <media/tuner.h> diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index b693206f66dd..de9ff3bb8edd 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -59,7 +59,7 @@ #include <linux/videodev2.h> #include <linux/i2c.h> -#include <media/saa7115.h> +#include <media/i2c/saa7115.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> @@ -1461,11 +1461,32 @@ static int usbvision_probe(struct usb_interface *intf, printk(KERN_INFO "%s: %s found\n", __func__, usbvision_device_data[model].model_string); + /* + * this is a security check. + * an exploit using an incorrect bInterfaceNumber is known + */ + if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum]) + return -ENODEV; + if (usbvision_device_data[model].interface >= 0) interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; - else + else if (ifnum < dev->actconfig->desc.bNumInterfaces) interface = &dev->actconfig->interface[ifnum]->altsetting[0]; + else { + dev_err(&intf->dev, "interface %d is invalid, max is %d\n", + ifnum, dev->actconfig->desc.bNumInterfaces - 1); + ret = -ENODEV; + goto err_usb; + } + + if (interface->desc.bNumEndpoints < 2) { + dev_err(&intf->dev, "interface %d has %d endpoints, but must" + " have minimum 2\n", ifnum, interface->desc.bNumEndpoints); + ret = -ENODEV; + goto err_usb; + } endpoint = &interface->endpoint[1].desc; + if (!usb_endpoint_xfer_isoc(endpoint)) { dev_err(&intf->dev, "%s: interface %d. 
has non-ISO endpoint!\n", __func__, ifnum); diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 3e59b288b8a8..c2ee6e39fd0c 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -227,7 +227,8 @@ static struct uvc_control_info uvc_ctrls[] = { .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE - | UVC_CTRL_FLAG_RESTORE, + | UVC_CTRL_FLAG_RESTORE + | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index d11fd6ac2df0..39abbafad796 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2540,7 +2540,8 @@ static struct usb_device_id uvc_ids[] = { .bInterfaceProtocol = 0, .driver_info = UVC_QUIRK_FORCE_Y8 }, /* Generic USB Video Class */ - { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, + { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) }, + { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) }, {} }; diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c index cfb868a48b5f..54394722756f 100644 --- a/drivers/media/usb/uvc/uvc_queue.c +++ b/drivers/media/usb/uvc/uvc_queue.c @@ -69,23 +69,19 @@ static void uvc_queue_return_buffers(struct uvc_video_queue *queue, * videobuf2 queue operations */ -static int uvc_queue_setup(struct vb2_queue *vq, const void *parg, +static int uvc_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { - const struct v4l2_format *fmt = parg; struct uvc_video_queue *queue = vb2_get_drv_priv(vq); struct uvc_streaming *stream = uvc_queue_to_stream(queue); + unsigned size = stream->ctrl.dwMaxVideoFrameSize; /* Make sure the image size is large enough. */ - if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize) - return -EINVAL; - + if (*nplanes) + return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; - - sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage - : stream->ctrl.dwMaxVideoFrameSize; - + sizes[0] = size; return 0; } diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 2764f43607c1..d7723ce772b3 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -983,6 +983,22 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh, unsigned int i; int ret; + if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) { + for (i = 0; i < ctrls->count; ++ctrl, ++i) { + struct v4l2_queryctrl qc = { .id = ctrl->id }; + + ret = uvc_query_v4l2_ctrl(chain, &qc); + if (ret < 0) { + ctrls->error_idx = i; + return ret; + } + + ctrl->value = qc.default_value; + } + + return 0; + } + ret = uvc_ctrl_begin(chain); if (ret < 0) return ret; @@ -1010,6 +1026,10 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle, unsigned int i; int ret; + /* Default value cannot be changed */ + if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) + return -EINVAL; + ret = uvc_ctrl_begin(chain); if (ret < 0) return ret; diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 2b276ab7764f..075a0fe77485 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -694,22 +694,19 @@ void uvc_video_clock_update(struct uvc_streaming *stream, ts.tv_nsec -= NSEC_PER_SEC; } - uvc_trace(UVC_TRACE_CLOCK, "%s: SOF %u.%06llu y %llu ts %lu.%06lu " - "buf ts %lu.%06lu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n", + uvc_trace(UVC_TRACE_CLOCK, "%s: SOF %u.%06llu y %llu ts %llu " + "buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n", stream->dev->name, sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), - y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC, - vbuf->timestamp.tv_sec, - (unsigned long)vbuf->timestamp.tv_usec, + y, timespec_to_ns(&ts), vbuf->vb2_buf.timestamp, x1, first->host_sof, first->dev_sof, x2, last->host_sof, last->dev_sof, y1, y2); /* Update the V4L2 buffer. */ - vbuf->timestamp.tv_sec = ts.tv_sec; - vbuf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; + vbuf->vb2_buf.timestamp = timespec_to_ns(&ts); done: - spin_unlock_irqrestore(&stream->clock.lock, flags); + spin_unlock_irqrestore(&clock->lock, flags); } /* ------------------------------------------------------------------------ @@ -1034,9 +1031,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream, buf->buf.field = V4L2_FIELD_NONE; buf->buf.sequence = stream->sequence; - buf->buf.timestamp.tv_sec = ts.tv_sec; - buf->buf.timestamp.tv_usec = - ts.tv_nsec / NSEC_PER_USEC; + buf->buf.vb2_buf.timestamp = timespec_to_ns(&ts); /* TODO: Handle PTS and SCR. 
*/ buf->state = UVC_BUF_STATE_ACTIVE; diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c index 34e416a554f6..297e10e69898 100644 --- a/drivers/media/v4l2-core/v4l2-clk.c +++ b/drivers/media/v4l2-core/v4l2-clk.c @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/of.h> #include <linux/slab.h> #include <linux/string.h> @@ -39,6 +40,7 @@ struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id) { struct v4l2_clk *clk; struct clk *ccf_clk = clk_get(dev, id); + char clk_name[V4L2_CLK_NAME_SIZE]; if (PTR_ERR(ccf_clk) == -EPROBE_DEFER) return ERR_PTR(-EPROBE_DEFER); @@ -57,6 +59,13 @@ struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id) mutex_lock(&clk_lock); clk = v4l2_clk_find(dev_name(dev)); + /* if dev_name is not found, try use the OF name to find again */ + if (PTR_ERR(clk) == -ENODEV && dev->of_node) { + v4l2_clk_name_of(clk_name, sizeof(clk_name), + of_node_full_name(dev->of_node)); + clk = v4l2_clk_find(clk_name); + } + if (!IS_ERR(clk)) atomic_inc(&clk->use_count); mutex_unlock(&clk_lock); diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 327e83ac2469..8fd84a67478a 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -630,7 +630,7 @@ static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __ } struct v4l2_ext_controls32 { - __u32 ctrl_class; + __u32 which; __u32 count; __u32 error_idx; __u32 reserved[2]; @@ -673,7 +673,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext compat_caddr_t p; if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || - get_user(kp->ctrl_class, &up->ctrl_class) || + get_user(kp->which, &up->which) || get_user(kp->count, &up->count) || get_user(kp->error_idx, &up->error_idx) || copy_from_user(kp->reserved, up->reserved, @@ -723,7 +723,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext compat_caddr_t p; if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || - put_user(kp->ctrl_class, &up->ctrl_class) || + put_user(kp->which, &up->which) || put_user(kp->count, &up->count) || put_user(kp->error_idx, &up->error_idx) || copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 4a1d9fdd14bb..c9d5537b6af7 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1491,6 +1491,17 @@ static int new_to_user(struct v4l2_ext_control *c, return ptr_to_user(c, ctrl, ctrl->p_new); } +/* Helper function: copy the initial control value back to the caller */ +static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + int idx; + + for (idx = 0; idx < ctrl->elems; idx++) + ctrl->type_ops->init(ctrl, idx, ctrl->p_new); + + return ptr_to_user(c, ctrl, ctrl->p_new); +} + /* Helper function: copy the caller-provider value to the given control value */ static int user_to_ptr(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl, @@ -1762,7 +1773,7 @@ static struct v4l2_ctrl_ref *find_private_ref( list_for_each_entry(ref, &hdl->ctrl_refs, node) { /* Search for private user controls that are compatible with VIDIOC_G/S_CTRL. 
*/ - if (V4L2_CTRL_ID2CLASS(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && + if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) { if (!ref->ctrl->is_int) continue; @@ -1831,7 +1842,7 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl_ref *ref; struct v4l2_ctrl_ref *new_ref; u32 id = ctrl->id; - u32 class_ctrl = V4L2_CTRL_ID2CLASS(id) | 1; + u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ /* @@ -2253,9 +2264,9 @@ EXPORT_SYMBOL(v4l2_ctrl_add_handler); bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) { - if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) return true; - if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) return true; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: @@ -2710,7 +2721,9 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, cs->error_idx = i; - if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class) + if (cs->which && + cs->which != V4L2_CTRL_WHICH_DEF_VAL && + V4L2_CTRL_ID2WHICH(id) != cs->which) return -EINVAL; /* Old-style private controls are not allowed for @@ -2787,11 +2800,11 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, /* Handles the corner case where cs->count == 0. It checks whether the specified control class exists. If that class ID is 0, then it checks whether there are any controls at all. */ -static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class) +static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) { - if (ctrl_class == 0) + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL) return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0; - return find_ref_lock(hdl, ctrl_class | 1) ? 0 : -EINVAL; + return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; } @@ -2803,15 +2816,18 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs struct v4l2_ctrl_helper *helpers = helper; int ret; int i, j; + bool def_value; + + def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL); cs->error_idx = cs->count; - cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class); + cs->which = V4L2_CTRL_ID2WHICH(cs->which); if (hdl == NULL) return -EINVAL; if (cs->count == 0) - return class_check(hdl, cs->ctrl_class); + return class_check(hdl, cs->which); if (cs->count > ARRAY_SIZE(helper)) { helpers = kmalloc_array(cs->count, sizeof(helper[0]), @@ -2829,9 +2845,11 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs for (i = 0; !ret && i < cs->count; i++) { int (*ctrl_to_user)(struct v4l2_ext_control *c, - struct v4l2_ctrl *ctrl) = cur_to_user; + struct v4l2_ctrl *ctrl); struct v4l2_ctrl *master; + ctrl_to_user = def_value ? 
def_to_user : cur_to_user; + if (helpers[i].mref == NULL) continue; @@ -2841,8 +2859,9 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs v4l2_ctrl_lock(master); /* g_volatile_ctrl will update the new control values */ - if ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || - (master->has_volatiles && !is_cur_manual(master))) { + if (!def_value && + ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || + (master->has_volatiles && !is_cur_manual(master)))) { for (j = 0; j < master->ncontrols; j++) cur_to_new(master->cluster[j]); ret = call_op(master, g_volatile_ctrl); @@ -3064,13 +3083,18 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, int ret; cs->error_idx = cs->count; - cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class); + + /* Default value cannot be changed */ + if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) + return -EINVAL; + + cs->which = V4L2_CTRL_ID2WHICH(cs->which); if (hdl == NULL) return -EINVAL; if (cs->count == 0) - return class_check(hdl, cs->ctrl_class); + return class_check(hdl, cs->which); if (cs->count > ARRAY_SIZE(helper)) { helpers = kmalloc_array(cs->count, sizeof(helper[0]), @@ -3300,7 +3324,8 @@ EXPORT_SYMBOL(v4l2_ctrl_notify); int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, s64 min, s64 max, u64 step, s64 def) { - bool changed; + bool value_changed; + bool range_changed = false; int ret; lockdep_assert_held(ctrl->handler->lock); @@ -3324,10 +3349,14 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, default: return -EINVAL; } - ctrl->minimum = min; - ctrl->maximum = max; - ctrl->step = step; - ctrl->default_value = def; + if ((ctrl->minimum != min) || (ctrl->maximum != max) || + (ctrl->step != step) || ctrl->default_value != def) { + range_changed = true; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + } cur_to_new(ctrl); if (validate_new(ctrl, ctrl->p_new)) { if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) @@ -3337,12 +3366,12 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, } if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) - changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; + value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; else - changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; - if (changed) + value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; + if (value_changed) ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); - else + else if (range_changed) send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); return ret; } diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 5b0a30b9252b..7129e438f29e 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -118,11 +118,20 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev) if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) { struct i2c_client *client = v4l2_get_subdevdata(sd); - /* We need to unregister the i2c client explicitly. - We cannot rely on i2c_del_adapter to always - unregister clients for us, since if the i2c bus - is a platform bus, then it is never deleted. */ - if (client) + /* + * We need to unregister the i2c client + * explicitly. We cannot rely on + * i2c_del_adapter to always unregister + * clients for us, since if the i2c bus is a + * platform bus, then it is never deleted. + * + * Device tree or ACPI based devices must not + * be unregistered as they have not been + * registered by us, and would not be + * re-created by just probing the V4L2 driver. 
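Together with the uvc hunks earlier in the diff, the v4l2-ctrls changes above make cs->which meaningful: passing V4L2_CTRL_WHICH_DEF_VAL to VIDIOC_G_EXT_CTRLS returns default control values via def_to_user(), while the set/try paths reject it. A sketch of the userspace side, assuming fd is an open video device node:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int get_default_brightness(int fd, __s32 *value)
{
        struct v4l2_ext_control ctrl = { .id = V4L2_CID_BRIGHTNESS };
        struct v4l2_ext_controls ctrls = {
                .which = V4L2_CTRL_WHICH_DEF_VAL,  /* ask for defaults, not current values */
                .count = 1,
                .controls = &ctrl,
        };

        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0)
                return -1;
        *value = ctrl.value;
        return 0;
}

The same which value passed to VIDIOC_S_EXT_CTRLS or VIDIOC_TRY_EXT_CTRLS gets -EINVAL, since default values cannot be changed.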
+ */ + if (client && + !client->dev.of_node && !client->dev.fwnode) i2c_unregister_device(client); continue; } @@ -131,7 +140,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev) if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) { struct spi_device *spi = v4l2_get_subdevdata(sd); - if (spi) + if (spi && !spi->dev.of_node && !spi->dev.fwnode) spi_unregister_device(spi); continue; } diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index 6a83d6191684..ec258b73001a 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -209,8 +209,13 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, fnc, fnc_handle) && v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i, - pclock_delta)) { + pclock_delta, false)) { + u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS; + *t = v4l2_dv_timings_presets[i]; + if (can_reduce_fps(&t->bt)) + t->bt.flags |= flags; + return true; } } @@ -223,12 +228,14 @@ EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap); * @t1 - compare this v4l2_dv_timings struct... * @t2 - with this struct. * @pclock_delta - the allowed pixelclock deviation. + * @match_reduced_fps - if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. * * Compare t1 with t2 with a given margin of error for the pixelclock. */ bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1, const struct v4l2_dv_timings *t2, - unsigned pclock_delta) + unsigned pclock_delta, bool match_reduced_fps) { if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120) return false; @@ -239,9 +246,14 @@ bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1, t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta && t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta && t1->bt.hfrontporch == t2->bt.hfrontporch && + t1->bt.hsync == t2->bt.hsync && + t1->bt.hbackporch == t2->bt.hbackporch && t1->bt.vfrontporch == t2->bt.vfrontporch && t1->bt.vsync == t2->bt.vsync && t1->bt.vbackporch == t2->bt.vbackporch && + (!match_reduced_fps || + (t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) == + (t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) && (!t1->bt.interlaced || (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch && t1->bt.il_vsync == t2->bt.il_vsync && diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c index 5bdfb8d5263a..5d673357f75f 100644 --- a/drivers/media/v4l2-core/v4l2-flash-led-class.c +++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -107,10 +107,10 @@ static void v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) return; - led_set_brightness(&v4l2_flash->fled_cdev->led_cdev, + led_set_brightness_sync(&v4l2_flash->fled_cdev->led_cdev, brightness); } else { - led_set_brightness(&v4l2_flash->iled_cdev->led_cdev, + led_set_brightness_sync(&v4l2_flash->iled_cdev->led_cdev, brightness); } } @@ -206,11 +206,11 @@ static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) case V4L2_CID_FLASH_LED_MODE: switch (c->val) { case V4L2_FLASH_LED_MODE_NONE: - led_set_brightness(led_cdev, LED_OFF); + led_set_brightness_sync(led_cdev, LED_OFF); return led_set_flash_strobe(fled_cdev, false); case V4L2_FLASH_LED_MODE_FLASH: /* Turn the torch LED off */ - led_set_brightness(led_cdev, LED_OFF); + led_set_brightness_sync(led_cdev, LED_OFF); if (ctrls[STROBE_SOURCE]) { external_strobe = (ctrls[STROBE_SOURCE]->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL); diff --git 
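The v4l2-dv-timings change above means v4l2_match_dv_timings() now also compares hsync and hbackporch and takes a match_reduced_fps flag, so every caller needs the extra argument. A sketch of a caller walking the preset table, with the pixel clock tolerance chosen arbitrarily for illustration:

#include <media/v4l2-dv-timings.h>

static bool timings_match_a_preset(const struct v4l2_dv_timings *t)
{
        unsigned int i;

        /* the preset table is terminated by an all-zero entry */
        for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
                /* false: do not also require V4L2_DV_FL_REDUCED_FPS to match */
                if (v4l2_match_dv_timings(t, &v4l2_dv_timings_presets[i],
                                          t->bt.pixelclock / 100, false))
                        return true;
        }
        return false;
}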
a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 7486af2c8ae4..8a018c6dd16a 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -565,8 +565,8 @@ static void v4l_print_ext_controls(const void *arg, bool write_only) const struct v4l2_ext_controls *p = arg; int i; - pr_cont("class=0x%x, count=%d, error_idx=%d", - p->ctrl_class, p->count, p->error_idx); + pr_cont("which=0x%x, count=%d, error_idx=%d", + p->which, p->count, p->error_idx); for (i = 0; i < p->count; i++) { if (!p->controls[i].size) pr_cont(", id/val=0x%x/0x%x", @@ -902,13 +902,13 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv) Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL is it allowed for backwards compatibility. */ - if (!allow_priv && c->ctrl_class == V4L2_CID_PRIVATE_BASE) + if (!allow_priv && c->which == V4L2_CID_PRIVATE_BASE) return 0; - if (c->ctrl_class == 0) + if (!c->which) return 1; /* Check that all controls are from the same control class. */ for (i = 0; i < c->count; i++) { - if (V4L2_CTRL_ID2CLASS(c->controls[i].id) != c->ctrl_class) { + if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) { c->error_idx = i; return 0; } @@ -1969,7 +1969,7 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, if (ops->vidioc_g_ext_ctrls == NULL) return -ENOTTY; - ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id); + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); ctrls.count = 1; ctrls.controls = &ctrl; ctrl.id = p->id; @@ -2003,7 +2003,7 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, if (ops->vidioc_s_ext_ctrls == NULL) return -ENOTTY; - ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id); + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); ctrls.count = 1; ctrls.controls = &ctrl; ctrl.id = p->id; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 33bdd81065e8..c5d49d7a0d76 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -28,11 +28,161 @@ #include <trace/events/vb2.h> -#include "videobuf2-internal.h" +static int debug; +module_param(debug, int, 0644); -int vb2_debug; -EXPORT_SYMBOL_GPL(vb2_debug); -module_param_named(debug, vb2_debug, int, 0644); +#define dprintk(level, fmt, arg...) \ + do { \ + if (debug >= level) \ + pr_info("vb2-core: %s: " fmt, __func__, ## arg); \ + } while (0) + +#ifdef CONFIG_VIDEO_ADV_DEBUG + +/* + * If advanced debugging is on, then count how often each op is called + * successfully, which can either be per-buffer or per-queue. + * + * This makes it easy to check that the 'init' and 'cleanup' + * (and variations thereof) stay balanced. + */ + +#define log_memop(vb, op) \ + dprintk(2, "call_memop(%p, %d, %s)%s\n", \ + (vb)->vb2_queue, (vb)->index, #op, \ + (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") + +#define call_memop(vb, op, args...) \ +({ \ + struct vb2_queue *_q = (vb)->vb2_queue; \ + int err; \ + \ + log_memop(vb, op); \ + err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ + if (!err) \ + (vb)->cnt_mem_ ## op++; \ + err; \ +}) + +#define call_ptr_memop(vb, op, args...) \ +({ \ + struct vb2_queue *_q = (vb)->vb2_queue; \ + void *ptr; \ + \ + log_memop(vb, op); \ + ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \ + if (!IS_ERR_OR_NULL(ptr)) \ + (vb)->cnt_mem_ ## op++; \ + ptr; \ +}) + +#define call_void_memop(vb, op, args...) 
\ +({ \ + struct vb2_queue *_q = (vb)->vb2_queue; \ + \ + log_memop(vb, op); \ + if (_q->mem_ops->op) \ + _q->mem_ops->op(args); \ + (vb)->cnt_mem_ ## op++; \ +}) + +#define log_qop(q, op) \ + dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \ + (q)->ops->op ? "" : " (nop)") + +#define call_qop(q, op, args...) \ +({ \ + int err; \ + \ + log_qop(q, op); \ + err = (q)->ops->op ? (q)->ops->op(args) : 0; \ + if (!err) \ + (q)->cnt_ ## op++; \ + err; \ +}) + +#define call_void_qop(q, op, args...) \ +({ \ + log_qop(q, op); \ + if ((q)->ops->op) \ + (q)->ops->op(args); \ + (q)->cnt_ ## op++; \ +}) + +#define log_vb_qop(vb, op, args...) \ + dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \ + (vb)->vb2_queue, (vb)->index, #op, \ + (vb)->vb2_queue->ops->op ? "" : " (nop)") + +#define call_vb_qop(vb, op, args...) \ +({ \ + int err; \ + \ + log_vb_qop(vb, op); \ + err = (vb)->vb2_queue->ops->op ? \ + (vb)->vb2_queue->ops->op(args) : 0; \ + if (!err) \ + (vb)->cnt_ ## op++; \ + err; \ +}) + +#define call_void_vb_qop(vb, op, args...) \ +({ \ + log_vb_qop(vb, op); \ + if ((vb)->vb2_queue->ops->op) \ + (vb)->vb2_queue->ops->op(args); \ + (vb)->cnt_ ## op++; \ +}) + +#else + +#define call_memop(vb, op, args...) \ + ((vb)->vb2_queue->mem_ops->op ? \ + (vb)->vb2_queue->mem_ops->op(args) : 0) + +#define call_ptr_memop(vb, op, args...) \ + ((vb)->vb2_queue->mem_ops->op ? \ + (vb)->vb2_queue->mem_ops->op(args) : NULL) + +#define call_void_memop(vb, op, args...) \ + do { \ + if ((vb)->vb2_queue->mem_ops->op) \ + (vb)->vb2_queue->mem_ops->op(args); \ + } while (0) + +#define call_qop(q, op, args...) \ + ((q)->ops->op ? (q)->ops->op(args) : 0) + +#define call_void_qop(q, op, args...) \ + do { \ + if ((q)->ops->op) \ + (q)->ops->op(args); \ + } while (0) + +#define call_vb_qop(vb, op, args...) \ + ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) + +#define call_void_vb_qop(vb, op, args...) \ + do { \ + if ((vb)->vb2_queue->ops->op) \ + (vb)->vb2_queue->ops->op(args); \ + } while (0) + +#endif + +#define call_bufop(q, op, args...) \ +({ \ + int ret = 0; \ + if (q && q->buf_ops && q->buf_ops->op) \ + ret = q->buf_ops->op(args); \ + ret; \ +}) + +#define call_void_bufop(q, op, args...) 
\ +({ \ + if (q && q->buf_ops && q->buf_ops->op) \ + q->buf_ops->op(args); \ +}) static void __vb2_queue_cancel(struct vb2_queue *q); static void __enqueue_in_driver(struct vb2_buffer *vb); @@ -53,7 +203,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) * NOTE: mmapped areas should be page aligned */ for (plane = 0; plane < vb->num_planes; ++plane) { - unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); + unsigned long size = PAGE_ALIGN(vb->planes[plane].length); mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane], size, dma_dir, q->gfp_flags); @@ -62,7 +212,6 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) /* Associate allocator private data with this plane */ vb->planes[plane].mem_priv = mem_priv; - vb->planes[plane].length = q->plane_sizes[plane]; } return 0; @@ -137,57 +286,30 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) } /** - * __setup_lengths() - setup initial lengths for every plane in - * every buffer on the queue - */ -static void __setup_lengths(struct vb2_queue *q, unsigned int n) -{ - unsigned int buffer, plane; - struct vb2_buffer *vb; - - for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { - vb = q->bufs[buffer]; - if (!vb) - continue; - - for (plane = 0; plane < vb->num_planes; ++plane) - vb->planes[plane].length = q->plane_sizes[plane]; - } -} - -/** * __setup_offsets() - setup unique offsets ("cookies") for every plane in - * every buffer on the queue + * the buffer. */ -static void __setup_offsets(struct vb2_queue *q, unsigned int n) +static void __setup_offsets(struct vb2_buffer *vb) { - unsigned int buffer, plane; - struct vb2_buffer *vb; - unsigned long off; + struct vb2_queue *q = vb->vb2_queue; + unsigned int plane; + unsigned long off = 0; + + if (vb->index) { + struct vb2_buffer *prev = q->bufs[vb->index - 1]; + struct vb2_plane *p = &prev->planes[prev->num_planes - 1]; - if (q->num_buffers) { - struct vb2_plane *p; - vb = q->bufs[q->num_buffers - 1]; - p = &vb->planes[vb->num_planes - 1]; off = PAGE_ALIGN(p->m.offset + p->length); - } else { - off = 0; } - for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { - vb = q->bufs[buffer]; - if (!vb) - continue; - - for (plane = 0; plane < vb->num_planes; ++plane) { - vb->planes[plane].m.offset = off; + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].m.offset = off; - dprintk(3, "buffer %d, plane %d offset 0x%08lx\n", - buffer, plane, off); + dprintk(3, "buffer %d, plane %d offset 0x%08lx\n", + vb->index, plane, off); - off += vb->planes[plane].length; - off = PAGE_ALIGN(off); - } + off += vb->planes[plane].length; + off = PAGE_ALIGN(off); } } @@ -199,9 +321,10 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n) * Returns the number of buffers successfully allocated. 
*/ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, - unsigned int num_buffers, unsigned int num_planes) + unsigned int num_buffers, unsigned int num_planes, + const unsigned plane_sizes[VB2_MAX_PLANES]) { - unsigned int buffer; + unsigned int buffer, plane; struct vb2_buffer *vb; int ret; @@ -219,6 +342,11 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, vb->index = q->num_buffers + buffer; vb->type = q->type; vb->memory = memory; + for (plane = 0; plane < num_planes; ++plane) { + vb->planes[plane].length = plane_sizes[plane]; + vb->planes[plane].min_length = plane_sizes[plane]; + } + q->bufs[vb->index] = vb; /* Allocate video buffer memory for the MMAP type */ if (memory == VB2_MEMORY_MMAP) { @@ -226,9 +354,11 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, if (ret) { dprintk(1, "failed allocating memory for " "buffer %d\n", buffer); + q->bufs[vb->index] = NULL; kfree(vb); break; } + __setup_offsets(vb); /* * Call the driver-provided buffer initialization * callback, if given. An error in initialization @@ -239,18 +369,13 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, dprintk(1, "buffer %d %p initialization" " failed\n", buffer, vb); __vb2_buf_mem_free(vb); + q->bufs[vb->index] = NULL; kfree(vb); break; } } - - q->bufs[q->num_buffers + buffer] = vb; } - __setup_lengths(q, buffer); - if (memory == VB2_MEMORY_MMAP) - __setup_offsets(q, buffer); - dprintk(1, "allocated %d buffers, %d plane(s) each\n", buffer, num_planes); @@ -330,7 +455,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || q->cnt_wait_prepare != q->cnt_wait_finish; - if (unbalanced || vb2_debug) { + if (unbalanced || debug) { pr_info("vb2: counters for queue %p:%s\n", q, unbalanced ? " UNBALANCED!" : ""); pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n", @@ -356,7 +481,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) vb->cnt_buf_prepare != vb->cnt_buf_finish || vb->cnt_buf_init != vb->cnt_buf_cleanup; - if (unbalanced || vb2_debug) { + if (unbalanced || debug) { pr_info("vb2: counters for queue %p, buffer %d:%s\n", q, buffer, unbalanced ? " UNBALANCED!" : ""); pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", @@ -442,13 +567,10 @@ static bool __buffers_in_use(struct vb2_queue *q) * Should be called from vidioc_querybuf ioctl handler in driver. * The passed buffer should have been verified. * This function fills the relevant information for the userspace. - * - * The return values from this function are intended to be directly returned - * from vidioc_querybuf handler in driver. 
*/ -int vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb) +void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb) { - return call_bufop(q, fill_user_buffer, q->bufs[index], pb); + call_void_bufop(q, fill_user_buffer, q->bufs[index], pb); } EXPORT_SYMBOL_GPL(vb2_core_querybuf); @@ -570,6 +692,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int *count) { unsigned int num_buffers, allocated_buffers, num_planes = 0; + unsigned plane_sizes[VB2_MAX_PLANES] = { }; int ret; if (q->streaming) { @@ -613,7 +736,6 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, */ num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME); num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed); - memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); q->memory = memory; @@ -621,14 +743,14 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, * Ask the driver how many buffers and planes per buffer it requires. * Driver also sets the size and allocator context for each plane. */ - ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, - q->plane_sizes, q->alloc_ctx); + ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, + plane_sizes, q->alloc_ctx); if (ret) return ret; /* Finally, allocate buffers and video memory */ allocated_buffers = - __vb2_queue_alloc(q, memory, num_buffers, num_planes); + __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { dprintk(1, "memory allocation failed\n"); return -ENOMEM; @@ -646,9 +768,16 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, */ if (!ret && allocated_buffers < num_buffers) { num_buffers = allocated_buffers; + /* + * num_planes is set by the previous queue_setup(), but since it + * signals to queue_setup() whether it is called from create_bufs() + * vs reqbufs() we zero it here to signal that queue_setup() is + * called for the reqbufs() case. + */ + num_planes = 0; - ret = call_qop(q, queue_setup, q, NULL, &num_buffers, - &num_planes, q->plane_sizes, q->alloc_ctx); + ret = call_qop(q, queue_setup, q, &num_buffers, + &num_planes, plane_sizes, q->alloc_ctx); if (!ret && allocated_buffers < num_buffers) ret = -ENOMEM; @@ -701,9 +830,11 @@ EXPORT_SYMBOL_GPL(vb2_core_reqbufs); * from vidioc_create_bufs handler in driver. 
*/ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count, const void *parg) + unsigned int *count, unsigned requested_planes, + const unsigned requested_sizes[]) { unsigned int num_planes = 0, num_buffers, allocated_buffers; + unsigned plane_sizes[VB2_MAX_PLANES] = { }; int ret; if (q->num_buffers == VB2_MAX_FRAME) { @@ -712,7 +843,6 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, } if (!q->num_buffers) { - memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); q->memory = memory; q->waiting_for_buffers = !q->is_output; @@ -720,18 +850,23 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); + if (requested_planes && requested_sizes) { + num_planes = requested_planes; + memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); + } + /* * Ask the driver, whether the requested number of buffers, planes per * buffer and their sizes are acceptable */ - ret = call_qop(q, queue_setup, q, parg, &num_buffers, - &num_planes, q->plane_sizes, q->alloc_ctx); + ret = call_qop(q, queue_setup, q, &num_buffers, + &num_planes, plane_sizes, q->alloc_ctx); if (ret) return ret; /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, - num_planes); + num_planes, plane_sizes); if (allocated_buffers == 0) { dprintk(1, "memory allocation failed\n"); return -ENOMEM; @@ -747,8 +882,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, * q->num_buffers contains the total number of buffers, that the * queue driver has set up */ - ret = call_qop(q, queue_setup, q, parg, &num_buffers, - &num_planes, q->plane_sizes, q->alloc_ctx); + ret = call_qop(q, queue_setup, q, &num_buffers, + &num_planes, plane_sizes, q->alloc_ctx); if (!ret && allocated_buffers < num_buffers) ret = -ENOMEM; @@ -964,11 +1099,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb) "reacquiring memory\n", plane); /* Check if the provided plane buffer is large enough */ - if (planes[plane].length < q->plane_sizes[plane]) { + if (planes[plane].length < vb->planes[plane].min_length) { dprintk(1, "provided buffer size %u is less than " "setup size %u for plane %d\n", planes[plane].length, - q->plane_sizes[plane], plane); + vb->planes[plane].min_length, + plane); ret = -EINVAL; goto err; } @@ -1081,7 +1217,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb) if (planes[plane].length == 0) planes[plane].length = dbuf->size; - if (planes[plane].length < q->plane_sizes[plane]) { + if (planes[plane].length < vb->planes[plane].min_length) { dprintk(1, "invalid dmabuf length for plane %d\n", plane); ret = -EINVAL; @@ -1263,9 +1399,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) return ret; /* Fill buffer information for the userspace */ - ret = call_bufop(q, fill_user_buffer, vb, pb); - if (ret) - return ret; + call_void_bufop(q, fill_user_buffer, vb, pb); dprintk(1, "prepare of buffer %d succeeded\n", vb->index); @@ -1386,7 +1520,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) q->waiting_for_buffers = false; vb->state = VB2_BUF_STATE_QUEUED; - call_bufop(q, set_timestamp, vb, pb); + call_void_bufop(q, copy_timestamp, vb, pb); trace_vb2_qbuf(q, vb); @@ -1398,9 +1532,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) __enqueue_in_driver(vb); /* Fill buffer information for the userspace */ - ret 
= call_bufop(q, fill_user_buffer, vb, pb); - if (ret) - return ret; + call_void_bufop(q, fill_user_buffer, vb, pb); /* * If streamon has been called, and we haven't yet called @@ -1623,9 +1755,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking) call_void_vb_qop(vb, buf_finish, vb); /* Fill buffer information for the userspace */ - ret = call_bufop(q, fill_user_buffer, vb, pb); - if (ret) - return ret; + call_void_bufop(q, fill_user_buffer, vb, pb); /* Remove from videobuf queue */ list_del(&vb->queued_entry); @@ -2073,6 +2203,8 @@ int vb2_core_queue_init(struct vb2_queue *q) } EXPORT_SYMBOL_GPL(vb2_core_queue_init); +static int __vb2_init_fileio(struct vb2_queue *q, int read); +static int __vb2_cleanup_fileio(struct vb2_queue *q); /** * vb2_core_queue_release() - stop streaming, release the queue and free memory * @q: videobuf2 queue @@ -2083,6 +2215,7 @@ EXPORT_SYMBOL_GPL(vb2_core_queue_init); */ void vb2_core_queue_release(struct vb2_queue *q) { + __vb2_cleanup_fileio(q); __vb2_queue_cancel(q); mutex_lock(&q->mmap_lock); __vb2_queue_free(q, q->num_buffers); @@ -2090,6 +2223,619 @@ void vb2_core_queue_release(struct vb2_queue *q) } EXPORT_SYMBOL_GPL(vb2_core_queue_release); -MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); +/** + * vb2_core_poll() - implements poll userspace operation + * @q: videobuf2 queue + * @file: file argument passed to the poll file operation handler + * @wait: wait argument passed to the poll file operation handler + * + * This function implements poll file operation handler for a driver. + * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will + * be informed that the file descriptor of a video device is available for + * reading. + * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor + * will be reported as available for writing. + * + * The return values from this function are intended to be directly returned + * from poll handler in driver. + */ +unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file, + poll_table *wait) +{ + unsigned long req_events = poll_requested_events(wait); + struct vb2_buffer *vb = NULL; + unsigned long flags; + + if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM))) + return 0; + if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM))) + return 0; + + /* + * Start file I/O emulator only if streaming API has not been used yet. + */ + if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { + if (!q->is_output && (q->io_modes & VB2_READ) && + (req_events & (POLLIN | POLLRDNORM))) { + if (__vb2_init_fileio(q, 1)) + return POLLERR; + } + if (q->is_output && (q->io_modes & VB2_WRITE) && + (req_events & (POLLOUT | POLLWRNORM))) { + if (__vb2_init_fileio(q, 0)) + return POLLERR; + /* + * Write to OUTPUT queue can be done immediately. + */ + return POLLOUT | POLLWRNORM; + } + } + + /* + * There is nothing to wait for if the queue isn't streaming, or if the + * error flag is set. + */ + if (!vb2_is_streaming(q) || q->error) + return POLLERR; + + /* + * For output streams you can call write() as long as there are fewer + * buffers queued than there are buffers available. + */ + if (q->is_output && q->fileio && q->queued_count < q->num_buffers) + return POLLOUT | POLLWRNORM; + + if (list_empty(&q->done_list)) { + /* + * If the last buffer was dequeued from a capture queue, + * return immediately. DQBUF will return -EPIPE. 
+ */ + if (q->last_buffer_dequeued) + return POLLIN | POLLRDNORM; + + poll_wait(file, &q->done_wq, wait); + } + + /* + * Take first buffer available for dequeuing. + */ + spin_lock_irqsave(&q->done_lock, flags); + if (!list_empty(&q->done_list)) + vb = list_first_entry(&q->done_list, struct vb2_buffer, + done_entry); + spin_unlock_irqrestore(&q->done_lock, flags); + + if (vb && (vb->state == VB2_BUF_STATE_DONE + || vb->state == VB2_BUF_STATE_ERROR)) { + return (q->is_output) ? + POLLOUT | POLLWRNORM : + POLLIN | POLLRDNORM; + } + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_poll); + +/** + * struct vb2_fileio_buf - buffer context used by file io emulator + * + * vb2 provides a compatibility layer and emulator of file io (read and + * write) calls on top of streaming API. This structure is used for + * tracking context related to the buffers. + */ +struct vb2_fileio_buf { + void *vaddr; + unsigned int size; + unsigned int pos; + unsigned int queued:1; +}; + +/** + * struct vb2_fileio_data - queue context used by file io emulator + * + * @cur_index: the index of the buffer currently being read from or + * written to. If equal to q->num_buffers then a new buffer + * must be dequeued. + * @initial_index: in the read() case all buffers are queued up immediately + * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles + * buffers. However, in the write() case no buffers are initially + * queued, instead whenever a buffer is full it is queued up by + * __vb2_perform_fileio(). Only once all available buffers have + * been queued up will __vb2_perform_fileio() start to dequeue + * buffers. This means that initially __vb2_perform_fileio() + * needs to know what buffer index to use when it is queuing up + * the buffers for the first time. That initial index is stored + * in this field. Once it is equal to q->num_buffers all + * available buffers have been queued and __vb2_perform_fileio() + * should start the normal dequeue/queue cycle. + * + * vb2 provides a compatibility layer and emulator of file io (read and + * write) calls on top of streaming API. For proper operation it required + * this structure to save the driver state between each call of the read + * or write function. + */ +struct vb2_fileio_data { + unsigned int count; + unsigned int type; + unsigned int memory; + struct vb2_buffer *b; + struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; + unsigned int cur_index; + unsigned int initial_index; + unsigned int q_count; + unsigned int dq_count; + unsigned read_once:1; + unsigned write_immediately:1; +}; + +/** + * __vb2_init_fileio() - initialize file io emulator + * @q: videobuf2 queue + * @read: mode selector (1 means read, 0 means write) + */ +static int __vb2_init_fileio(struct vb2_queue *q, int read) +{ + struct vb2_fileio_data *fileio; + int i, ret; + unsigned int count = 0; + + /* + * Sanity check + */ + if (WARN_ON((read && !(q->io_modes & VB2_READ)) || + (!read && !(q->io_modes & VB2_WRITE)))) + return -EINVAL; + + /* + * Check if device supports mapping buffers to kernel virtual space. + */ + if (!q->mem_ops->vaddr) + return -EBUSY; + + /* + * Check if streaming api has not been already activated. + */ + if (q->streaming || q->num_buffers > 0) + return -EBUSY; + + /* + * Start with count 1, driver can increase it in queue_setup() + */ + count = 1; + + dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", + (read) ? 
"read" : "write", count, q->fileio_read_once, + q->fileio_write_immediately); + + fileio = kzalloc(sizeof(*fileio), GFP_KERNEL); + if (fileio == NULL) + return -ENOMEM; + + fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL); + if (fileio->b == NULL) { + kfree(fileio); + return -ENOMEM; + } + + fileio->read_once = q->fileio_read_once; + fileio->write_immediately = q->fileio_write_immediately; + + /* + * Request buffers and use MMAP type to force driver + * to allocate buffers by itself. + */ + fileio->count = count; + fileio->memory = VB2_MEMORY_MMAP; + fileio->type = q->type; + q->fileio = fileio; + ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count); + if (ret) + goto err_kfree; + + /* + * Check if plane_count is correct + * (multiplane buffers are not supported). + */ + if (q->bufs[0]->num_planes != 1) { + ret = -EBUSY; + goto err_reqbufs; + } + + /* + * Get kernel address of each buffer. + */ + for (i = 0; i < q->num_buffers; i++) { + fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); + if (fileio->bufs[i].vaddr == NULL) { + ret = -EINVAL; + goto err_reqbufs; + } + fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); + } + + /* + * Read mode requires pre queuing of all buffers. + */ + if (read) { + /* + * Queue all buffers. + */ + for (i = 0; i < q->num_buffers; i++) { + struct vb2_buffer *b = fileio->b; + + memset(b, 0, q->buf_struct_size); + b->type = q->type; + b->memory = q->memory; + b->index = i; + ret = vb2_core_qbuf(q, i, b); + if (ret) + goto err_reqbufs; + fileio->bufs[i].queued = 1; + } + /* + * All buffers have been queued, so mark that by setting + * initial_index to q->num_buffers + */ + fileio->initial_index = q->num_buffers; + fileio->cur_index = q->num_buffers; + } + + /* + * Start streaming. + */ + ret = vb2_core_streamon(q, q->type); + if (ret) + goto err_reqbufs; + + return ret; + +err_reqbufs: + fileio->count = 0; + vb2_core_reqbufs(q, fileio->memory, &fileio->count); + +err_kfree: + q->fileio = NULL; + kfree(fileio); + return ret; +} + +/** + * __vb2_cleanup_fileio() - free resourced used by file io emulator + * @q: videobuf2 queue + */ +static int __vb2_cleanup_fileio(struct vb2_queue *q) +{ + struct vb2_fileio_data *fileio = q->fileio; + + if (fileio) { + vb2_core_streamoff(q, q->type); + q->fileio = NULL; + fileio->count = 0; + vb2_core_reqbufs(q, fileio->memory, &fileio->count); + kfree(fileio->b); + kfree(fileio); + dprintk(3, "file io emulator closed\n"); + } + return 0; +} + +/** + * __vb2_perform_fileio() - perform a single file io (read or write) operation + * @q: videobuf2 queue + * @data: pointed to target userspace buffer + * @count: number of bytes to read or write + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) + * @read: access mode selector (1 means read, 0 means write) + */ +static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblock, int read) +{ + struct vb2_fileio_data *fileio; + struct vb2_fileio_buf *buf; + bool is_multiplanar = q->is_multiplanar; + /* + * When using write() to write data to an output video node the vb2 core + * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody + * else is able to provide this information with the write() operation. + */ + bool copy_timestamp = !read && q->copy_timestamp; + int ret, index; + + dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n", + read ? "read" : "write", (long)*ppos, count, + nonblock ? 
"non" : ""); + + if (!data) + return -EINVAL; + + /* + * Initialize emulator on first call. + */ + if (!vb2_fileio_is_active(q)) { + ret = __vb2_init_fileio(q, read); + dprintk(3, "vb2_init_fileio result: %d\n", ret); + if (ret) + return ret; + } + fileio = q->fileio; + + /* + * Check if we need to dequeue the buffer. + */ + index = fileio->cur_index; + if (index >= q->num_buffers) { + struct vb2_buffer *b = fileio->b; + + /* + * Call vb2_dqbuf to get buffer back. + */ + memset(b, 0, q->buf_struct_size); + b->type = q->type; + b->memory = q->memory; + ret = vb2_core_dqbuf(q, b, nonblock); + dprintk(5, "vb2_dqbuf result: %d\n", ret); + if (ret) + return ret; + fileio->dq_count += 1; + + fileio->cur_index = index = b->index; + buf = &fileio->bufs[index]; + + /* + * Get number of bytes filled by the driver + */ + buf->pos = 0; + buf->queued = 0; + buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) + : vb2_plane_size(q->bufs[index], 0); + /* Compensate for data_offset on read in the multiplanar case. */ + if (is_multiplanar && read && + b->planes[0].data_offset < buf->size) { + buf->pos = b->planes[0].data_offset; + buf->size -= buf->pos; + } + } else { + buf = &fileio->bufs[index]; + } + + /* + * Limit count on last few bytes of the buffer. + */ + if (buf->pos + count > buf->size) { + count = buf->size - buf->pos; + dprintk(5, "reducing read count: %zd\n", count); + } + + /* + * Transfer data to userspace. + */ + dprintk(3, "copying %zd bytes - buffer %d, offset %u\n", + count, index, buf->pos); + if (read) + ret = copy_to_user(data, buf->vaddr + buf->pos, count); + else + ret = copy_from_user(buf->vaddr + buf->pos, data, count); + if (ret) { + dprintk(3, "error copying data\n"); + return -EFAULT; + } + + /* + * Update counters. + */ + buf->pos += count; + *ppos += count; + + /* + * Queue next buffer if required. + */ + if (buf->pos == buf->size || (!read && fileio->write_immediately)) { + struct vb2_buffer *b = fileio->b; + + /* + * Check if this is the last buffer to read. + */ + if (read && fileio->read_once && fileio->dq_count == 1) { + dprintk(3, "read limit reached\n"); + return __vb2_cleanup_fileio(q); + } + + /* + * Call vb2_qbuf and give buffer to the driver. + */ + memset(b, 0, q->buf_struct_size); + b->type = q->type; + b->memory = q->memory; + b->index = index; + b->planes[0].bytesused = buf->pos; + + if (copy_timestamp) + b->timestamp = ktime_get_ns(); + ret = vb2_core_qbuf(q, index, b); + dprintk(5, "vb2_dbuf result: %d\n", ret); + if (ret) + return ret; + + /* + * Buffer has been queued, update the status + */ + buf->pos = 0; + buf->queued = 1; + buf->size = vb2_plane_size(q->bufs[index], 0); + fileio->q_count += 1; + /* + * If we are queuing up buffers for the first time, then + * increase initial_index by one. + */ + if (fileio->initial_index < q->num_buffers) + fileio->initial_index++; + /* + * The next buffer to use is either a buffer that's going to be + * queued for the first time (initial_index < q->num_buffers) + * or it is equal to q->num_buffers, meaning that the next + * time we need to dequeue a buffer since we've now queued up + * all the 'first time' buffers. + */ + fileio->cur_index = fileio->initial_index; + } + + /* + * Return proper number of bytes processed. 
+ */ + if (ret == 0) + ret = count; + return ret; +} + +size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblocking) +{ + return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); +} +EXPORT_SYMBOL_GPL(vb2_read); + +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, + loff_t *ppos, int nonblocking) +{ + return __vb2_perform_fileio(q, (char __user *) data, count, + ppos, nonblocking, 0); +} +EXPORT_SYMBOL_GPL(vb2_write); + +struct vb2_threadio_data { + struct task_struct *thread; + vb2_thread_fnc fnc; + void *priv; + bool stop; +}; + +static int vb2_thread(void *data) +{ + struct vb2_queue *q = data; + struct vb2_threadio_data *threadio = q->threadio; + struct vb2_fileio_data *fileio = q->fileio; + bool copy_timestamp = false; + int prequeue = 0; + int index = 0; + int ret = 0; + + if (q->is_output) { + prequeue = q->num_buffers; + copy_timestamp = q->copy_timestamp; + } + + set_freezable(); + + for (;;) { + struct vb2_buffer *vb; + struct vb2_buffer *b = fileio->b; + + /* + * Call vb2_dqbuf to get buffer back. + */ + memset(b, 0, q->buf_struct_size); + b->type = q->type; + b->memory = q->memory; + if (prequeue) { + b->index = index++; + prequeue--; + } else { + call_void_qop(q, wait_finish, q); + if (!threadio->stop) + ret = vb2_core_dqbuf(q, b, 0); + call_void_qop(q, wait_prepare, q); + dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); + } + if (ret || threadio->stop) + break; + try_to_freeze(); + + vb = q->bufs[b->index]; + if (b->state == VB2_BUF_STATE_DONE) + if (threadio->fnc(vb, threadio->priv)) + break; + call_void_qop(q, wait_finish, q); + if (copy_timestamp) + b->timestamp = ktime_get_ns();; + if (!threadio->stop) + ret = vb2_core_qbuf(q, b->index, b); + call_void_qop(q, wait_prepare, q); + if (ret || threadio->stop) + break; + } + + /* Hmm, linux becomes *very* unhappy without this ... */ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + return 0; +} + +/* + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailinglist first. 
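The read()/write() emulator (vb2_read()/vb2_write() above) and vb2_core_poll() now live in the core module; a V4L2 driver still reaches them through the videobuf2-v4l2 file-operation helpers. A sketch of that glue, assuming the queue's io_modes includes VB2_READ and the driver uses the standard helpers:

#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>

static const struct v4l2_file_operations my_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = vb2_fop_release,
        .read           = vb2_fop_read,   /* ends up in __vb2_perform_fileio() */
        .poll           = vb2_fop_poll,   /* ends up in vb2_core_poll() */
        .mmap           = vb2_fop_mmap,
        .unlocked_ioctl = video_ioctl2,
};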
+ */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, + const char *thread_name) +{ + struct vb2_threadio_data *threadio; + int ret = 0; + + if (q->threadio) + return -EBUSY; + if (vb2_is_busy(q)) + return -EBUSY; + if (WARN_ON(q->fileio)) + return -EBUSY; + + threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); + if (threadio == NULL) + return -ENOMEM; + threadio->fnc = fnc; + threadio->priv = priv; + + ret = __vb2_init_fileio(q, !q->is_output); + dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); + if (ret) + goto nomem; + q->threadio = threadio; + threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); + if (IS_ERR(threadio->thread)) { + ret = PTR_ERR(threadio->thread); + threadio->thread = NULL; + goto nothread; + } + return 0; + +nothread: + __vb2_cleanup_fileio(q); +nomem: + kfree(threadio); + return ret; +} +EXPORT_SYMBOL_GPL(vb2_thread_start); + +int vb2_thread_stop(struct vb2_queue *q) +{ + struct vb2_threadio_data *threadio = q->threadio; + int err; + + if (threadio == NULL) + return 0; + threadio->stop = true; + /* Wake up all pending sleeps in the thread */ + vb2_queue_error(q); + err = kthread_stop(threadio->thread); + __vb2_cleanup_fileio(q); + threadio->thread = NULL; + kfree(threadio); + q->threadio = NULL; + return err; +} +EXPORT_SYMBOL_GPL(vb2_thread_stop); + +MODULE_DESCRIPTION("Media buffer core framework"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/v4l2-core/videobuf2-internal.h b/drivers/media/v4l2-core/videobuf2-internal.h deleted file mode 100644 index 79018c749282..000000000000 --- a/drivers/media/v4l2-core/videobuf2-internal.h +++ /dev/null @@ -1,161 +0,0 @@ -#ifndef _MEDIA_VIDEOBUF2_INTERNAL_H -#define _MEDIA_VIDEOBUF2_INTERNAL_H - -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <media/videobuf2-core.h> - -extern int vb2_debug; - -#define dprintk(level, fmt, arg...) \ - do { \ - if (vb2_debug >= level) \ - pr_info("vb2: %s: " fmt, __func__, ## arg); \ - } while (0) - -#ifdef CONFIG_VIDEO_ADV_DEBUG - -/* - * If advanced debugging is on, then count how often each op is called - * successfully, which can either be per-buffer or per-queue. - * - * This makes it easy to check that the 'init' and 'cleanup' - * (and variations thereof) stay balanced. - */ - -#define log_memop(vb, op) \ - dprintk(2, "call_memop(%p, %d, %s)%s\n", \ - (vb)->vb2_queue, (vb)->index, #op, \ - (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") - -#define call_memop(vb, op, args...) \ -({ \ - struct vb2_queue *_q = (vb)->vb2_queue; \ - int err; \ - \ - log_memop(vb, op); \ - err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ - if (!err) \ - (vb)->cnt_mem_ ## op++; \ - err; \ -}) - -#define call_ptr_memop(vb, op, args...) \ -({ \ - struct vb2_queue *_q = (vb)->vb2_queue; \ - void *ptr; \ - \ - log_memop(vb, op); \ - ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \ - if (!IS_ERR_OR_NULL(ptr)) \ - (vb)->cnt_mem_ ## op++; \ - ptr; \ -}) - -#define call_void_memop(vb, op, args...) \ -({ \ - struct vb2_queue *_q = (vb)->vb2_queue; \ - \ - log_memop(vb, op); \ - if (_q->mem_ops->op) \ - _q->mem_ops->op(args); \ - (vb)->cnt_mem_ ## op++; \ -}) - -#define log_qop(q, op) \ - dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \ - (q)->ops->op ? "" : " (nop)") - -#define call_qop(q, op, args...) \ -({ \ - int err; \ - \ - log_qop(q, op); \ - err = (q)->ops->op ? 
(q)->ops->op(args) : 0; \ - if (!err) \ - (q)->cnt_ ## op++; \ - err; \ -}) - -#define call_void_qop(q, op, args...) \ -({ \ - log_qop(q, op); \ - if ((q)->ops->op) \ - (q)->ops->op(args); \ - (q)->cnt_ ## op++; \ -}) - -#define log_vb_qop(vb, op, args...) \ - dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \ - (vb)->vb2_queue, (vb)->index, #op, \ - (vb)->vb2_queue->ops->op ? "" : " (nop)") - -#define call_vb_qop(vb, op, args...) \ -({ \ - int err; \ - \ - log_vb_qop(vb, op); \ - err = (vb)->vb2_queue->ops->op ? \ - (vb)->vb2_queue->ops->op(args) : 0; \ - if (!err) \ - (vb)->cnt_ ## op++; \ - err; \ -}) - -#define call_void_vb_qop(vb, op, args...) \ -({ \ - log_vb_qop(vb, op); \ - if ((vb)->vb2_queue->ops->op) \ - (vb)->vb2_queue->ops->op(args); \ - (vb)->cnt_ ## op++; \ -}) - -#else - -#define call_memop(vb, op, args...) \ - ((vb)->vb2_queue->mem_ops->op ? \ - (vb)->vb2_queue->mem_ops->op(args) : 0) - -#define call_ptr_memop(vb, op, args...) \ - ((vb)->vb2_queue->mem_ops->op ? \ - (vb)->vb2_queue->mem_ops->op(args) : NULL) - -#define call_void_memop(vb, op, args...) \ - do { \ - if ((vb)->vb2_queue->mem_ops->op) \ - (vb)->vb2_queue->mem_ops->op(args); \ - } while (0) - -#define call_qop(q, op, args...) \ - ((q)->ops->op ? (q)->ops->op(args) : 0) - -#define call_void_qop(q, op, args...) \ - do { \ - if ((q)->ops->op) \ - (q)->ops->op(args); \ - } while (0) - -#define call_vb_qop(vb, op, args...) \ - ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) - -#define call_void_vb_qop(vb, op, args...) \ - do { \ - if ((vb)->vb2_queue->ops->op) \ - (vb)->vb2_queue->ops->op(args); \ - } while (0) - -#endif - -#define call_bufop(q, op, args...) \ -({ \ - int ret = 0; \ - if (q && q->buf_ops && q->buf_ops->op) \ - ret = q->buf_ops->op(args); \ - ret; \ -}) - -bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); -int vb2_verify_memory_type(struct vb2_queue *q, - enum vb2_memory memory, unsigned int type); -#endif /* _MEDIA_VIDEOBUF2_INTERNAL_H */ diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index 27b4b9e7c0c2..c9a28605511a 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -31,7 +31,14 @@ #include <media/videobuf2-v4l2.h> -#include "videobuf2-internal.h" +static int debug; +module_param(debug, int, 0644); + +#define dprintk(level, fmt, arg...) \ + do { \ + if (debug >= level) \ + pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \ + } while (0) /* Flags that are set by the vb2 core */ #define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ @@ -52,7 +59,7 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer return 0; /* Is memory for copying plane information present? */ - if (NULL == b->m.planes) { + if (b->m.planes == NULL) { dprintk(1, "multi-planar buffer passed but " "planes array not provided\n"); return -EINVAL; @@ -107,7 +114,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) return 0; } -static int __set_timestamp(struct vb2_buffer *vb, const void *pb) +static void __copy_timestamp(struct vb2_buffer *vb, const void *pb) { const struct v4l2_buffer *b = pb; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -118,14 +125,12 @@ static int __set_timestamp(struct vb2_buffer *vb, const void *pb) * For output buffers copy the timestamp if needed, * and the timecode field and flag if needed. 
*/ - if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == - V4L2_BUF_FLAG_TIMESTAMP_COPY) - vbuf->timestamp = b->timestamp; + if (q->copy_timestamp) + vb->timestamp = timeval_to_ns(&b->timestamp); vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; if (b->flags & V4L2_BUF_FLAG_TIMECODE) vbuf->timecode = b->timecode; } - return 0; }; static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) @@ -176,7 +181,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be * returned to userspace */ -static int __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) +static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) { struct v4l2_buffer *b = pb; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -191,7 +196,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) b->flags = vbuf->flags; b->field = vbuf->field; - b->timestamp = vbuf->timestamp; + b->timestamp = ns_to_timeval(vb->timestamp); b->timecode = vbuf->timecode; b->sequence = vbuf->sequence; b->reserved2 = 0; @@ -238,8 +243,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) */ b->flags &= ~V4L2_BUFFER_MASK_FLAGS; b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK; - if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != - V4L2_BUF_FLAG_TIMESTAMP_COPY) { + if (!q->copy_timestamp) { /* * For non-COPY timestamps, drop timestamp source bits * and obtain the timestamp source from the queue. @@ -272,7 +276,10 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) if (vb2_buffer_in_use(q, vb)) b->flags |= V4L2_BUF_FLAG_MAPPED; - return 0; + if (!q->is_output && + b->flags & V4L2_BUF_FLAG_DONE && + b->flags & V4L2_BUF_FLAG_LAST) + q->last_buffer_dequeued = true; } /** @@ -308,8 +315,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, "for an output buffer\n"); return -EINVAL; } - vbuf->timestamp.tv_sec = 0; - vbuf->timestamp.tv_usec = 0; + vb->timestamp = 0; vbuf->sequence = 0; if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { @@ -404,8 +410,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, /* Zero flags that the vb2 core handles */ vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; - if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != - V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) { + if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) { /* * Non-COPY timestamps and non-OUTPUT queues will get * their timestamp and timestamp source flags from the @@ -434,7 +439,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, static const struct vb2_buf_ops v4l2_buf_ops = { .fill_user_buffer = __fill_v4l2_buffer, .fill_vb2_buffer = __fill_vb2_buffer, - .set_timestamp = __set_timestamp, + .copy_timestamp = __copy_timestamp, }; /** @@ -466,8 +471,9 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) } vb = q->bufs[b->index]; ret = __verify_planes_array(vb, b); - - return ret ? 
ret : vb2_core_querybuf(q, b->index, b); + if (!ret) + vb2_core_querybuf(q, b->index, b); + return ret; } EXPORT_SYMBOL(vb2_querybuf); @@ -525,14 +531,52 @@ EXPORT_SYMBOL_GPL(vb2_prepare_buf); */ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) { - int ret = vb2_verify_memory_type(q, create->memory, - create->format.type); + unsigned requested_planes = 1; + unsigned requested_sizes[VIDEO_MAX_PLANES]; + struct v4l2_format *f = &create->format; + int ret = vb2_verify_memory_type(q, create->memory, f->type); + unsigned i; create->index = q->num_buffers; if (create->count == 0) return ret != -EBUSY ? ret : 0; + + switch (f->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + requested_planes = f->fmt.pix_mp.num_planes; + if (requested_planes == 0 || + requested_planes > VIDEO_MAX_PLANES) + return -EINVAL; + for (i = 0; i < requested_planes; i++) + requested_sizes[i] = + f->fmt.pix_mp.plane_fmt[i].sizeimage; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + requested_sizes[0] = f->fmt.pix.sizeimage; + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + requested_sizes[0] = f->fmt.vbi.samples_per_line * + (f->fmt.vbi.count[0] + f->fmt.vbi.count[1]); + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + requested_sizes[0] = f->fmt.sliced.io_size; + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + requested_sizes[0] = f->fmt.sdr.buffersize; + break; + default: + return -EINVAL; + } + for (i = 0; i < requested_planes; i++) + if (requested_sizes[i] == 0) + return -EINVAL; return ret ? ret : vb2_core_create_bufs(q, create->memory, - &create->count, &create->format); + &create->count, requested_planes, requested_sizes); } EXPORT_SYMBOL_GPL(vb2_create_bufs); @@ -583,10 +627,6 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, ret = vb2_core_dqbuf(q, b, nonblocking); - if (!ret && !q->is_output && - b->flags & V4L2_BUF_FLAG_LAST) - q->last_buffer_dequeued = true; - return ret; } @@ -723,14 +763,13 @@ int vb2_queue_init(struct vb2_queue *q) q->buf_ops = &v4l2_buf_ops; q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type); q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); + q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) + == V4L2_BUF_FLAG_TIMESTAMP_COPY; return vb2_core_queue_init(q); } EXPORT_SYMBOL_GPL(vb2_queue_init); -static int __vb2_init_fileio(struct vb2_queue *q, int read); -static int __vb2_cleanup_fileio(struct vb2_queue *q); - /** * vb2_queue_release() - stop streaming, release the queue and free memory * @q: videobuf2 queue @@ -741,7 +780,6 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q); */ void vb2_queue_release(struct vb2_queue *q) { - __vb2_cleanup_fileio(q); vb2_core_queue_release(q); } EXPORT_SYMBOL_GPL(vb2_queue_release); @@ -769,9 +807,7 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) { struct video_device *vfd = video_devdata(file); unsigned long req_events = poll_requested_events(wait); - struct vb2_buffer *vb = NULL; unsigned int res = 0; - unsigned long flags; if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { struct v4l2_fh *fh = file->private_data; @@ -782,611 +818,18 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) poll_wait(file, &fh->wait, wait); } - if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM))) - return res; - if (q->is_output && !(req_events & (POLLOUT 
| POLLWRNORM))) - return res; - - /* - * Start file I/O emulator only if streaming API has not been used yet. - */ - if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { - if (!q->is_output && (q->io_modes & VB2_READ) && - (req_events & (POLLIN | POLLRDNORM))) { - if (__vb2_init_fileio(q, 1)) - return res | POLLERR; - } - if (q->is_output && (q->io_modes & VB2_WRITE) && - (req_events & (POLLOUT | POLLWRNORM))) { - if (__vb2_init_fileio(q, 0)) - return res | POLLERR; - /* - * Write to OUTPUT queue can be done immediately. - */ - return res | POLLOUT | POLLWRNORM; - } - } - - /* - * There is nothing to wait for if the queue isn't streaming, or if the - * error flag is set. - */ - if (!vb2_is_streaming(q) || q->error) - return res | POLLERR; /* * For compatibility with vb1: if QBUF hasn't been called yet, then * return POLLERR as well. This only affects capture queues, output * queues will always initialize waiting_for_buffers to false. */ - if (q->waiting_for_buffers) - return res | POLLERR; - - /* - * For output streams you can write as long as there are fewer buffers - * queued than there are buffers available. - */ - if (q->is_output && q->queued_count < q->num_buffers) - return res | POLLOUT | POLLWRNORM; - - if (list_empty(&q->done_list)) { - /* - * If the last buffer was dequeued from a capture queue, - * return immediately. DQBUF will return -EPIPE. - */ - if (q->last_buffer_dequeued) - return res | POLLIN | POLLRDNORM; - - poll_wait(file, &q->done_wq, wait); - } + if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) + return POLLERR; - /* - * Take first buffer available for dequeuing. - */ - spin_lock_irqsave(&q->done_lock, flags); - if (!list_empty(&q->done_list)) - vb = list_first_entry(&q->done_list, struct vb2_buffer, - done_entry); - spin_unlock_irqrestore(&q->done_lock, flags); - - if (vb && (vb->state == VB2_BUF_STATE_DONE - || vb->state == VB2_BUF_STATE_ERROR)) { - return (q->is_output) ? - res | POLLOUT | POLLWRNORM : - res | POLLIN | POLLRDNORM; - } - return res; + return res | vb2_core_poll(q, file, wait); } EXPORT_SYMBOL_GPL(vb2_poll); -/** - * struct vb2_fileio_buf - buffer context used by file io emulator - * - * vb2 provides a compatibility layer and emulator of file io (read and - * write) calls on top of streaming API. This structure is used for - * tracking context related to the buffers. - */ -struct vb2_fileio_buf { - void *vaddr; - unsigned int size; - unsigned int pos; - unsigned int queued:1; -}; - -/** - * struct vb2_fileio_data - queue context used by file io emulator - * - * @cur_index: the index of the buffer currently being read from or - * written to. If equal to q->num_buffers then a new buffer - * must be dequeued. - * @initial_index: in the read() case all buffers are queued up immediately - * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles - * buffers. However, in the write() case no buffers are initially - * queued, instead whenever a buffer is full it is queued up by - * __vb2_perform_fileio(). Only once all available buffers have - * been queued up will __vb2_perform_fileio() start to dequeue - * buffers. This means that initially __vb2_perform_fileio() - * needs to know what buffer index to use when it is queuing up - * the buffers for the first time. That initial index is stored - * in this field. Once it is equal to q->num_buffers all - * available buffers have been queued and __vb2_perform_fileio() - * should start the normal dequeue/queue cycle. 
- * - * vb2 provides a compatibility layer and emulator of file io (read and - * write) calls on top of streaming API. For proper operation it required - * this structure to save the driver state between each call of the read - * or write function. - */ -struct vb2_fileio_data { - struct v4l2_requestbuffers req; - struct v4l2_plane p; - struct v4l2_buffer b; - struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; - unsigned int cur_index; - unsigned int initial_index; - unsigned int q_count; - unsigned int dq_count; - unsigned read_once:1; - unsigned write_immediately:1; -}; - -/** - * __vb2_init_fileio() - initialize file io emulator - * @q: videobuf2 queue - * @read: mode selector (1 means read, 0 means write) - */ -static int __vb2_init_fileio(struct vb2_queue *q, int read) -{ - struct vb2_fileio_data *fileio; - int i, ret; - unsigned int count = 0; - - /* - * Sanity check - */ - if (WARN_ON((read && !(q->io_modes & VB2_READ)) || - (!read && !(q->io_modes & VB2_WRITE)))) - return -EINVAL; - - /* - * Check if device supports mapping buffers to kernel virtual space. - */ - if (!q->mem_ops->vaddr) - return -EBUSY; - - /* - * Check if streaming api has not been already activated. - */ - if (q->streaming || q->num_buffers > 0) - return -EBUSY; - - /* - * Start with count 1, driver can increase it in queue_setup() - */ - count = 1; - - dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", - (read) ? "read" : "write", count, q->fileio_read_once, - q->fileio_write_immediately); - - fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL); - if (fileio == NULL) - return -ENOMEM; - - fileio->read_once = q->fileio_read_once; - fileio->write_immediately = q->fileio_write_immediately; - - /* - * Request buffers and use MMAP type to force driver - * to allocate buffers by itself. - */ - fileio->req.count = count; - fileio->req.memory = VB2_MEMORY_MMAP; - fileio->req.type = q->type; - q->fileio = fileio; - ret = vb2_core_reqbufs(q, fileio->req.memory, &fileio->req.count); - if (ret) - goto err_kfree; - - /* - * Check if plane_count is correct - * (multiplane buffers are not supported). - */ - if (q->bufs[0]->num_planes != 1) { - ret = -EBUSY; - goto err_reqbufs; - } - - /* - * Get kernel address of each buffer. - */ - for (i = 0; i < q->num_buffers; i++) { - fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); - if (fileio->bufs[i].vaddr == NULL) { - ret = -EINVAL; - goto err_reqbufs; - } - fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); - } - - /* - * Read mode requires pre queuing of all buffers. - */ - if (read) { - bool is_multiplanar = q->is_multiplanar; - - /* - * Queue all buffers. - */ - for (i = 0; i < q->num_buffers; i++) { - struct v4l2_buffer *b = &fileio->b; - - memset(b, 0, sizeof(*b)); - b->type = q->type; - if (is_multiplanar) { - memset(&fileio->p, 0, sizeof(fileio->p)); - b->m.planes = &fileio->p; - b->length = 1; - } - b->memory = q->memory; - b->index = i; - ret = vb2_internal_qbuf(q, b); - if (ret) - goto err_reqbufs; - fileio->bufs[i].queued = 1; - } - /* - * All buffers have been queued, so mark that by setting - * initial_index to q->num_buffers - */ - fileio->initial_index = q->num_buffers; - fileio->cur_index = q->num_buffers; - } - - /* - * Start streaming. 
- */ - ret = vb2_core_streamon(q, q->type); - if (ret) - goto err_reqbufs; - - return ret; - -err_reqbufs: - fileio->req.count = 0; - vb2_core_reqbufs(q, fileio->req.memory, &fileio->req.count); - -err_kfree: - q->fileio = NULL; - kfree(fileio); - return ret; -} - -/** - * __vb2_cleanup_fileio() - free resourced used by file io emulator - * @q: videobuf2 queue - */ -static int __vb2_cleanup_fileio(struct vb2_queue *q) -{ - struct vb2_fileio_data *fileio = q->fileio; - - if (fileio) { - vb2_core_streamoff(q, q->type); - q->fileio = NULL; - fileio->req.count = 0; - vb2_reqbufs(q, &fileio->req); - kfree(fileio); - dprintk(3, "file io emulator closed\n"); - } - return 0; -} - -/** - * __vb2_perform_fileio() - perform a single file io (read or write) operation - * @q: videobuf2 queue - * @data: pointed to target userspace buffer - * @count: number of bytes to read or write - * @ppos: file handle position tracking pointer - * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) - * @read: access mode selector (1 means read, 0 means write) - */ -static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, - loff_t *ppos, int nonblock, int read) -{ - struct vb2_fileio_data *fileio; - struct vb2_fileio_buf *buf; - bool is_multiplanar = q->is_multiplanar; - /* - * When using write() to write data to an output video node the vb2 core - * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody - * else is able to provide this information with the write() operation. - */ - bool set_timestamp = !read && - (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == - V4L2_BUF_FLAG_TIMESTAMP_COPY; - int ret, index; - - dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n", - read ? "read" : "write", (long)*ppos, count, - nonblock ? "non" : ""); - - if (!data) - return -EINVAL; - - /* - * Initialize emulator on first call. - */ - if (!vb2_fileio_is_active(q)) { - ret = __vb2_init_fileio(q, read); - dprintk(3, "vb2_init_fileio result: %d\n", ret); - if (ret) - return ret; - } - fileio = q->fileio; - - /* - * Check if we need to dequeue the buffer. - */ - index = fileio->cur_index; - if (index >= q->num_buffers) { - /* - * Call vb2_dqbuf to get buffer back. - */ - memset(&fileio->b, 0, sizeof(fileio->b)); - fileio->b.type = q->type; - fileio->b.memory = q->memory; - if (is_multiplanar) { - memset(&fileio->p, 0, sizeof(fileio->p)); - fileio->b.m.planes = &fileio->p; - fileio->b.length = 1; - } - ret = vb2_internal_dqbuf(q, &fileio->b, nonblock); - dprintk(5, "vb2_dqbuf result: %d\n", ret); - if (ret) - return ret; - fileio->dq_count += 1; - - fileio->cur_index = index = fileio->b.index; - buf = &fileio->bufs[index]; - - /* - * Get number of bytes filled by the driver - */ - buf->pos = 0; - buf->queued = 0; - buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) - : vb2_plane_size(q->bufs[index], 0); - /* Compensate for data_offset on read in the multiplanar case. */ - if (is_multiplanar && read && - fileio->b.m.planes[0].data_offset < buf->size) { - buf->pos = fileio->b.m.planes[0].data_offset; - buf->size -= buf->pos; - } - } else { - buf = &fileio->bufs[index]; - } - - /* - * Limit count on last few bytes of the buffer. - */ - if (buf->pos + count > buf->size) { - count = buf->size - buf->pos; - dprintk(5, "reducing read count: %zd\n", count); - } - - /* - * Transfer data to userspace. 
- */ - dprintk(3, "copying %zd bytes - buffer %d, offset %u\n", - count, index, buf->pos); - if (read) - ret = copy_to_user(data, buf->vaddr + buf->pos, count); - else - ret = copy_from_user(buf->vaddr + buf->pos, data, count); - if (ret) { - dprintk(3, "error copying data\n"); - return -EFAULT; - } - - /* - * Update counters. - */ - buf->pos += count; - *ppos += count; - - /* - * Queue next buffer if required. - */ - if (buf->pos == buf->size || (!read && fileio->write_immediately)) { - /* - * Check if this is the last buffer to read. - */ - if (read && fileio->read_once && fileio->dq_count == 1) { - dprintk(3, "read limit reached\n"); - return __vb2_cleanup_fileio(q); - } - - /* - * Call vb2_qbuf and give buffer to the driver. - */ - memset(&fileio->b, 0, sizeof(fileio->b)); - fileio->b.type = q->type; - fileio->b.memory = q->memory; - fileio->b.index = index; - fileio->b.bytesused = buf->pos; - if (is_multiplanar) { - memset(&fileio->p, 0, sizeof(fileio->p)); - fileio->p.bytesused = buf->pos; - fileio->b.m.planes = &fileio->p; - fileio->b.length = 1; - } - if (set_timestamp) - v4l2_get_timestamp(&fileio->b.timestamp); - ret = vb2_internal_qbuf(q, &fileio->b); - dprintk(5, "vb2_dbuf result: %d\n", ret); - if (ret) - return ret; - - /* - * Buffer has been queued, update the status - */ - buf->pos = 0; - buf->queued = 1; - buf->size = vb2_plane_size(q->bufs[index], 0); - fileio->q_count += 1; - /* - * If we are queuing up buffers for the first time, then - * increase initial_index by one. - */ - if (fileio->initial_index < q->num_buffers) - fileio->initial_index++; - /* - * The next buffer to use is either a buffer that's going to be - * queued for the first time (initial_index < q->num_buffers) - * or it is equal to q->num_buffers, meaning that the next - * time we need to dequeue a buffer since we've now queued up - * all the 'first time' buffers. - */ - fileio->cur_index = fileio->initial_index; - } - - /* - * Return proper number of bytes processed. - */ - if (ret == 0) - ret = count; - return ret; -} - -size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, - loff_t *ppos, int nonblocking) -{ - return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); -} -EXPORT_SYMBOL_GPL(vb2_read); - -size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, - loff_t *ppos, int nonblocking) -{ - return __vb2_perform_fileio(q, (char __user *) data, count, - ppos, nonblocking, 0); -} -EXPORT_SYMBOL_GPL(vb2_write); - -struct vb2_threadio_data { - struct task_struct *thread; - vb2_thread_fnc fnc; - void *priv; - bool stop; -}; - -static int vb2_thread(void *data) -{ - struct vb2_queue *q = data; - struct vb2_threadio_data *threadio = q->threadio; - struct vb2_fileio_data *fileio = q->fileio; - bool set_timestamp = false; - int prequeue = 0; - int index = 0; - int ret = 0; - - if (q->is_output) { - prequeue = q->num_buffers; - set_timestamp = - (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == - V4L2_BUF_FLAG_TIMESTAMP_COPY; - } - - set_freezable(); - - for (;;) { - struct vb2_buffer *vb; - - /* - * Call vb2_dqbuf to get buffer back. 
- */ - memset(&fileio->b, 0, sizeof(fileio->b)); - fileio->b.type = q->type; - fileio->b.memory = q->memory; - if (prequeue) { - fileio->b.index = index++; - prequeue--; - } else { - call_void_qop(q, wait_finish, q); - if (!threadio->stop) - ret = vb2_internal_dqbuf(q, &fileio->b, 0); - call_void_qop(q, wait_prepare, q); - dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); - } - if (ret || threadio->stop) - break; - try_to_freeze(); - - vb = q->bufs[fileio->b.index]; - if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR)) - if (threadio->fnc(vb, threadio->priv)) - break; - call_void_qop(q, wait_finish, q); - if (set_timestamp) - v4l2_get_timestamp(&fileio->b.timestamp); - if (!threadio->stop) - ret = vb2_internal_qbuf(q, &fileio->b); - call_void_qop(q, wait_prepare, q); - if (ret || threadio->stop) - break; - } - - /* Hmm, linux becomes *very* unhappy without this ... */ - while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - } - return 0; -} - -/* - * This function should not be used for anything else but the videobuf2-dvb - * support. If you think you have another good use-case for this, then please - * contact the linux-media mailinglist first. - */ -int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, - const char *thread_name) -{ - struct vb2_threadio_data *threadio; - int ret = 0; - - if (q->threadio) - return -EBUSY; - if (vb2_is_busy(q)) - return -EBUSY; - if (WARN_ON(q->fileio)) - return -EBUSY; - - threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); - if (threadio == NULL) - return -ENOMEM; - threadio->fnc = fnc; - threadio->priv = priv; - - ret = __vb2_init_fileio(q, !q->is_output); - dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); - if (ret) - goto nomem; - q->threadio = threadio; - threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); - if (IS_ERR(threadio->thread)) { - ret = PTR_ERR(threadio->thread); - threadio->thread = NULL; - goto nothread; - } - return 0; - -nothread: - __vb2_cleanup_fileio(q); -nomem: - kfree(threadio); - return ret; -} -EXPORT_SYMBOL_GPL(vb2_thread_start); - -int vb2_thread_stop(struct vb2_queue *q) -{ - struct vb2_threadio_data *threadio = q->threadio; - int err; - - if (threadio == NULL) - return 0; - threadio->stop = true; - /* Wake up all pending sleeps in the thread */ - vb2_queue_error(q); - err = kthread_stop(threadio->thread); - __vb2_cleanup_fileio(q); - threadio->thread = NULL; - kfree(threadio); - q->threadio = NULL; - return err; -} -EXPORT_SYMBOL_GPL(vb2_thread_stop); - /* * The following functions are not part of the vb2 core API, but are helper * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations @@ -1440,8 +883,8 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv, return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - res = vb2_core_create_bufs(vdev->queue, p->memory, &p->count, - &p->format); + + res = vb2_create_bufs(vdev->queue, p); if (res == 0) vdev->queue->owner = file->private_data; return res; diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index e87459f6d686..acd1460cf787 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/compiler.h> +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/slab.h> diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c index b6fd9041f82f..06f00d60be46 100644 --- a/drivers/mfd/intel-lpss-acpi.c +++ 
b/drivers/mfd/intel-lpss-acpi.c @@ -18,6 +18,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> +#include <linux/property.h> #include "intel-lpss.h" @@ -25,6 +26,20 @@ static const struct intel_lpss_platform_info spt_info = { .clk_rate = 120000000, }; +static struct property_entry spt_i2c_properties[] = { + PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 230), + { }, +}; + +static struct property_set spt_i2c_pset = { + .properties = spt_i2c_properties, +}; + +static const struct intel_lpss_platform_info spt_i2c_info = { + .clk_rate = 120000000, + .pset = &spt_i2c_pset, +}; + static const struct intel_lpss_platform_info bxt_info = { .clk_rate = 100000000, }; @@ -35,8 +50,8 @@ static const struct intel_lpss_platform_info bxt_i2c_info = { static const struct acpi_device_id intel_lpss_acpi_ids[] = { /* SPT */ - { "INT3446", (kernel_ulong_t)&spt_info }, - { "INT3447", (kernel_ulong_t)&spt_info }, + { "INT3446", (kernel_ulong_t)&spt_i2c_info }, + { "INT3447", (kernel_ulong_t)&spt_i2c_info }, /* BXT */ { "80860AAC", (kernel_ulong_t)&bxt_i2c_info }, { "80860ABC", (kernel_ulong_t)&bxt_info }, diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 5bfdfccbb9a1..a7136c7ae9fb 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -17,6 +17,7 @@ #include <linux/pci.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include "intel-lpss.h" @@ -65,9 +66,35 @@ static const struct intel_lpss_platform_info spt_info = { .clk_rate = 120000000, }; +static struct property_entry spt_i2c_properties[] = { + PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 230), + { }, +}; + +static struct property_set spt_i2c_pset = { + .properties = spt_i2c_properties, +}; + +static const struct intel_lpss_platform_info spt_i2c_info = { + .clk_rate = 120000000, + .pset = &spt_i2c_pset, +}; + +static struct property_entry uart_properties[] = { + PROPERTY_ENTRY_U32("reg-io-width", 4), + PROPERTY_ENTRY_U32("reg-shift", 2), + PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"), + { }, +}; + +static struct property_set uart_pset = { + .properties = uart_properties, +}; + static const struct intel_lpss_platform_info spt_uart_info = { .clk_rate = 120000000, .clk_con_id = "baudclk", + .pset = &uart_pset, }; static const struct intel_lpss_platform_info bxt_info = { @@ -77,6 +104,7 @@ static const struct intel_lpss_platform_info bxt_info = { static const struct intel_lpss_platform_info bxt_uart_info = { .clk_rate = 100000000, .clk_con_id = "baudclk", + .pset = &uart_pset, }; static const struct intel_lpss_platform_info bxt_i2c_info = { @@ -121,20 +149,20 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 
0x9d64), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info }, /* SPT-H */ { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, { } }; diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 6255513f54c7..1743788f1595 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -24,6 +24,7 @@ #include <linux/mfd/core.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/seq_file.h> #include <linux/io-64-nonatomic-lo-hi.h> @@ -72,7 +73,7 @@ struct intel_lpss { enum intel_lpss_dev_type type; struct clk *clk; struct clk_lookup *clock; - const struct mfd_cell *cell; + struct mfd_cell *cell; struct device *dev; void __iomem *priv; int devid; @@ -217,6 +218,7 @@ static void intel_lpss_ltr_hide(struct intel_lpss *lpss) static int intel_lpss_assign_devs(struct intel_lpss *lpss) { + const struct mfd_cell *cell; unsigned int type; type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK; @@ -224,18 +226,22 @@ static int intel_lpss_assign_devs(struct intel_lpss *lpss) switch (type) { case LPSS_DEV_I2C: - lpss->cell = &intel_lpss_i2c_cell; + cell = &intel_lpss_i2c_cell; break; case LPSS_DEV_UART: - lpss->cell = &intel_lpss_uart_cell; + cell = &intel_lpss_uart_cell; break; case LPSS_DEV_SPI: - lpss->cell = &intel_lpss_spi_cell; + cell = &intel_lpss_spi_cell; break; default: return -ENODEV; } + lpss->cell = devm_kmemdup(lpss->dev, cell, sizeof(*cell), GFP_KERNEL); + if (!lpss->cell) + return -ENOMEM; + lpss->type = type; return 0; @@ -401,6 +407,8 @@ int intel_lpss_probe(struct device *dev, if (ret) return ret; + lpss->cell->pset = info->pset; + intel_lpss_init_dev(lpss); lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL); diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h index 2c7f8d7c0595..0dcea9eb2d03 100644 --- a/drivers/mfd/intel-lpss.h +++ b/drivers/mfd/intel-lpss.h @@ -16,12 +16,14 @@ struct device; struct resource; +struct property_set; struct intel_lpss_platform_info { struct resource *mem; int irq; unsigned long clk_rate; const char *clk_con_id; + struct property_set *pset; }; int intel_lpss_probe(struct device *dev, diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 60b60dc63ddd..88bd1b1e47be 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/acpi.h> +#include <linux/property.h> #include <linux/mfd/core.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -192,6 +193,12 @@ static int mfd_add_device(struct device *parent, int id, goto fail_alias; } + if (cell->pset) { + ret = platform_device_add_properties(pdev, cell->pset); + if (ret) + goto fail_alias; + } + ret = mfd_platform_add_cell(pdev, cell, usage_count); if (ret) goto fail_alias; diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index 6ce36d6970a4..c9339f85359b 100644 --- 
a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c @@ -39,8 +39,8 @@ #include <linux/spi/max7301.h> #include <linux/spi/mc33880.h> -#include <media/timb_radio.h> -#include <media/timb_video.h> +#include <linux/platform_data/media/timb_radio.h> +#include <linux/platform_data/media/timb_video.h> #include <linux/timb_dma.h> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index d8486168415a..5914263090fc 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -171,11 +171,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) static inline int mmc_get_devidx(struct gendisk *disk) { - int devmaj = MAJOR(disk_devt(disk)); - int devidx = MINOR(disk_devt(disk)) / perdev_minors; - - if (!devmaj) - devidx = disk->first_minor / perdev_minors; + int devidx = disk->first_minor / perdev_minors; return devidx; } @@ -344,7 +340,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( struct mmc_blk_ioc_data *idata; int err; - idata = kzalloc(sizeof(*idata), GFP_KERNEL); + idata = kmalloc(sizeof(*idata), GFP_KERNEL); if (!idata) { err = -ENOMEM; goto out; @@ -364,7 +360,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( if (!idata->buf_bytes) return idata; - idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); + idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL); if (!idata->buf) { err = -ENOMEM; goto idata_err; @@ -2244,6 +2240,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); + md->disk->flags = GENHD_FL_EXT_DEVT; if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) md->disk->flags |= GENHD_FL_NO_PART_SCAN; diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 972ff844cf5a..4bc48f10452f 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -349,6 +349,8 @@ int mmc_add_card(struct mmc_card *card) card->dev.of_node = mmc_of_find_child_device(card->host, 0); + device_enable_async_suspend(&card->dev); + ret = device_add(&card->dev); if (ret) return ret; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5ae89e48fd85..f95d41ffc766 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -55,7 +55,6 @@ */ #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ -static struct workqueue_struct *workqueue; static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; /* @@ -66,21 +65,16 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; bool use_spi_crc = 1; module_param(use_spi_crc, bool, 0); -/* - * Internal function. Schedule delayed work in the MMC work queue. - */ static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { - return queue_delayed_work(workqueue, work, delay); -} - -/* - * Internal function. Flush all scheduled work from the MMC work queue. - */ -static void mmc_flush_scheduled_work(void) -{ - flush_workqueue(workqueue); + /* + * We use the system_freezable_wq, because of two reasons. + * First, it allows several works (not the same work item) to be + * executed simultaneously. Second, the queue becomes frozen when + * userspace becomes frozen during system PM. 
+ */ + return queue_delayed_work(system_freezable_wq, work, delay); } #ifdef CONFIG_FAIL_MMC_REQUEST @@ -1485,7 +1479,7 @@ int mmc_regulator_get_supply(struct mmc_host *mmc) if (IS_ERR(mmc->supply.vmmc)) { if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) return -EPROBE_DEFER; - dev_info(dev, "No vmmc regulator found\n"); + dev_dbg(dev, "No vmmc regulator found\n"); } else { ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); if (ret > 0) @@ -1497,7 +1491,7 @@ int mmc_regulator_get_supply(struct mmc_host *mmc) if (IS_ERR(mmc->supply.vqmmc)) { if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) return -EPROBE_DEFER; - dev_info(dev, "No vqmmc regulator found\n"); + dev_dbg(dev, "No vqmmc regulator found\n"); } return 0; @@ -2476,15 +2470,20 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) * sdio_reset sends CMD52 to reset card. Since we do not know * if the card is being re-initialized, just send it. CMD52 * should be ignored by SD/eMMC cards. + * Skip it if we already know that we do not support SDIO commands */ - sdio_reset(host); + if (!(host->caps2 & MMC_CAP2_NO_SDIO)) + sdio_reset(host); + mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); /* Order's important: probe SDIO, then SD, then MMC */ - if (!mmc_attach_sdio(host)) - return 0; + if (!(host->caps2 & MMC_CAP2_NO_SDIO)) + if (!mmc_attach_sdio(host)) + return 0; + if (!mmc_attach_sd(host)) return 0; if (!mmc_attach_mmc(host)) @@ -2498,9 +2497,6 @@ int _mmc_detect_card_removed(struct mmc_host *host) { int ret; - if (host->caps & MMC_CAP_NONREMOVABLE) - return 0; - if (!host->card || mmc_card_removed(host->card)) return 1; @@ -2536,6 +2532,9 @@ int mmc_detect_card_removed(struct mmc_host *host) if (!card) return 1; + if (host->caps & MMC_CAP_NONREMOVABLE) + return 0; + ret = mmc_card_removed(card); /* * The card will be considered unchanged unless we have been asked to @@ -2567,11 +2566,6 @@ void mmc_rescan(struct work_struct *work) container_of(work, struct mmc_host, detect.work); int i; - if (host->trigger_card_event && host->ops->card_event) { - host->ops->card_event(host); - host->trigger_card_event = false; - } - if (host->rescan_disable) return; @@ -2580,6 +2574,13 @@ void mmc_rescan(struct work_struct *work) return; host->rescan_entered = 1; + if (host->trigger_card_event && host->ops->card_event) { + mmc_claim_host(host); + host->ops->card_event(host); + mmc_release_host(host); + host->trigger_card_event = false; + } + mmc_bus_get(host); /* @@ -2611,15 +2612,14 @@ void mmc_rescan(struct work_struct *work) */ mmc_bus_put(host); + mmc_claim_host(host); if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd && host->ops->get_cd(host) == 0) { - mmc_claim_host(host); mmc_power_off(host); mmc_release_host(host); goto out; } - mmc_claim_host(host); for (i = 0; i < ARRAY_SIZE(freqs); i++) { if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) break; @@ -2663,7 +2663,6 @@ void mmc_stop_host(struct mmc_host *host) host->rescan_disable = 1; cancel_delayed_work_sync(&host->detect); - mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ host->pm_flags = 0; @@ -2759,14 +2758,13 @@ int mmc_flush_cache(struct mmc_card *card) } EXPORT_SYMBOL(mmc_flush_cache); -#ifdef CONFIG_PM - +#ifdef CONFIG_PM_SLEEP /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able to sync the card. 
*/ -int mmc_pm_notify(struct notifier_block *notify_block, - unsigned long mode, void *unused) +static int mmc_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) { struct mmc_host *host = container_of( notify_block, struct mmc_host, pm_notify); @@ -2813,6 +2811,17 @@ int mmc_pm_notify(struct notifier_block *notify_block, return 0; } + +void mmc_register_pm_notifier(struct mmc_host *host) +{ + host->pm_notify.notifier_call = mmc_pm_notify; + register_pm_notifier(&host->pm_notify); +} + +void mmc_unregister_pm_notifier(struct mmc_host *host) +{ + unregister_pm_notifier(&host->pm_notify); +} #endif /** @@ -2836,13 +2845,9 @@ static int __init mmc_init(void) { int ret; - workqueue = alloc_ordered_workqueue("kmmcd", 0); - if (!workqueue) - return -ENOMEM; - ret = mmc_register_bus(); if (ret) - goto destroy_workqueue; + return ret; ret = mmc_register_host_class(); if (ret) @@ -2858,9 +2863,6 @@ unregister_host_class: mmc_unregister_host_class(); unregister_bus: mmc_unregister_bus(); -destroy_workqueue: - destroy_workqueue(workqueue); - return ret; } @@ -2869,7 +2871,6 @@ static void __exit mmc_exit(void) sdio_unregister_bus(); mmc_unregister_host_class(); mmc_unregister_bus(); - destroy_workqueue(workqueue); } subsys_initcall(mmc_init); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 09241e56d628..0fa86a2afc26 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -90,5 +90,13 @@ int mmc_execute_tuning(struct mmc_card *card); int mmc_hs200_to_hs400(struct mmc_card *card); int mmc_hs400_to_hs200(struct mmc_card *card); +#ifdef CONFIG_PM_SLEEP +void mmc_register_pm_notifier(struct mmc_host *host); +void mmc_unregister_pm_notifier(struct mmc_host *host); +#else +static inline void mmc_register_pm_notifier(struct mmc_host *host) { } +static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { } +#endif + #endif diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index da950c44204d..0aecd5c00b86 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -21,7 +21,6 @@ #include <linux/export.h> #include <linux/leds.h> #include <linux/slab.h> -#include <linux/suspend.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -275,7 +274,8 @@ int mmc_of_parse(struct mmc_host *host) host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; if (of_property_read_bool(np, "keep-power-in-suspend")) host->pm_caps |= MMC_PM_KEEP_POWER; - if (of_property_read_bool(np, "enable-sdio-wakeup")) + if (of_property_read_bool(np, "wakeup-source") || + of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; if (of_property_read_bool(np, "mmc-ddr-1_8v")) host->caps |= MMC_CAP_1_8V_DDR; @@ -348,9 +348,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); -#ifdef CONFIG_PM - host->pm_notify.notifier_call = mmc_pm_notify; -#endif setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host); /* @@ -395,7 +392,7 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); - register_pm_notifier(&host->pm_notify); + mmc_register_pm_notifier(host); return 0; } @@ -412,7 +409,7 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - unregister_pm_notifier(&host->pm_notify); + mmc_unregister_pm_notifier(host); mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 
3a9a79ec4343..bf49e44571f2 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card) mmc_set_clock(host, max_dtr); /* Switch card to HS mode */ - val = EXT_CSD_TIMING_HS | - card->drive_strength << EXT_CSD_DRV_STR_SHIFT; + val = EXT_CSD_TIMING_HS; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, @@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card) mmc_set_clock(host, max_dtr); /* Switch HS400 to HS DDR */ - val = EXT_CSD_TIMING_HS | - card->drive_strength << EXT_CSD_DRV_STR_SHIFT; + val = EXT_CSD_TIMING_HS; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, true, send_status, true); @@ -1907,16 +1905,8 @@ static int mmc_shutdown(struct mmc_host *host) */ static int mmc_resume(struct mmc_host *host) { - int err = 0; - - if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { - err = _mmc_resume(host); - pm_runtime_set_active(&host->card->dev); - pm_runtime_mark_last_busy(&host->card->dev); - } pm_runtime_enable(&host->card->dev); - - return err; + return 0; } /* @@ -1944,12 +1934,9 @@ static int mmc_runtime_resume(struct mmc_host *host) { int err; - if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) - return 0; - err = _mmc_resume(host); - if (err) - pr_err("%s: error %d doing aggressive resume\n", + if (err && err != -ENOMEDIUM) + pr_err("%s: error %d doing runtime resume\n", mmc_hostname(host), err); return 0; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 1f444269ebbe..2c90635c89af 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -489,6 +489,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, unsigned long timeout; u32 status = 0; bool use_r1b_resp = use_busy_signal; + bool expired = false; mmc_retune_hold(host); @@ -545,6 +546,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, timeout = jiffies + msecs_to_jiffies(timeout_ms); do { if (send_status) { + /* + * Due to the possibility of being preempted after + * sending the status command, check the expiration + * time first. + */ + expired = time_after(jiffies, timeout); err = __mmc_send_status(card, &status, ignore_crc); if (err) goto out; @@ -565,7 +572,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, } /* Timeout if the device never leaves the program state. */ - if (time_after(jiffies, timeout)) { + if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) { pr_err("%s: Card stuck in programming state! 
%s\n", mmc_hostname(host), __func__); err = -ETIMEDOUT; diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h index 096da48c6a7e..133de0426687 100644 --- a/drivers/mmc/core/pwrseq.h +++ b/drivers/mmc/core/pwrseq.h @@ -16,7 +16,7 @@ struct mmc_pwrseq_ops { }; struct mmc_pwrseq { - struct mmc_pwrseq_ops *ops; + const struct mmc_pwrseq_ops *ops; }; #ifdef CONFIG_OF diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c index ad4f94ec7e8d..4a82bc77fe49 100644 --- a/drivers/mmc/core/pwrseq_emmc.c +++ b/drivers/mmc/core/pwrseq_emmc.c @@ -51,7 +51,7 @@ static void mmc_pwrseq_emmc_free(struct mmc_host *host) kfree(pwrseq); } -static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = { +static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = { .post_power_on = mmc_pwrseq_emmc_reset, .free = mmc_pwrseq_emmc_free, }; diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index d10538bb5e07..2b16263458af 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -87,7 +87,7 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host) kfree(pwrseq); } -static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = { +static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = { .pre_power_on = mmc_pwrseq_simple_pre_power_on, .post_power_on = mmc_pwrseq_simple_post_power_on, .power_off = mmc_pwrseq_simple_power_off, diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 141eaa923e18..f2b164b214ae 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1128,16 +1128,8 @@ out: */ static int mmc_sd_resume(struct mmc_host *host) { - int err = 0; - - if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { - err = _mmc_sd_resume(host); - pm_runtime_set_active(&host->card->dev); - pm_runtime_mark_last_busy(&host->card->dev); - } pm_runtime_enable(&host->card->dev); - - return err; + return 0; } /* @@ -1165,12 +1157,9 @@ static int mmc_sd_runtime_resume(struct mmc_host *host) { int err; - if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) - return 0; - err = _mmc_sd_resume(host); - if (err) - pr_err("%s: error %d doing aggressive resume\n", + if (err && err != -ENOMEDIUM) + pr_err("%s: error %d doing runtime resume\n", mmc_hostname(host), err); return 0; diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 16d838e6d623..d61ba1a0495e 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -630,7 +630,7 @@ try_again: */ if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, - ocr); + ocr_card); if (err == -EAGAIN) { sdio_reset(host); mmc_go_idle(host); diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 7e327a6dd53d..86f5b3223aae 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -322,6 +322,7 @@ int sdio_add_func(struct sdio_func *func) sdio_set_of_node(func); sdio_acpi_set_handle(func); + device_enable_async_suspend(&func->dev); ret = device_add(&func->dev); if (ret == 0) sdio_func_set_present(func); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 1dee533634c9..1526b8a10b09 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -455,6 +455,7 @@ config MMC_TIFM_SD config MMC_MVSDIO tristate "Marvell MMC/SD/SDIO host driver" depends on PLAT_ORION + depends on OF ---help--- This selects the Marvell SDIO host driver. 
SDIO may currently be found on the Kirkwood 88F6281 and 88F6192 diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h deleted file mode 100644 index 0aa44e679df4..000000000000 --- a/drivers/mmc/host/atmel-mci-regs.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Atmel MultiMedia Card Interface driver - * - * Copyright (C) 2004-2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors - * Registers and bitfields marked with [2] are only available in MCI2 - */ - -#ifndef __DRIVERS_MMC_ATMEL_MCI_H__ -#define __DRIVERS_MMC_ATMEL_MCI_H__ - -/* MCI Register Definitions */ -#define ATMCI_CR 0x0000 /* Control */ -# define ATMCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ -# define ATMCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ -# define ATMCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */ -# define ATMCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */ -# define ATMCI_CR_SWRST ( 1 << 7) /* Software Reset */ -#define ATMCI_MR 0x0004 /* Mode */ -# define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ -# define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ -# define ATMCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ -# define ATMCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ -# define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */ -# define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */ -# define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */ -# define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */ -#define ATMCI_DTOR 0x0008 /* Data Timeout */ -# define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ -# define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ -#define ATMCI_SDCR 0x000c /* SD Card / SDIO */ -# define ATMCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ -# define ATMCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ -# define ATMCI_SDCSEL_MASK ( 3 << 0) -# define ATMCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ -# define ATMCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ -# define ATMCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */ -# define ATMCI_SDCBUS_MASK ( 3 << 6) -#define ATMCI_ARGR 0x0010 /* Command Argument */ -#define ATMCI_CMDR 0x0014 /* Command */ -# define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ -# define ATMCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ -# define ATMCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ -# define ATMCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ -# define ATMCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ -# define ATMCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */ -# define ATMCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ -# define ATMCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ -# define ATMCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ -# define ATMCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ -# define ATMCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ -# define ATMCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ -# define ATMCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ -# define ATMCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ -# define ATMCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ -# define ATMCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ -# define ATMCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ -# define ATMCI_CMDR_STREAM ( 2 << 19) 
/* MMC Stream transfer */ -# define ATMCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ -# define ATMCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ -# define ATMCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ -# define ATMCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ -#define ATMCI_BLKR 0x0018 /* Block */ -# define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */ -# define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ -#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ -# define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ -# define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ -#define ATMCI_RSPR 0x0020 /* Response 0 */ -#define ATMCI_RSPR1 0x0024 /* Response 1 */ -#define ATMCI_RSPR2 0x0028 /* Response 2 */ -#define ATMCI_RSPR3 0x002c /* Response 3 */ -#define ATMCI_RDR 0x0030 /* Receive Data */ -#define ATMCI_TDR 0x0034 /* Transmit Data */ -#define ATMCI_SR 0x0040 /* Status */ -#define ATMCI_IER 0x0044 /* Interrupt Enable */ -#define ATMCI_IDR 0x0048 /* Interrupt Disable */ -#define ATMCI_IMR 0x004c /* Interrupt Mask */ -# define ATMCI_CMDRDY ( 1 << 0) /* Command Ready */ -# define ATMCI_RXRDY ( 1 << 1) /* Receiver Ready */ -# define ATMCI_TXRDY ( 1 << 2) /* Transmitter Ready */ -# define ATMCI_BLKE ( 1 << 3) /* Data Block Ended */ -# define ATMCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ -# define ATMCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ -# define ATMCI_ENDRX ( 1 << 6) /* End of RX Buffer */ -# define ATMCI_ENDTX ( 1 << 7) /* End of TX Buffer */ -# define ATMCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ -# define ATMCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ -# define ATMCI_SDIOWAIT ( 1 << 12) /* SDIO Read Wait Operation Status */ -# define ATMCI_CSRCV ( 1 << 13) /* CE-ATA Completion Signal Received */ -# define ATMCI_RXBUFF ( 1 << 14) /* RX Buffer Full */ -# define ATMCI_TXBUFE ( 1 << 15) /* TX Buffer Empty */ -# define ATMCI_RINDE ( 1 << 16) /* Response Index Error */ -# define ATMCI_RDIRE ( 1 << 17) /* Response Direction Error */ -# define ATMCI_RCRCE ( 1 << 18) /* Response CRC Error */ -# define ATMCI_RENDE ( 1 << 19) /* Response End Bit Error */ -# define ATMCI_RTOE ( 1 << 20) /* Response Time-Out Error */ -# define ATMCI_DCRCE ( 1 << 21) /* Data CRC Error */ -# define ATMCI_DTOE ( 1 << 22) /* Data Time-Out Error */ -# define ATMCI_CSTOE ( 1 << 23) /* Completion Signal Time-out Error */ -# define ATMCI_BLKOVRE ( 1 << 24) /* DMA Block Overrun Error */ -# define ATMCI_DMADONE ( 1 << 25) /* DMA Transfer Done */ -# define ATMCI_FIFOEMPTY ( 1 << 26) /* FIFO Empty Flag */ -# define ATMCI_XFRDONE ( 1 << 27) /* Transfer Done Flag */ -# define ATMCI_ACKRCV ( 1 << 28) /* Boot Operation Acknowledge Received */ -# define ATMCI_ACKRCVE ( 1 << 29) /* Boot Operation Acknowledge Error */ -# define ATMCI_OVRE ( 1 << 30) /* RX Overrun Error */ -# define ATMCI_UNRE ( 1 << 31) /* TX Underrun Error */ -#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */ -# define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ -# define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ -# define ATMCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */ -#define ATMCI_CFG 0x0054 /* Configuration[2] */ -# define ATMCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */ -# define ATMCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */ -# define ATMCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */ -# define ATMCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */ -#define 
ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */ -# define ATMCI_WP_EN ( 1 << 0) /* WP Enable */ -# define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */ -#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */ -# define ATMCI_GET_WP_VS(x) ((x) & 0x0f) -# define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) -#define ATMCI_VERSION 0x00FC /* Version */ -#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */ - -/* This is not including the FIFO Aperture on MCI2 */ -#define ATMCI_REGS_SIZE 0x100 - -/* Register access macros */ -#ifdef CONFIG_AVR32 -#define atmci_readl(port, reg) \ - __raw_readl((port)->regs + reg) -#define atmci_writel(port, reg, value) \ - __raw_writel((value), (port)->regs + reg) -#else -#define atmci_readl(port, reg) \ - readl_relaxed((port)->regs + reg) -#define atmci_writel(port, reg, value) \ - writel_relaxed((value), (port)->regs + reg) -#endif - -/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */ -#ifdef CONFIG_AVR32 -# define ATMCI_PDC_CONNECTED 0 -#else -# define ATMCI_PDC_CONNECTED 1 -#endif - -/* - * Fix sconfig's burst size according to atmel MCI. We need to convert them as: - * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. - * - * This can be done by finding most significant bit set. - */ -static inline unsigned int atmci_convert_chksize(unsigned int maxburst) -{ - if (maxburst > 1) - return fls(maxburst) - 2; - else - return 0; -} - -#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index bf62e429f7fc..a36ebdae2388 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -44,7 +44,141 @@ #include <asm/io.h> #include <asm/unaligned.h> -#include "atmel-mci-regs.h" +/* + * Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors + * Registers and bitfields marked with [2] are only available in MCI2 + */ + +/* MCI Register Definitions */ +#define ATMCI_CR 0x0000 /* Control */ +#define ATMCI_CR_MCIEN BIT(0) /* MCI Enable */ +#define ATMCI_CR_MCIDIS BIT(1) /* MCI Disable */ +#define ATMCI_CR_PWSEN BIT(2) /* Power Save Enable */ +#define ATMCI_CR_PWSDIS BIT(3) /* Power Save Disable */ +#define ATMCI_CR_SWRST BIT(7) /* Software Reset */ +#define ATMCI_MR 0x0004 /* Mode */ +#define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ +#define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ +#define ATMCI_MR_RDPROOF BIT(11) /* Read Proof */ +#define ATMCI_MR_WRPROOF BIT(12) /* Write Proof */ +#define ATMCI_MR_PDCFBYTE BIT(13) /* Force Byte Transfer */ +#define ATMCI_MR_PDCPADV BIT(14) /* Padding Value */ +#define ATMCI_MR_PDCMODE BIT(15) /* PDC-oriented Mode */ +#define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */ +#define ATMCI_DTOR 0x0008 /* Data Timeout */ +#define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ +#define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ +#define ATMCI_SDCR 0x000c /* SD Card / SDIO */ +#define ATMCI_SDCSEL_SLOT_A (0 << 0) /* Select SD slot A */ +#define ATMCI_SDCSEL_SLOT_B (1 << 0) /* Select SD slot A */ +#define ATMCI_SDCSEL_MASK (3 << 0) +#define ATMCI_SDCBUS_1BIT (0 << 6) /* 1-bit data bus */ +#define ATMCI_SDCBUS_4BIT (2 << 6) /* 4-bit data bus */ +#define ATMCI_SDCBUS_8BIT (3 << 6) /* 8-bit data bus[2] */ +#define ATMCI_SDCBUS_MASK (3 << 6) +#define ATMCI_ARGR 0x0010 /* Command Argument */ +#define ATMCI_CMDR 0x0014 /* Command */ +#define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ +#define ATMCI_CMDR_RSPTYP_NONE (0 << 6) /* No response */ +#define ATMCI_CMDR_RSPTYP_48BIT (1 << 6) 
/* 48-bit response */ +#define ATMCI_CMDR_RSPTYP_136BIT (2 << 6) /* 136-bit response */ +#define ATMCI_CMDR_SPCMD_INIT (1 << 8) /* Initialization command */ +#define ATMCI_CMDR_SPCMD_SYNC (2 << 8) /* Synchronized command */ +#define ATMCI_CMDR_SPCMD_INT (4 << 8) /* Interrupt command */ +#define ATMCI_CMDR_SPCMD_INTRESP (5 << 8) /* Interrupt response */ +#define ATMCI_CMDR_OPDCMD (1 << 11) /* Open Drain */ +#define ATMCI_CMDR_MAXLAT_5CYC (0 << 12) /* Max latency 5 cycles */ +#define ATMCI_CMDR_MAXLAT_64CYC (1 << 12) /* Max latency 64 cycles */ +#define ATMCI_CMDR_START_XFER (1 << 16) /* Start data transfer */ +#define ATMCI_CMDR_STOP_XFER (2 << 16) /* Stop data transfer */ +#define ATMCI_CMDR_TRDIR_WRITE (0 << 18) /* Write data */ +#define ATMCI_CMDR_TRDIR_READ (1 << 18) /* Read data */ +#define ATMCI_CMDR_BLOCK (0 << 19) /* Single-block transfer */ +#define ATMCI_CMDR_MULTI_BLOCK (1 << 19) /* Multi-block transfer */ +#define ATMCI_CMDR_STREAM (2 << 19) /* MMC Stream transfer */ +#define ATMCI_CMDR_SDIO_BYTE (4 << 19) /* SDIO Byte transfer */ +#define ATMCI_CMDR_SDIO_BLOCK (5 << 19) /* SDIO Block transfer */ +#define ATMCI_CMDR_SDIO_SUSPEND (1 << 24) /* SDIO Suspend Command */ +#define ATMCI_CMDR_SDIO_RESUME (2 << 24) /* SDIO Resume Command */ +#define ATMCI_BLKR 0x0018 /* Block */ +#define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */ +#define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ +#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ +#define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ +#define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ +#define ATMCI_RSPR 0x0020 /* Response 0 */ +#define ATMCI_RSPR1 0x0024 /* Response 1 */ +#define ATMCI_RSPR2 0x0028 /* Response 2 */ +#define ATMCI_RSPR3 0x002c /* Response 3 */ +#define ATMCI_RDR 0x0030 /* Receive Data */ +#define ATMCI_TDR 0x0034 /* Transmit Data */ +#define ATMCI_SR 0x0040 /* Status */ +#define ATMCI_IER 0x0044 /* Interrupt Enable */ +#define ATMCI_IDR 0x0048 /* Interrupt Disable */ +#define ATMCI_IMR 0x004c /* Interrupt Mask */ +#define ATMCI_CMDRDY BIT(0) /* Command Ready */ +#define ATMCI_RXRDY BIT(1) /* Receiver Ready */ +#define ATMCI_TXRDY BIT(2) /* Transmitter Ready */ +#define ATMCI_BLKE BIT(3) /* Data Block Ended */ +#define ATMCI_DTIP BIT(4) /* Data Transfer In Progress */ +#define ATMCI_NOTBUSY BIT(5) /* Data Not Busy */ +#define ATMCI_ENDRX BIT(6) /* End of RX Buffer */ +#define ATMCI_ENDTX BIT(7) /* End of TX Buffer */ +#define ATMCI_SDIOIRQA BIT(8) /* SDIO IRQ in slot A */ +#define ATMCI_SDIOIRQB BIT(9) /* SDIO IRQ in slot B */ +#define ATMCI_SDIOWAIT BIT(12) /* SDIO Read Wait Operation Status */ +#define ATMCI_CSRCV BIT(13) /* CE-ATA Completion Signal Received */ +#define ATMCI_RXBUFF BIT(14) /* RX Buffer Full */ +#define ATMCI_TXBUFE BIT(15) /* TX Buffer Empty */ +#define ATMCI_RINDE BIT(16) /* Response Index Error */ +#define ATMCI_RDIRE BIT(17) /* Response Direction Error */ +#define ATMCI_RCRCE BIT(18) /* Response CRC Error */ +#define ATMCI_RENDE BIT(19) /* Response End Bit Error */ +#define ATMCI_RTOE BIT(20) /* Response Time-Out Error */ +#define ATMCI_DCRCE BIT(21) /* Data CRC Error */ +#define ATMCI_DTOE BIT(22) /* Data Time-Out Error */ +#define ATMCI_CSTOE BIT(23) /* Completion Signal Time-out Error */ +#define ATMCI_BLKOVRE BIT(24) /* DMA Block Overrun Error */ +#define ATMCI_DMADONE BIT(25) /* DMA Transfer Done */ +#define ATMCI_FIFOEMPTY BIT(26) /* FIFO Empty Flag */ +#define ATMCI_XFRDONE BIT(27) /* Transfer Done Flag */ +#define ATMCI_ACKRCV BIT(28) /* Boot Operation 
Acknowledge Received */ +#define ATMCI_ACKRCVE BIT(29) /* Boot Operation Acknowledge Error */ +#define ATMCI_OVRE BIT(30) /* RX Overrun Error */ +#define ATMCI_UNRE BIT(31) /* TX Underrun Error */ +#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */ +#define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ +#define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ +#define ATMCI_DMAEN BIT(8) /* DMA Hardware Handshaking Enable */ +#define ATMCI_CFG 0x0054 /* Configuration[2] */ +#define ATMCI_CFG_FIFOMODE_1DATA BIT(0) /* MCI Internal FIFO control mode */ +#define ATMCI_CFG_FERRCTRL_COR BIT(4) /* Flow Error flag reset control mode */ +#define ATMCI_CFG_HSMODE BIT(8) /* High Speed Mode */ +#define ATMCI_CFG_LSYNC BIT(12) /* Synchronize on the last block */ +#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */ +#define ATMCI_WP_EN BIT(0) /* WP Enable */ +#define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */ +#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */ +#define ATMCI_GET_WP_VS(x) ((x) & 0x0f) +#define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) +#define ATMCI_VERSION 0x00FC /* Version */ +#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */ + +/* This is not including the FIFO Aperture on MCI2 */ +#define ATMCI_REGS_SIZE 0x100 + +/* Register access macros */ +#define atmci_readl(port, reg) \ + __raw_readl((port)->regs + reg) +#define atmci_writel(port, reg, value) \ + __raw_writel((value), (port)->regs + reg) + +/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */ +#ifdef CONFIG_AVR32 +# define ATMCI_PDC_CONNECTED 0 +#else +# define ATMCI_PDC_CONNECTED 1 +#endif #define AUTOSUSPEND_DELAY 50 @@ -584,6 +718,29 @@ static inline unsigned int atmci_get_version(struct atmel_mci *host) return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; } +/* + * Fix sconfig's burst size according to atmel MCI. We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2, + * 8 -> 3, 16 -> 4. + * + * This can be done by finding most significant bit set. 
+ */ +static inline unsigned int atmci_convert_chksize(struct atmel_mci *host, + unsigned int maxburst) +{ + unsigned int version = atmci_get_version(host); + unsigned int offset = 2; + + if (version >= 0x600) + offset = 1; + + if (maxburst > 1) + return fls(maxburst) - offset; + else + return 0; +} + static void atmci_timeout_timer(unsigned long data) { struct atmel_mci *host; @@ -1034,11 +1191,13 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) if (data->flags & MMC_DATA_READ) { direction = DMA_FROM_DEVICE; host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; - maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst); + maxburst = atmci_convert_chksize(host, + host->dma_conf.src_maxburst); } else { direction = DMA_TO_DEVICE; host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; - maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); + maxburst = atmci_convert_chksize(host, + host->dma_conf.dst_maxburst); } if (host->caps.has_dma_conf_reg) diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h index 8984ec878fc9..8ecd9e56636a 100644 --- a/drivers/mmc/host/cb710-mmc.h +++ b/drivers/mmc/host/cb710-mmc.h @@ -29,8 +29,7 @@ static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot) static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc) { - struct platform_device *pdev = container_of(mmc_dev(mmc), - struct platform_device, dev); + struct platform_device *pdev = to_platform_device(mmc_dev(mmc)); return cb710_pdev_to_slot(pdev); } diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 7e1d13b68b06..81bdeeb05a4d 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -60,7 +60,7 @@ int dw_mci_pltfm_register(struct platform_device *pdev, regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* Get registers' physical base address */ - host->phy_regs = (void *)(regs->start); + host->phy_regs = regs->start; host->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(host->regs)) return PTR_ERR(host->regs); diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index 9becebeeccd1..d9c92f31da64 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -239,20 +239,12 @@ static int dw_mci_rockchip_init(struct dw_mci *host) return 0; } -/* Common capabilities of RK3288 SoC */ -static unsigned long dw_mci_rk3288_dwmmc_caps[4] = { - MMC_CAP_RUNTIME_RESUME, /* emmc */ - MMC_CAP_RUNTIME_RESUME, /* sdmmc */ - MMC_CAP_RUNTIME_RESUME, /* sdio0 */ - MMC_CAP_RUNTIME_RESUME, /* sdio1 */ -}; static const struct dw_mci_drv_data rk2928_drv_data = { .prepare_command = dw_mci_rockchip_prepare_command, .init = dw_mci_rockchip_init, }; static const struct dw_mci_drv_data rk3288_drv_data = { - .caps = dw_mci_rk3288_dwmmc_caps, .prepare_command = dw_mci_rockchip_prepare_command, .set_ios = dw_mci_rk3288_set_ios, .execute_tuning = dw_mci_rk3288_execute_tuning, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 7a6cedbe48a8..712835177e8b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -699,7 +699,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, int ret = 0; /* Set external dma config: burst size, burst width */ - cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset); + cfg.dst_addr = host->phy_regs + fifo_offset; cfg.src_addr = cfg.dst_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ 
-1634,12 +1634,6 @@ static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) else cmd->error = 0; - if (cmd->error) { - /* newer ip versions need a delay between retries */ - if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) - mdelay(20); - } - return cmd->error; } @@ -2355,16 +2349,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) pending = mci_readl(host, MINTSTS); /* read-only mask reg */ - /* - * DTO fix - version 2.10a and below, and only if internal DMA - * is configured. - */ - if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { - if (!pending && - ((mci_readl(host, STATUS) >> 17) & 0x1fff)) - pending |= SDMMC_INT_DATA_OVER; - } - if (pending) { /* Check volt switch first, since it can look like an error */ if ((host->state == STATE_SENDING_CMD11) && @@ -3165,9 +3149,6 @@ int dw_mci_probe(struct dw_mci *host) /* Now that slots are all setup, we can enable card detect */ dw_mci_enable_cd(host); - if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) - dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); - return 0; err_dmaunmap: diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 33dfd7e72516..82a97ac4e956 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -972,7 +972,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) { data->bytes_xfered = data->blocks * data->blksz; } else { - dev_err(host->dev, "interrupt events: %x\n", events); + dev_dbg(host->dev, "interrupt events: %x\n", events); msdc_reset_hw(host); host->error |= REQ_DAT_ERR; data->bytes_xfered = 0; @@ -982,10 +982,10 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, else if (events & MSDC_INT_DATCRCERR) data->error = -EILSEQ; - dev_err(host->dev, "%s: cmd=%d; blocks=%d", + dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", __func__, mrq->cmd->opcode, data->blocks); - dev_err(host->dev, "data_error=%d xfer_size=%d\n", - (int)data->error, data->bytes_xfered); + dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", + (int)data->error, data->bytes_xfered); } msdc_data_xfer_next(host, mrq, data); @@ -1543,7 +1543,6 @@ static int msdc_drv_probe(struct platform_device *pdev) mmc->f_min = host->src_clk_freq / (4 * 255); mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; - mmc->caps |= MMC_CAP_RUNTIME_RESUME; /* MMC core transfer sizes tunable parameters */ mmc->max_segs = MAX_BD_NUM; mmc->max_seg_size = BDMA_DESC_BUFLEN; diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index a448498e3af2..42296e55b9de 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -20,15 +20,12 @@ #include <linux/scatterlist.h> #include <linux/irq.h> #include <linux/clk.h> -#include <linux/gpio.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> #include <asm/sizes.h> #include <asm/unaligned.h> -#include <linux/platform_data/mmc-mvsdio.h> #include "mvsdio.h" @@ -704,6 +701,10 @@ static int mvsd_probe(struct platform_device *pdev) struct resource *r; int ret, irq; + if (!np) { + dev_err(&pdev->dev, "no DT node\n"); + return -ENODEV; + } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!r || irq < 0) @@ -727,8 +728,12 @@ static int mvsd_probe(struct platform_device *pdev) * fixed rate clock). 
*/ host->clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(host->clk)) - clk_prepare_enable(host->clk); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "no clock associated\n"); + ret = -EINVAL; + goto out; + } + clk_prepare_enable(host->clk); mmc->ops = &mvsd_ops; @@ -744,45 +749,10 @@ static int mvsd_probe(struct platform_device *pdev) mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; - if (np) { - if (IS_ERR(host->clk)) { - dev_err(&pdev->dev, "DT platforms must have a clock associated\n"); - ret = -EINVAL; - goto out; - } - - host->base_clock = clk_get_rate(host->clk) / 2; - ret = mmc_of_parse(mmc); - if (ret < 0) - goto out; - } else { - const struct mvsdio_platform_data *mvsd_data; - - mvsd_data = pdev->dev.platform_data; - if (!mvsd_data) { - ret = -ENXIO; - goto out; - } - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ | - MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; - host->base_clock = mvsd_data->clock / 2; - /* GPIO 0 regarded as invalid for backward compatibility */ - if (mvsd_data->gpio_card_detect && - gpio_is_valid(mvsd_data->gpio_card_detect)) { - ret = mmc_gpio_request_cd(mmc, - mvsd_data->gpio_card_detect, - 0); - if (ret) - goto out; - } else { - mmc->caps |= MMC_CAP_NEEDS_POLL; - } - - if (mvsd_data->gpio_write_protect && - gpio_is_valid(mvsd_data->gpio_write_protect)) - mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect); - } - + host->base_clock = clk_get_rate(host->clk) / 2; + ret = mmc_of_parse(mmc); + if (ret < 0) + goto out; if (maxfreq) mmc->f_max = maxfreq; diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 6e218fb1a669..660170cd04d9 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -55,8 +55,8 @@ static int of_mmc_spi_init(struct device *dev, { struct of_mmc_spi *oms = to_of_mmc_spi(dev); - return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0, - dev_name(dev), mmc); + return request_threaded_irq(oms->detect_irq, NULL, irqhandler, + IRQF_ONESHOT, dev_name(dev), mmc); } static void of_mmc_spi_exit(struct device *dev, void *mmc) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 7fb0753abe30..b6639ea0bf18 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2250,10 +2250,8 @@ static int omap_hsmmc_remove(struct platform_device *pdev) pm_runtime_get_sync(host->dev); mmc_remove_host(host->mmc); - if (host->tx_chan) - dma_release_channel(host->tx_chan); - if (host->rx_chan) - dma_release_channel(host->rx_chan); + dma_release_channel(host->tx_chan); + dma_release_channel(host->rx_chan); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1f1582f6cccb..f25f29253595 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -76,6 +76,7 @@ #define ESDHC_STD_TUNING_EN (1 << 24) /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ #define ESDHC_TUNING_START_TAP 0x1 +#define ESDHC_TUNING_STEP_MASK 0x00070000 #define ESDHC_TUNING_STEP_SHIFT 16 /* pinctrl state */ @@ -489,9 +490,11 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) m |= ESDHC_MIX_CTRL_FBCLK_SEL; tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL); tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP; - if (imx_data->boarddata.tuning_step) + if (imx_data->boarddata.tuning_step) { + tuning_ctrl &= ~ESDHC_TUNING_STEP_MASK; 
tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT; - writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL); + } + writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL); } else { v &= ~ESDHC_MIX_CTRL_EXE_TUNE; } diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 06d0b50dfe71..7e7d8f0c9438 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -21,6 +21,8 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include "sdhci-pltfm.h" @@ -51,6 +53,60 @@ static const struct of_device_id sdhci_at91_dt_match[] = { {} }; +#ifdef CONFIG_PM +static int sdhci_at91_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = pltfm_host->priv; + int ret; + + ret = sdhci_runtime_suspend_host(host); + + clk_disable_unprepare(priv->gck); + clk_disable_unprepare(priv->hclock); + clk_disable_unprepare(priv->mainck); + + return ret; +} + +static int sdhci_at91_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = pltfm_host->priv; + int ret; + + ret = clk_prepare_enable(priv->mainck); + if (ret) { + dev_err(dev, "can't enable mainck\n"); + return ret; + } + + ret = clk_prepare_enable(priv->hclock); + if (ret) { + dev_err(dev, "can't enable hclock\n"); + return ret; + } + + ret = clk_prepare_enable(priv->gck); + if (ret) { + dev_err(dev, "can't enable gck\n"); + return ret; + } + + return sdhci_runtime_resume_host(host); +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend, + sdhci_at91_runtime_resume, + NULL) +}; + static int sdhci_at91_probe(struct platform_device *pdev) { const struct of_device_id *match; @@ -144,12 +200,23 @@ static int sdhci_at91_probe(struct platform_device *pdev) sdhci_get_of_property(pdev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + ret = sdhci_add_host(host); if (ret) - goto clocks_disable_unprepare; + goto pm_runtime_disable; + + pm_runtime_put_autosuspend(&pdev->dev); return 0; +pm_runtime_disable: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); clocks_disable_unprepare: clk_disable_unprepare(priv->gck); clk_disable_unprepare(priv->mainck); @@ -165,6 +232,10 @@ static int sdhci_at91_remove(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_at91_priv *priv = pltfm_host->priv; + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + sdhci_pltfm_unregister(pdev); clk_disable_unprepare(priv->gck); @@ -178,7 +249,7 @@ static struct platform_driver sdhci_at91_driver = { .driver = { .name = "sdhci-at91", .of_match_table = sdhci_at91_dt_match, - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_at91_dev_pm_ops, }, .probe = sdhci_at91_probe, .remove = sdhci_at91_remove, diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 90e94a028a49..83b1226471c1 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ 
b/drivers/mmc/host/sdhci-of-esdhc.c @@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device_node *np; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_esdhc *esdhc; int ret; np = pdev->dev.of_node; @@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) sdhci_get_of_property(pdev); + pltfm_host = sdhci_priv(host); + esdhc = pltfm_host->priv; + if (esdhc->vendor_ver == VENDOR_V_22) + host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; + + if (esdhc->vendor_ver > VENDOR_V_22) + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + if (of_device_is_compatible(np, "fsl,p5040-esdhc") || of_device_is_compatible(np, "fsl,p5020-esdhc") || of_device_is_compatible(np, "fsl,p4080-esdhc") || diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index cf7ad458b4f4..cc851b065d0a 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host, if (sdhci_pci_spt_drive_strength > 0) drive_strength = sdhci_pci_spt_drive_strength & 0xf; else - drive_strength = 1; /* 33-ohm */ + drive_strength = 0; /* Default 50-ohm */ if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0) drive_strength = 0; /* Default 50-ohm */ @@ -1464,7 +1464,7 @@ static int sdhci_pci_resume(struct device *dev) static int sdhci_pci_runtime_suspend(struct device *dev) { - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; @@ -1500,7 +1500,7 @@ err_pci_runtime_suspend: static int sdhci_pci_runtime_resume(struct device *dev) { - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 87fb5ea8ebe7..072bb27a65cf 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -104,7 +104,8 @@ void sdhci_get_of_property(struct platform_device *pdev) if (of_find_property(np, "keep-power-in-suspend", NULL)) host->mmc->pm_caps |= MMC_PM_KEEP_POWER; - if (of_find_property(np, "enable-sdio-wakeup", NULL)) + if (of_property_read_bool(np, "wakeup-source") || + of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; } #else diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index ad28b49f0203..83c4bf7bc16c 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -22,12 +22,20 @@ #include <linux/of_device.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/slot-gpio.h> #include <linux/gpio/consumer.h> #include "sdhci-pltfm.h" /* Tegra SDHOST controller vendor register definitions */ +#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100 +#define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000 +#define SDHCI_CLOCK_CTRL_TAP_SHIFT 16 +#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5) +#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3) +#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2) + #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 @@ -37,9 +45,9 @@ #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) 
-#define NVQUIRK_DISABLE_SDR50 BIT(3) -#define NVQUIRK_DISABLE_SDR104 BIT(4) -#define NVQUIRK_DISABLE_DDR50 BIT(5) +#define NVQUIRK_ENABLE_SDR50 BIT(3) +#define NVQUIRK_ENABLE_SDR104 BIT(4) +#define NVQUIRK_ENABLE_DDR50 BIT(5) struct sdhci_tegra_soc_data { const struct sdhci_pltfm_data *pdata; @@ -49,6 +57,7 @@ struct sdhci_tegra_soc_data { struct sdhci_tegra { const struct sdhci_tegra_soc_data *soc_data; struct gpio_desc *power_gpio; + bool ddr_signaling; }; static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) @@ -124,25 +133,33 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_tegra *tegra_host = pltfm_host->priv; const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; - u32 misc_ctrl; + u32 misc_ctrl, clk_ctrl; sdhci_reset(host, mask); if (!(mask & SDHCI_RESET_ALL)) return; - misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); + misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); /* Erratum: Enable SDHCI spec v3.00 support */ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; - /* Don't advertise UHS modes which aren't supported yet */ - if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50) - misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50; - if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50) - misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50; - if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104) - misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104; - sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); + /* Advertise UHS modes as supported by host */ + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50; + if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; + sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); + + clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE; + if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) + clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; + sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + + tegra_host->ddr_signaling = false; } static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) @@ -164,15 +181,99 @@ static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); } +static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = pltfm_host->priv; + unsigned long host_clk; + + if (!clock) + return; + + host_clk = tegra_host->ddr_signaling ? 
clock * 2 : clock; + clk_set_rate(pltfm_host->clk, host_clk); + host->max_clk = clk_get_rate(pltfm_host->clk); + + return sdhci_set_clock(host, clock); +} + +static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host, + unsigned timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = pltfm_host->priv; + + if (timing == MMC_TIMING_UHS_DDR50) + tegra_host->ddr_signaling = true; + + return sdhci_set_uhs_signaling(host, timing); +} + +static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + /* + * DDR modes require the host to run at double the card frequency, so + * the maximum rate we can support is half of the module input clock. + */ + return clk_round_rate(pltfm_host->clk, UINT_MAX) / 2; +} + +static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap) +{ + u32 reg; + + reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK; + reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT; + sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); +} + +static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + unsigned int min, max; + + /* + * Start search for minimum tap value at 10, as smaller values + * may wrongly be reported as working but fail at higher speeds, + * according to the TRM. + */ + min = 10; + while (min < 255) { + tegra_sdhci_set_tap(host, min); + if (!mmc_send_tuning(host->mmc, opcode, NULL)) + break; + min++; + } + + /* Find the maximum tap value that still passes. */ + max = min + 1; + while (max < 255) { + tegra_sdhci_set_tap(host, max); + if (mmc_send_tuning(host->mmc, opcode, NULL)) { + max--; + break; + } + max++; + } + + /* The TRM states the ideal tap value is at 75% in the passing range.
*/ + tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4)); + + return mmc_send_tuning(host->mmc, opcode, NULL); +} + static const struct sdhci_ops tegra_sdhci_ops = { .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, - .set_clock = sdhci_set_clock, + .set_clock = tegra_sdhci_set_clock, .set_bus_width = tegra_sdhci_set_bus_width, .reset = tegra_sdhci_reset, - .set_uhs_signaling = sdhci_set_uhs_signaling, - .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .platform_execute_tuning = tegra_sdhci_execute_tuning, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .get_max_clock = tegra_sdhci_get_max_clock, }; static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { @@ -184,7 +285,7 @@ static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { .ops = &tegra_sdhci_ops, }; -static struct sdhci_tegra_soc_data soc_data_tegra20 = { +static const struct sdhci_tegra_soc_data soc_data_tegra20 = { .pdata = &sdhci_tegra20_pdata, .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | NVQUIRK_ENABLE_BLOCK_GAP_DET, @@ -197,14 +298,15 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = { SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .ops = &tegra_sdhci_ops, }; -static struct sdhci_tegra_soc_data soc_data_tegra30 = { +static const struct sdhci_tegra_soc_data soc_data_tegra30 = { .pdata = &sdhci_tegra30_pdata, .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 | - NVQUIRK_DISABLE_SDR50 | - NVQUIRK_DISABLE_SDR104, + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104, }; static const struct sdhci_ops tegra114_sdhci_ops = { @@ -212,11 +314,12 @@ static const struct sdhci_ops tegra114_sdhci_ops = { .read_w = tegra_sdhci_readw, .write_w = tegra_sdhci_writew, .write_l = tegra_sdhci_writel, - .set_clock = sdhci_set_clock, + .set_clock = tegra_sdhci_set_clock, .set_bus_width = tegra_sdhci_set_bus_width, .reset = tegra_sdhci_reset, - .set_uhs_signaling = sdhci_set_uhs_signaling, - .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .platform_execute_tuning = tegra_sdhci_execute_tuning, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .get_max_clock = tegra_sdhci_get_max_clock, }; static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { @@ -226,17 +329,34 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .ops = &tegra114_sdhci_ops, }; -static struct sdhci_tegra_soc_data soc_data_tegra114 = { +static const struct sdhci_tegra_soc_data soc_data_tegra114 = { .pdata = &sdhci_tegra114_pdata, - .nvquirks = NVQUIRK_DISABLE_SDR50 | - NVQUIRK_DISABLE_DDR50 | - NVQUIRK_DISABLE_SDR104, + .nvquirks = NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_DDR50 | + NVQUIRK_ENABLE_SDR104, +}; + +static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &tegra114_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra210 = { + .pdata = &sdhci_tegra210_pdata, }; static const struct of_device_id sdhci_tegra_dt_match[] = { + { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, { .compatible = "nvidia,tegra124-sdhci", .data = 
&soc_data_tegra114 }, { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, @@ -271,6 +391,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) rc = -ENOMEM; goto err_alloc_tegra_host; } + tegra_host->ddr_signaling = false; tegra_host->soc_data = soc_data; pltfm_host->priv = tegra_host; @@ -278,6 +399,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev) if (rc) goto err_parse_dt; + if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) + host->mmc->caps |= MMC_CAP_1_8V_DDR; + tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", GPIOD_OUT_HIGH); if (IS_ERR(tegra_host->power_gpio)) { diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index b48565ed5616..d622435d1bcc 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -492,7 +492,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, host->align_buffer, host->align_buffer_sz, direction); if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) goto fail; - BUG_ON(host->align_addr & host->align_mask); + BUG_ON(host->align_addr & SDHCI_ADMA2_MASK); host->sg_count = sdhci_pre_dma_transfer(host, data); if (host->sg_count < 0) @@ -514,8 +514,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, * the (up to three) bytes that screw up the * alignment. */ - offset = (host->align_sz - (addr & host->align_mask)) & - host->align_mask; + offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & + SDHCI_ADMA2_MASK; if (offset) { if (data->flags & MMC_DATA_WRITE) { buffer = sdhci_kmap_atomic(sg, &flags); @@ -529,8 +529,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, BUG_ON(offset > 65536); - align += host->align_sz; - align_addr += host->align_sz; + align += SDHCI_ADMA2_ALIGN; + align_addr += SDHCI_ADMA2_ALIGN; desc += host->desc_sz; @@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, BUG_ON(len > 65536); - /* tran, valid */ - sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID); - desc += host->desc_sz; + if (len) { + /* tran, valid */ + sdhci_adma_write_desc(host, desc, addr, len, + ADMA2_TRAN_VALID); + desc += host->desc_sz; + } /* * If this triggers then we have a calculation bug @@ -608,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, /* Do a quick scan of the SG list for any unaligned mappings */ has_unaligned = false; for_each_sg(data->sg, sg, host->sg_count, i) - if (sg_dma_address(sg) & host->align_mask) { + if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { has_unaligned = true; break; } @@ -620,15 +623,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host, align = host->align_buffer; for_each_sg(data->sg, sg, host->sg_count, i) { - if (sg_dma_address(sg) & host->align_mask) { - size = host->align_sz - - (sg_dma_address(sg) & host->align_mask); + if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { + size = SDHCI_ADMA2_ALIGN - + (sg_dma_address(sg) & SDHCI_ADMA2_MASK); buffer = sdhci_kmap_atomic(sg, &flags); memcpy(buffer, align, size); sdhci_kunmap_atomic(buffer, &flags); - align += host->align_sz; + align += SDHCI_ADMA2_ALIGN; } } } @@ -768,8 +771,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) if (unlikely(broken)) { for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->length & 0x3) { - DBG("Reverting to PIO because of " - "transfer size (%d)\n", + DBG("Reverting to PIO because of transfer size (%d)\n", sg->length); host->flags &= ~SDHCI_REQ_USE_DMA; break; 
@@ -803,8 +805,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) if (unlikely(broken)) { for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->offset & 0x3) { - DBG("Reverting to PIO because of " - "bad alignment\n"); + DBG("Reverting to PIO because of bad alignment\n"); host->flags &= ~SDHCI_REQ_USE_DMA; break; } @@ -1016,8 +1017,8 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { if (timeout == 0) { - pr_err("%s: Controller never released " - "inhibit bit(s).\n", mmc_hostname(host->mmc)); + pr_err("%s: Controller never released inhibit bit(s).\n", + mmc_hostname(host->mmc)); sdhci_dumpregs(host); cmd->error = -EIO; tasklet_schedule(&host->finish_tasklet); @@ -1254,8 +1255,8 @@ clock_set: while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) & SDHCI_CLOCK_INT_STABLE)) { if (timeout == 0) { - pr_err("%s: Internal clock never " - "stabilised.\n", mmc_hostname(host->mmc)); + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); sdhci_dumpregs(host); return; } @@ -1274,19 +1275,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, struct mmc_host *mmc = host->mmc; u8 pwr = 0; - if (!IS_ERR(mmc->supply.vmmc)) { - spin_unlock_irq(&host->lock); - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); - spin_lock_irq(&host->lock); - - if (mode != MMC_POWER_OFF) - sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); - else - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); - - return; - } - if (mode != MMC_POWER_OFF) { switch (1 << vdd) { case MMC_VDD_165_195: @@ -1301,7 +1289,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, pwr = SDHCI_POWER_330; break; default: - BUG(); + WARN(1, "%s: Invalid vdd %#x\n", + mmc_hostname(host->mmc), vdd); + break; } } @@ -1345,6 +1335,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) mdelay(10); } + + if (!IS_ERR(mmc->supply.vmmc)) { + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + } } /*****************************************************************************\ @@ -1540,8 +1536,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; else { - pr_warn("%s: invalid driver type, default to " - "driver type B\n", mmc_hostname(mmc)); + pr_warn("%s: invalid driver type, default to driver type B\n", + mmc_hostname(mmc)); ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; } @@ -2015,10 +2011,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) spin_lock_irqsave(&host->lock, flags); if (!host->tuning_done) { - pr_info(DRIVER_NAME ": Timeout waiting for " - "Buffer Read Ready interrupt during tuning " - "procedure, falling back to fixed sampling " - "clock\n"); + pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ctrl &= ~SDHCI_CTRL_TUNED_CLK; ctrl &= ~SDHCI_CTRL_EXEC_TUNING; @@ -2046,9 +2039,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); } if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { - pr_info(DRIVER_NAME ": Tuning procedure" - " failed, falling back to fixed sampling" - " clock\n"); + pr_info(DRIVER_NAME ": Tuning procedure failed, falling 
back to fixed sampling clock\n"); err = -EIO; } @@ -2293,8 +2284,8 @@ static void sdhci_timeout_timer(unsigned long data) spin_lock_irqsave(&host->lock, flags); if (host->mrq) { - pr_err("%s: Timeout waiting for hardware " - "interrupt.\n", mmc_hostname(host->mmc)); + pr_err("%s: Timeout waiting for hardware interrupt.\n", + mmc_hostname(host->mmc)); sdhci_dumpregs(host); if (host->data) { @@ -2325,9 +2316,8 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) BUG_ON(intmask == 0); if (!host->cmd) { - pr_err("%s: Got command interrupt 0x%08x even " - "though no command operation was in progress.\n", - mmc_hostname(host->mmc), (unsigned)intmask); + pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", + mmc_hostname(host->mmc), (unsigned)intmask); sdhci_dumpregs(host); return; } @@ -2356,8 +2346,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) */ if (host->cmd->flags & MMC_RSP_BUSY) { if (host->cmd->data) - DBG("Cannot wait for busy signal when also " - "doing a data transfer"); + DBG("Cannot wait for busy signal when also doing a data transfer"); else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && !host->busy_handle) { /* Mark that command complete before busy is ended */ @@ -2451,9 +2440,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) } } - pr_err("%s: Got data interrupt 0x%08x even " - "though no data operation was in progress.\n", - mmc_hostname(host->mmc), (unsigned)intmask); + pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", + mmc_hostname(host->mmc), (unsigned)intmask); sdhci_dumpregs(host); return; @@ -2760,7 +2748,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host) static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) { - if (host->runtime_suspended || host->bus_on) + if (host->bus_on) return; host->bus_on = true; pm_runtime_get_noresume(host->mmc->parent); @@ -2768,7 +2756,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) { - if (host->runtime_suspended || !host->bus_on) + if (!host->bus_on) return; host->bus_on = false; pm_runtime_put_noidle(host->mmc->parent); @@ -2896,9 +2884,8 @@ int sdhci_add_host(struct sdhci_host *host) host->version = (host->version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; if (host->version > SDHCI_SPEC_300) { - pr_err("%s: Unknown controller version (%d). " - "You may experience problems.\n", mmc_hostname(mmc), - host->version); + pr_err("%s: Unknown controller version (%d). You may experience problems.\n", + mmc_hostname(mmc), host->version); } caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 
host->caps : @@ -2967,24 +2954,17 @@ int sdhci_add_host(struct sdhci_host *host) if (host->flags & SDHCI_USE_64_BIT_DMA) { host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * SDHCI_ADMA2_64_DESC_SZ; - host->align_buffer_sz = SDHCI_MAX_SEGS * - SDHCI_ADMA2_64_ALIGN; host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; - host->align_sz = SDHCI_ADMA2_64_ALIGN; - host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; } else { host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * SDHCI_ADMA2_32_DESC_SZ; - host->align_buffer_sz = SDHCI_MAX_SEGS * - SDHCI_ADMA2_32_ALIGN; host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; - host->align_sz = SDHCI_ADMA2_32_ALIGN; - host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; } host->adma_table = dma_alloc_coherent(mmc_dev(mmc), host->adma_table_sz, &host->adma_addr, GFP_KERNEL); + host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); if (!host->adma_table || !host->align_buffer) { if (host->adma_table) @@ -2998,7 +2978,7 @@ int sdhci_add_host(struct sdhci_host *host) host->flags &= ~SDHCI_USE_ADMA; host->adma_table = NULL; host->align_buffer = NULL; - } else if (host->adma_addr & host->align_mask) { + } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) { pr_warn("%s: unable to allocate aligned ADMA descriptor\n", mmc_hostname(mmc)); host->flags &= ~SDHCI_USE_ADMA; @@ -3031,8 +3011,8 @@ int sdhci_add_host(struct sdhci_host *host) if (host->max_clk == 0 || host->quirks & SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { if (!host->ops->get_max_clock) { - pr_err("%s: Hardware doesn't specify base clock " - "frequency.\n", mmc_hostname(mmc)); + pr_err("%s: Hardware doesn't specify base clock frequency.\n", + mmc_hostname(mmc)); return -ENODEV; } host->max_clk = host->ops->get_max_clock(host); @@ -3294,8 +3274,8 @@ int sdhci_add_host(struct sdhci_host *host) mmc->ocr_avail_mmc &= host->ocr_avail_mmc; if (mmc->ocr_avail == 0) { - pr_err("%s: Hardware doesn't report any " - "support voltages.\n", mmc_hostname(mmc)); + pr_err("%s: Hardware doesn't report any support voltages.\n", + mmc_hostname(mmc)); return -ENODEV; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 9d4aa31b683a..7654ae5d2b4e 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -272,22 +272,27 @@ /* ADMA2 32-bit DMA descriptor size */ #define SDHCI_ADMA2_32_DESC_SZ 8 -/* ADMA2 32-bit DMA alignment */ -#define SDHCI_ADMA2_32_ALIGN 4 - /* ADMA2 32-bit descriptor */ struct sdhci_adma2_32_desc { __le16 cmd; __le16 len; __le32 addr; -} __packed __aligned(SDHCI_ADMA2_32_ALIGN); +} __packed __aligned(4); + +/* ADMA2 data alignment */ +#define SDHCI_ADMA2_ALIGN 4 +#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1) + +/* + * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte + * alignment for the descriptor table even in 32-bit DMA mode. Memory + * allocation is at least 8 byte aligned anyway, so just stipulate 8 always. + */ +#define SDHCI_ADMA2_DESC_ALIGN 8 /* ADMA2 64-bit DMA descriptor size */ #define SDHCI_ADMA2_64_DESC_SZ 12 -/* ADMA2 64-bit DMA alignment */ -#define SDHCI_ADMA2_64_ALIGN 8 - /* * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte * aligned. 
@@ -482,8 +487,6 @@ struct sdhci_host { dma_addr_t align_addr; /* Mapped bounce buffer */ unsigned int desc_sz; /* ADMA descriptor size */ - unsigned int align_sz; /* ADMA alignment */ - unsigned int align_mask; /* ADMA alignment mask */ struct tasklet_struct finish_tasklet; /* Tasklet structures */ diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index ad9ffea7d659..1ca8a1359cbc 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -397,38 +397,26 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) } static struct dma_chan * -sh_mmcif_request_dma_one(struct sh_mmcif_host *host, - struct sh_mmcif_plat_data *pdata, - enum dma_transfer_direction direction) +sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id) { - struct dma_slave_config cfg = { 0, }; - struct dma_chan *chan; - void *slave_data = NULL; - struct resource *res; - struct device *dev = sh_mmcif_host_to_dev(host); dma_cap_mask_t mask; - int ret; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); + if (slave_id <= 0) + return NULL; - if (pdata) - slave_data = direction == DMA_MEM_TO_DEV ? - (void *)pdata->slave_id_tx : - (void *)pdata->slave_id_rx; - - chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, - slave_data, dev, - direction == DMA_MEM_TO_DEV ? "tx" : "rx"); - - dev_dbg(dev, "%s: %s: got channel %p\n", __func__, - direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan); + return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id); +} - if (!chan) - return NULL; +static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host, + struct dma_chan *chan, + enum dma_transfer_direction direction) +{ + struct resource *res; + struct dma_slave_config cfg = { 0, }; res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); - cfg.direction = direction; if (direction == DMA_DEV_TO_MEM) { @@ -439,38 +427,42 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; } - ret = dmaengine_slave_config(chan, &cfg); - if (ret < 0) { - dma_release_channel(chan); - return NULL; - } - - return chan; + return dmaengine_slave_config(chan, &cfg); } -static void sh_mmcif_request_dma(struct sh_mmcif_host *host, - struct sh_mmcif_plat_data *pdata) +static void sh_mmcif_request_dma(struct sh_mmcif_host *host) { struct device *dev = sh_mmcif_host_to_dev(host); host->dma_active = false; - if (pdata) { - if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) - return; - } else if (!dev->of_node) { - return; + /* We can only either use DMA for both Tx and Rx or not use it at all */ + if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) { + struct sh_mmcif_plat_data *pdata = dev->platform_data; + + host->chan_tx = sh_mmcif_request_dma_pdata(host, + pdata->slave_id_tx); + host->chan_rx = sh_mmcif_request_dma_pdata(host, + pdata->slave_id_rx); + } else { + host->chan_tx = dma_request_slave_channel(dev, "tx"); + host->chan_rx = dma_request_slave_channel(dev, "rx"); } + dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, + host->chan_rx); - /* We can only either use DMA for both Tx and Rx or not use it at all */ - host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV); - if (!host->chan_tx) - return; + if (!host->chan_tx || !host->chan_rx || + sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) || + sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM)) + goto error; - host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM); - if (!host->chan_rx) { +
return; + +error: + if (host->chan_tx) dma_release_channel(host->chan_tx); - host->chan_tx = NULL; - } + if (host->chan_rx) + dma_release_channel(host->chan_rx); + host->chan_tx = host->chan_rx = NULL; } static void sh_mmcif_release_dma(struct sh_mmcif_host *host) @@ -1102,7 +1094,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->power_mode == MMC_POWER_UP) { if (!host->card_present) { /* See if we also get DMA */ - sh_mmcif_request_dma(host, dev->platform_data); + sh_mmcif_request_dma(host); host->card_present = true; } sh_mmcif_set_power(host, ios); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 4498e92116b8..b47122d3e8d8 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work) struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); struct mmc_request *mrq = host->mrq; struct mmc_data *data = mrq ? mrq->data : NULL; - struct scatterlist *sg = host->sg ?: data->sg; + struct scatterlist *sg; dev_warn(mmc_dev(host->mmc), "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", @@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work) case USDHI6_WAIT_FOR_MWRITE: case USDHI6_WAIT_FOR_READ: case USDHI6_WAIT_FOR_WRITE: + sg = host->sg ?: data->sg; dev_dbg(mmc_dev(host->mmc), "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx, diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index 3dad2111b7e3..70bb403f69f7 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c @@ -30,7 +30,7 @@ struct pcmciamtd_dev { struct pcmcia_device *p_dev; - caddr_t win_base; /* ioremapped address of PCMCIA window */ + void __iomem *win_base; /* ioremapped address of PCMCIA window */ unsigned int win_size; /* size of window */ unsigned int offset; /* offset into card the window currently points at */ struct map_info pcmcia_map; @@ -80,7 +80,7 @@ MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)") /* read/write{8,16} copy_{from,to} routines with window remapping * to access whole card */ -static caddr_t remap_window(struct map_info *map, unsigned long to) +static void __iomem *remap_window(struct map_info *map, unsigned long to) { struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; struct resource *win = (struct resource *) map->map_priv_2; @@ -107,7 +107,7 @@ static caddr_t remap_window(struct map_info *map, unsigned long to) static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs) { - caddr_t addr; + void __iomem *addr; map_word d = {{0}}; addr = remap_window(map, ofs); @@ -122,7 +122,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs) static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs) { - caddr_t addr; + void __iomem *addr; map_word d = {{0}}; addr = remap_window(map, ofs); @@ -143,7 +143,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long pr_debug("to = %p from = %lu len = %zd\n", to, from, len); while(len) { int toread = win_size - (from & (win_size-1)); - caddr_t addr; + void __iomem *addr; if(toread > len) toread = len; @@ -163,7 +163,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr) { - caddr_t addr = 
remap_window(map, adr); + void __iomem *addr = remap_window(map, adr); if(!addr) return; @@ -175,7 +175,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr) { - caddr_t addr = remap_window(map, adr); + void __iomem *addr = remap_window(map, adr); if(!addr) return; @@ -192,7 +192,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v pr_debug("to = %lu from = %p len = %zd\n", to, from, len); while(len) { int towrite = win_size - (to & (win_size-1)); - caddr_t addr; + void __iomem *addr; if(towrite > len) towrite = len; @@ -216,7 +216,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v static map_word pcmcia_read8(struct map_info *map, unsigned long ofs) { - caddr_t win_base = (caddr_t)map->map_priv_2; + void __iomem *win_base = (void __iomem *)map->map_priv_2; map_word d = {{0}}; if(DEV_REMOVED(map)) @@ -231,7 +231,7 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs) static map_word pcmcia_read16(struct map_info *map, unsigned long ofs) { - caddr_t win_base = (caddr_t)map->map_priv_2; + void __iomem *win_base = (void __iomem *)map->map_priv_2; map_word d = {{0}}; if(DEV_REMOVED(map)) @@ -246,7 +246,7 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs) static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { - caddr_t win_base = (caddr_t)map->map_priv_2; + void __iomem *win_base = (void __iomem *)map->map_priv_2; if(DEV_REMOVED(map)) return; @@ -258,7 +258,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) { - caddr_t win_base = (caddr_t)map->map_priv_2; + void __iomem *win_base = (void __iomem *)map->map_priv_2; if(DEV_REMOVED(map)) return; @@ -271,7 +271,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) { - caddr_t win_base = (caddr_t)map->map_priv_2; + void __iomem *win_base = (void __iomem *)map->map_priv_2; if(DEV_REMOVED(map)) return; @@ -284,7 +284,7 @@ static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { - caddr_t win_base = (caddr_t)map->map_priv_2; + void __iomem *win_base = (void __iomem *)map->map_priv_2; if(DEV_REMOVED(map)) return; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 95c13b2ffa79..ffa288474820 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -426,15 +426,6 @@ int add_mtd_device(struct mtd_info *mtd) mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; - if (mtd->dev.parent) { - if (!mtd->owner && mtd->dev.parent->driver) - mtd->owner = mtd->dev.parent->driver->owner; - if (!mtd->name) - mtd->name = dev_name(mtd->dev.parent); - } else { - pr_debug("mtd device won't show a device symlink in sysfs\n"); - } - /* Some chips always power up locked. 
Unlock them now */ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) { error = mtd_unlock(mtd, 0, mtd->size); @@ -549,6 +540,21 @@ static int mtd_add_device_partitions(struct mtd_info *mtd, return 0; } +/* + * Set a few defaults based on the parent devices, if not provided by the + * driver + */ +static void mtd_set_dev_defaults(struct mtd_info *mtd) +{ + if (mtd->dev.parent) { + if (!mtd->owner && mtd->dev.parent->driver) + mtd->owner = mtd->dev.parent->driver->owner; + if (!mtd->name) + mtd->name = dev_name(mtd->dev.parent); + } else { + pr_debug("mtd device won't show a device symlink in sysfs\n"); + } +} /** * mtd_device_parse_register - parse partitions and register an MTD device. @@ -587,6 +593,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, int ret; struct mtd_partition *real_parts = NULL; + mtd_set_dev_defaults(mtd); + ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data); if (ret <= 0 && nr_parts && parts) { real_parts = kmemdup(parts, sizeof(*parts) * nr_parts, diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 669c3452f278..9ed6038e47d2 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master, ofpart_node = of_get_child_by_name(mtd_node, "partitions"); if (!ofpart_node) { - pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", - master->name, mtd_node->full_name); + /* + * We might get here even when ofpart isn't used at all (e.g., + * when using another parser), so don't be louder than + * KERN_DEBUG + */ + pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", + master->name, mtd_node->full_name); ofpart_node = mtd_node; dedicated = false; + } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) { + /* The 'partitions' subnode might be used by another parser */ + return 0; } /* First count the subnodes */ diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 49883905a434..32477c4eb421 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -516,8 +516,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) status_old = read_sr(nor); /* Cannot unlock; would unlock larger region than requested */ - if (stm_is_locked_sr(nor, status_old, ofs - mtd->erasesize, - mtd->erasesize)) + if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize, + status_old)) return -EINVAL; /* @@ -1200,8 +1200,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || JEDEC_MFR(info) == SNOR_MFR_INTEL || - JEDEC_MFR(info) == SNOR_MFR_SST || - JEDEC_MFR(info) == SNOR_MFR_WINBOND) { + JEDEC_MFR(info) == SNOR_MFR_SST) { write_enable(nor); write_sr(nor, 0); } @@ -1217,8 +1216,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) mtd->_read = spi_nor_read; /* NOR protection support for STmicro/Micron chips and similar */ - if (JEDEC_MFR(info) == SNOR_MFR_MICRON || - JEDEC_MFR(info) == SNOR_MFR_WINBOND) { + if (JEDEC_MFR(info) == SNOR_MFR_MICRON) { nor->flash_lock = stm_lock; nor->flash_unlock = stm_unlock; nor->flash_is_locked = stm_is_locked; diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index b077e43b5ba9..c4cb15a3098c 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -236,7 +236,7 @@ int ubi_debugfs_init(void) dfs_rootdir = 
debugfs_create_dir("ubi", NULL); if (IS_ERR_OR_NULL(dfs_rootdir)) { - int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); + int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV; pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n", err); diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 1fc23e48fe8e..10cf3b549959 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto exit; - crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); + crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); if (hdr_crc != crc) { ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x", diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index eb4489f9082f..17ec948ac40e 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, return 0; } +static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk); /** * do_sync_erase - run the erase worker synchronously. * @ubi: UBI device description object @@ -615,22 +616,19 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int vol_id, int lnum, int torture) { - struct ubi_work *wl_wrk; + struct ubi_work wl_wrk; dbg_wl("sync erase of PEB %i", e->pnum); - wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); - if (!wl_wrk) - return -ENOMEM; + wl_wrk.e = e; + wl_wrk.vol_id = vol_id; + wl_wrk.lnum = lnum; + wl_wrk.torture = torture; - wl_wrk->e = e; - wl_wrk->vol_id = vol_id; - wl_wrk->lnum = lnum; - wl_wrk->torture = torture; - - return erase_worker(ubi, wl_wrk, 0); + return __erase_worker(ubi, &wl_wrk); } +static int ensure_wear_leveling(struct ubi_device *ubi, int nested); /** * wear_leveling_worker - wear-leveling worker function. * @ubi: UBI device description object @@ -652,6 +650,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, #endif struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; + int dst_leb_clean = 0; kfree(wrk); if (shutdown) @@ -756,6 +755,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); if (err && err != UBI_IO_BITFLIPS) { + dst_leb_clean = 1; if (err == UBI_IO_FF) { /* * We are trying to move PEB without a VID header. UBI @@ -801,10 +801,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * protection queue. 
*/ protect = 1; + dst_leb_clean = 1; goto out_not_moved; } if (err == MOVE_RETRY) { scrubbing = 1; + dst_leb_clean = 1; goto out_not_moved; } if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || @@ -830,6 +832,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ubi->erroneous_peb_count); goto out_error; } + dst_leb_clean = 1; erroneous = 1; goto out_not_moved; } @@ -900,15 +903,24 @@ out_not_moved: wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); + if (dst_leb_clean) { + wl_tree_add(e2, &ubi->free); + ubi->free_count++; + } + ubi_assert(!ubi->move_to_put); ubi->move_from = ubi->move_to = NULL; ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); ubi_free_vid_hdr(ubi, vid_hdr); - err = do_sync_erase(ubi, e2, vol_id, lnum, torture); - if (err) - goto out_ro; + if (dst_leb_clean) { + ensure_wear_leveling(ubi, 1); + } else { + err = do_sync_erase(ubi, e2, vol_id, lnum, torture); + if (err) + goto out_ro; + } mutex_unlock(&ubi->move_mutex); return 0; @@ -1014,7 +1026,7 @@ out_unlock: } /** - * erase_worker - physical eraseblock erase worker function. + * __erase_worker - physical eraseblock erase worker function. * @ubi: UBI device description object * @wl_wrk: the work object * @shutdown: non-zero if the worker has to free memory and exit @@ -1025,8 +1037,7 @@ out_unlock: * needed. Returns zero in case of success and a negative error code in case of * failure. */ -static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, - int shutdown) +static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) { struct ubi_wl_entry *e = wl_wrk->e; int pnum = e->pnum; @@ -1034,21 +1045,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, int lnum = wl_wrk->lnum; int err, available_consumed = 0; - if (shutdown) { - dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); - kfree(wl_wrk); - wl_entry_destroy(ubi, e); - return 0; - } - dbg_wl("erase PEB %d EC %d LEB %d:%d", pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { - /* Fine, we've erased it successfully */ - kfree(wl_wrk); - spin_lock(&ubi->wl_lock); wl_tree_add(e, &ubi->free); ubi->free_count++; @@ -1066,7 +1067,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, } ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err); - kfree(wl_wrk); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) { @@ -1075,6 +1075,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, vol_id, lnum, 0); if (err1) { + wl_entry_destroy(ubi, e); err = err1; goto out_ro; } @@ -1150,6 +1151,25 @@ out_ro: return err; } +static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, + int shutdown) +{ + int ret; + + if (shutdown) { + struct ubi_wl_entry *e = wl_wrk->e; + + dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec); + kfree(wl_wrk); + wl_entry_destroy(ubi, e); + return 0; + } + + ret = __erase_worker(ubi, wl_wrk); + kfree(wl_wrk); + return ret; +} + /** * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. 
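The do_sync_erase()/__erase_worker()/erase_worker() split above has a simple shape: the core routine no longer allocates, frees, or handles shutdown, so the synchronous caller can pass a stack work item while the asynchronous wrapper keeps ownership of the kmalloc'd one. A small stand-alone sketch of that ownership split, using illustrative names (work_item, do_work, async_worker) rather than UBI's:

/*
 * Core routine: no allocation, no freeing - usable with stack or heap
 * items, like __erase_worker() after the hunk above.
 */
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int id;
};

static int do_work(struct work_item *w)
{
	printf("processing item %d\n", w->id);
	return 0;
}

/* Async wrapper: owns (and frees) the heap-allocated item. */
static int async_worker(struct work_item *w, int shutdown)
{
	int ret;

	if (shutdown) {
		free(w);
		return 0;
	}
	ret = do_work(w);
	free(w);
	return ret;
}

/* Sync path: a stack item, so there is no allocation failure to handle. */
static int do_sync_work(int id)
{
	struct work_item w = { .id = id };

	return do_work(&w);
}

int main(void)
{
	struct work_item *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->id = 1;
	async_worker(w, 0);		/* wrapper frees w */
	return do_sync_work(2);		/* no heap use at all */
}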
* @ubi: UBI device description object diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 940e2ebbdea8..4cbb8b27a891 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -93,7 +93,8 @@ enum ad_link_speed_type { AD_LINK_SPEED_10000MBPS, AD_LINK_SPEED_20000MBPS, AD_LINK_SPEED_40000MBPS, - AD_LINK_SPEED_56000MBPS + AD_LINK_SPEED_56000MBPS, + AD_LINK_SPEED_100000MBPS, }; /* compare MAC addresses */ @@ -258,6 +259,7 @@ static inline int __check_agg_selection_timer(struct port *port) * %AD_LINK_SPEED_20000MBPS * %AD_LINK_SPEED_40000MBPS * %AD_LINK_SPEED_56000MBPS + * %AD_LINK_SPEED_100000MBPS */ static u16 __get_link_speed(struct port *port) { @@ -305,6 +307,10 @@ static u16 __get_link_speed(struct port *port) speed = AD_LINK_SPEED_56000MBPS; break; + case SPEED_100000: + speed = AD_LINK_SPEED_100000MBPS; + break; + default: /* unknown speed value from ethtool. shouldn't happen */ speed = 0; @@ -681,6 +687,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) case AD_LINK_SPEED_56000MBPS: bandwidth = aggregator->num_of_ports * 56000; break; + case AD_LINK_SPEED_100000MBPS: + bandwidth = aggregator->num_of_ports * 100000; + break; default: bandwidth = 0; /* to silence the compiler */ } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9e0f8a7ef8b1..56b560558884 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -830,7 +830,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } new_active->delay = 0; - bond_set_slave_link_state(new_active, BOND_LINK_UP); + bond_set_slave_link_state(new_active, BOND_LINK_UP, + BOND_SLAVE_NOTIFY_NOW); if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_handle_link_change(new_active, BOND_LINK_UP); @@ -1066,12 +1067,12 @@ static netdev_features_t bond_fix_features(struct net_device *dev, return features; } -#define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ +#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) -#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\ - NETIF_F_ALL_TSO) +#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_RXCSUM | NETIF_F_ALL_TSO) static void bond_compute_features(struct bonding *bond) { @@ -1198,26 +1199,42 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) return ret; } -static int bond_master_upper_dev_link(struct net_device *bond_dev, - struct net_device *slave_dev, - struct slave *slave) +static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) { + switch (BOND_MODE(bond)) { + case BOND_MODE_ROUNDROBIN: + return NETDEV_LAG_TX_TYPE_ROUNDROBIN; + case BOND_MODE_ACTIVEBACKUP: + return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + case BOND_MODE_BROADCAST: + return NETDEV_LAG_TX_TYPE_BROADCAST; + case BOND_MODE_XOR: + case BOND_MODE_8023AD: + return NETDEV_LAG_TX_TYPE_HASH; + default: + return NETDEV_LAG_TX_TYPE_UNKNOWN; + } +} + +static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave) +{ + struct netdev_lag_upper_info lag_upper_info; int err; - err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave); + lag_upper_info.tx_type = bond_lag_tx_type(bond); + err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave, + &lag_upper_info); if (err) return err; - slave_dev->flags |= IFF_SLAVE; - rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); + rtmsg_ifinfo(RTM_NEWLINK, 
slave->dev, IFF_SLAVE, GFP_KERNEL); return 0; } -static void bond_upper_dev_unlink(struct net_device *bond_dev, - struct net_device *slave_dev) +static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) { - netdev_upper_dev_unlink(slave_dev, bond_dev); - slave_dev->flags &= ~IFF_SLAVE; - rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); + netdev_upper_dev_unlink(slave->dev, bond->dev); + slave->dev->flags &= ~IFF_SLAVE; + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL); } static struct slave *bond_alloc_slave(struct bonding *bond) @@ -1299,6 +1316,16 @@ void bond_queue_slave_event(struct slave *slave) queue_delayed_work(slave->bond->wq, &nnw->work, 0); } +void bond_lower_state_changed(struct slave *slave) +{ + struct netdev_lag_lower_state_info info; + + info.link_up = slave->link == BOND_LINK_UP || + slave->link == BOND_LINK_FAIL; + info.tx_enabled = bond_is_active_slave(slave); + netdev_lower_state_changed(slave->dev, &info); +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1351,7 +1378,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) * the current ifenslave will set the interface down prior to * enslaving it; the old ifenslave will not. */ - if ((slave_dev->flags & IFF_UP)) { + if (slave_dev->flags & IFF_UP) { netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n", slave_dev->name); res = -EPERM; @@ -1465,6 +1492,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } } + /* set slave flag before open to prevent IPv6 addrconf */ + slave_dev->flags |= IFF_SLAVE; + /* open the slave since the application closed it */ res = dev_open(slave_dev); if (res) { @@ -1563,21 +1593,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { if (bond->params.updelay) { bond_set_slave_link_state(new_slave, - BOND_LINK_BACK); + BOND_LINK_BACK, + BOND_SLAVE_NOTIFY_NOW); new_slave->delay = bond->params.updelay; } else { bond_set_slave_link_state(new_slave, - BOND_LINK_UP); + BOND_LINK_UP, + BOND_SLAVE_NOTIFY_NOW); } } else { - bond_set_slave_link_state(new_slave, BOND_LINK_DOWN); + bond_set_slave_link_state(new_slave, BOND_LINK_DOWN, + BOND_SLAVE_NOTIFY_NOW); } } else if (bond->params.arp_interval) { bond_set_slave_link_state(new_slave, (netif_carrier_ok(slave_dev) ? 
- BOND_LINK_UP : BOND_LINK_DOWN)); + BOND_LINK_UP : BOND_LINK_DOWN), + BOND_SLAVE_NOTIFY_NOW); } else { - bond_set_slave_link_state(new_slave, BOND_LINK_UP); + bond_set_slave_link_state(new_slave, BOND_LINK_UP, + BOND_SLAVE_NOTIFY_NOW); } if (new_slave->link != BOND_LINK_DOWN) @@ -1662,7 +1697,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_detach; } - res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave); + res = bond_master_upper_dev_link(bond, new_slave); if (res) { netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res); goto err_unregister; @@ -1698,7 +1733,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* Undo stages on error */ err_upper_unlink: - bond_upper_dev_unlink(bond_dev, slave_dev); + bond_upper_dev_unlink(bond, new_slave); err_unregister: netdev_rx_handler_unregister(slave_dev); @@ -1725,6 +1760,7 @@ err_close: dev_close(slave_dev); err_restore_mac: + slave_dev->flags &= ~IFF_SLAVE; if (!bond->params.fail_over_mac || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* XXX TODO - fom follow mode needs to change master's @@ -1799,12 +1835,14 @@ static int __bond_release_one(struct net_device *bond_dev, return -EINVAL; } + bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); + bond_sysfs_slave_del(slave); /* recompute stats just before removing the slave */ bond_get_stats(bond->dev, &bond->bond_stats); - bond_upper_dev_unlink(bond_dev, slave_dev); + bond_upper_dev_unlink(bond, slave); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ @@ -1996,7 +2034,8 @@ static int bond_miimon_inspect(struct bonding *bond) if (link_state) continue; - bond_set_slave_link_state(slave, BOND_LINK_FAIL); + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_LATER); slave->delay = bond->params.downdelay; if (slave->delay) { netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", @@ -2011,7 +2050,8 @@ static int bond_miimon_inspect(struct bonding *bond) case BOND_LINK_FAIL: if (link_state) { /* recovered before downdelay expired */ - bond_set_slave_link_state(slave, BOND_LINK_UP); + bond_set_slave_link_state(slave, BOND_LINK_UP, + BOND_SLAVE_NOTIFY_LATER); slave->last_link_up = jiffies; netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", (bond->params.downdelay - slave->delay) * @@ -2033,7 +2073,8 @@ static int bond_miimon_inspect(struct bonding *bond) if (!link_state) continue; - bond_set_slave_link_state(slave, BOND_LINK_BACK); + bond_set_slave_link_state(slave, BOND_LINK_BACK, + BOND_SLAVE_NOTIFY_LATER); slave->delay = bond->params.updelay; if (slave->delay) { @@ -2047,7 +2088,8 @@ static int bond_miimon_inspect(struct bonding *bond) case BOND_LINK_BACK: if (!link_state) { bond_set_slave_link_state(slave, - BOND_LINK_DOWN); + BOND_LINK_DOWN, + BOND_SLAVE_NOTIFY_LATER); netdev_info(bond->dev, "link status down again after %d ms for interface %s\n", (bond->params.updelay - slave->delay) * bond->params.miimon, @@ -2085,7 +2127,8 @@ static void bond_miimon_commit(struct bonding *bond) continue; case BOND_LINK_UP: - bond_set_slave_link_state(slave, BOND_LINK_UP); + bond_set_slave_link_state(slave, BOND_LINK_UP, + BOND_SLAVE_NOTIFY_NOW); slave->last_link_up = jiffies; primary = rtnl_dereference(bond->primary_slave); @@ -2125,7 +2168,8 @@ static void bond_miimon_commit(struct bonding *bond) if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; - 
bond_set_slave_link_state(slave, BOND_LINK_DOWN); + bond_set_slave_link_state(slave, BOND_LINK_DOWN, + BOND_SLAVE_NOTIFY_NOW); if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || BOND_MODE(bond) == BOND_MODE_8023AD) @@ -2708,7 +2752,8 @@ static void bond_ab_arp_commit(struct bonding *bond) struct slave *current_arp_slave; current_arp_slave = rtnl_dereference(bond->current_arp_slave); - bond_set_slave_link_state(slave, BOND_LINK_UP); + bond_set_slave_link_state(slave, BOND_LINK_UP, + BOND_SLAVE_NOTIFY_NOW); if (current_arp_slave) { bond_set_slave_inactive_flags( current_arp_slave, @@ -2731,7 +2776,8 @@ static void bond_ab_arp_commit(struct bonding *bond) if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; - bond_set_slave_link_state(slave, BOND_LINK_DOWN); + bond_set_slave_link_state(slave, BOND_LINK_DOWN, + BOND_SLAVE_NOTIFY_NOW); bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); @@ -2810,7 +2856,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) * up when it is actually down */ if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { - bond_set_slave_link_state(slave, BOND_LINK_DOWN); + bond_set_slave_link_state(slave, BOND_LINK_DOWN, + BOND_SLAVE_NOTIFY_LATER); if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; @@ -2830,7 +2877,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) if (!new_slave) goto check_state; - bond_set_slave_link_state(new_slave, BOND_LINK_BACK); + bond_set_slave_link_state(new_slave, BOND_LINK_BACK, + BOND_SLAVE_NOTIFY_LATER); bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); bond_arp_send_all(bond, new_slave); new_slave->last_link_up = jiffies; @@ -2838,7 +2886,7 @@ static bool bond_ab_arp_probe(struct bonding *bond) check_state: bond_for_each_slave_rcu(bond, slave, iter) { - if (slave->should_notify) { + if (slave->should_notify || slave->should_notify_link) { should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; break; } @@ -2893,8 +2941,10 @@ re_arm: if (should_notify_peers) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); - if (should_notify_rtnl) + if (should_notify_rtnl) { bond_slave_state_notify(bond); + bond_slave_link_notify(bond); + } rtnl_unlock(); } @@ -4135,7 +4185,6 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; bond_dev->features |= bond_dev->hw_features; } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index f4ae72086215..e23c3ed737de 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -42,7 +42,6 @@ #include <net/bonding.h> -#define to_dev(obj) container_of(obj, struct device, kobj) #define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) /* "show" function for the bond_masters attribute. @@ -481,7 +480,7 @@ static ssize_t bonding_show_mii_status(struct device *d, char *buf) { struct bonding *bond = to_bond(d); - bool active = !!rcu_access_pointer(bond->curr_active_slave); + bool active = netif_carrier_ok(bond->dev); return sprintf(buf, "%s\n", active ? 
"up" : "down"); } diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 51670b322409..c71a03593595 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -32,6 +32,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/pm_runtime.h> #define DRIVER_NAME "xilinx_can" @@ -138,7 +139,7 @@ struct xcan_priv { u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg); void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg, u32 val); - struct net_device *dev; + struct device *dev; void __iomem *reg_base; unsigned long irq_flags; struct clk *bus_clk; @@ -840,6 +841,13 @@ static int xcan_open(struct net_device *ndev) struct xcan_priv *priv = netdev_priv(ndev); int ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + return ret; + } + ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, ndev->name, ndev); if (ret < 0) { @@ -847,29 +855,17 @@ static int xcan_open(struct net_device *ndev) goto err; } - ret = clk_prepare_enable(priv->can_clk); - if (ret) { - netdev_err(ndev, "unable to enable device clock\n"); - goto err_irq; - } - - ret = clk_prepare_enable(priv->bus_clk); - if (ret) { - netdev_err(ndev, "unable to enable bus clock\n"); - goto err_can_clk; - } - /* Set chip into reset mode */ ret = set_reset_mode(ndev); if (ret < 0) { netdev_err(ndev, "mode resetting failed!\n"); - goto err_bus_clk; + goto err_irq; } /* Common open */ ret = open_candev(ndev); if (ret) - goto err_bus_clk; + goto err_irq; ret = xcan_chip_start(ndev); if (ret < 0) { @@ -885,13 +881,11 @@ static int xcan_open(struct net_device *ndev) err_candev: close_candev(ndev); -err_bus_clk: - clk_disable_unprepare(priv->bus_clk); -err_can_clk: - clk_disable_unprepare(priv->can_clk); err_irq: free_irq(ndev->irq, ndev); err: + pm_runtime_put(priv->dev); + return ret; } @@ -908,12 +902,11 @@ static int xcan_close(struct net_device *ndev) netif_stop_queue(ndev); napi_disable(&priv->napi); xcan_chip_stop(ndev); - clk_disable_unprepare(priv->bus_clk); - clk_disable_unprepare(priv->can_clk); free_irq(ndev->irq, ndev); close_candev(ndev); can_led_event(ndev, CAN_LED_EVENT_STOP); + pm_runtime_put(priv->dev); return 0; } @@ -932,27 +925,20 @@ static int xcan_get_berr_counter(const struct net_device *ndev, struct xcan_priv *priv = netdev_priv(ndev); int ret; - ret = clk_prepare_enable(priv->can_clk); - if (ret) - goto err; - - ret = clk_prepare_enable(priv->bus_clk); - if (ret) - goto err_clk; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + return ret; + } bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); - clk_disable_unprepare(priv->bus_clk); - clk_disable_unprepare(priv->can_clk); + pm_runtime_put(priv->dev); return 0; - -err_clk: - clk_disable_unprepare(priv->can_clk); -err: - return ret; } @@ -965,15 +951,45 @@ static const struct net_device_ops xcan_netdev_ops = { /** * xcan_suspend - Suspend method for the driver - * @dev: Address of the platform_device structure + * @dev: Address of the device structure * * Put the driver into low power mode. 
- * Return: 0 always + * Return: 0 on success and failure value on error */ static int __maybe_unused xcan_suspend(struct device *dev) { - struct platform_device *pdev = dev_get_drvdata(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + if (!device_may_wakeup(dev)) + return pm_runtime_force_suspend(dev); + + return 0; +} + +/** + * xcan_resume - Resume from suspend + * @dev: Address of the device structure + * + * Resume operation after suspend. + * Return: 0 on success and failure value on error + */ +static int __maybe_unused xcan_resume(struct device *dev) +{ + if (!device_may_wakeup(dev)) + return pm_runtime_force_resume(dev); + + return 0; + +} + +/** + * xcan_runtime_suspend - Runtime suspend method for the driver + * @dev: Address of the device structure + * + * Put the driver into low power mode. + * Return: 0 always + */ +static int __maybe_unused xcan_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { @@ -984,43 +1000,55 @@ static int __maybe_unused xcan_suspend(struct device *dev) priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); priv->can.state = CAN_STATE_SLEEPING; - clk_disable(priv->bus_clk); - clk_disable(priv->can_clk); + clk_disable_unprepare(priv->bus_clk); + clk_disable_unprepare(priv->can_clk); return 0; } /** - * xcan_resume - Resume from suspend - * @dev: Address of the platformdevice structure + * xcan_runtime_resume - Runtime resume from suspend + * @dev: Address of the device structure * * Resume operation after suspend. * Return: 0 on success and failure value on error */ -static int __maybe_unused xcan_resume(struct device *dev) +static int __maybe_unused xcan_runtime_resume(struct device *dev) { - struct platform_device *pdev = dev_get_drvdata(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); int ret; + u32 isr, status; - ret = clk_enable(priv->bus_clk); + ret = clk_prepare_enable(priv->bus_clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); return ret; } - ret = clk_enable(priv->can_clk); + ret = clk_prepare_enable(priv->can_clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); clk_disable_unprepare(priv->bus_clk); return ret; } - priv->write_reg(priv, XCAN_MSR_OFFSET, 0); - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + status = priv->read_reg(priv, XCAN_SR_OFFSET); if (netif_running(ndev)) { + if (isr & XCAN_IXR_BSOFF_MASK) { + priv->can.state = CAN_STATE_BUS_OFF; + priv->write_reg(priv, XCAN_SRR_OFFSET, + XCAN_SRR_RESET_MASK); + } else if ((status & XCAN_SR_ESTAT_MASK) == + XCAN_SR_ESTAT_MASK) { + priv->can.state = CAN_STATE_ERROR_PASSIVE; + } else if (status & XCAN_SR_ERRWRN_MASK) { + priv->can.state = CAN_STATE_ERROR_WARNING; + } else { + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } netif_device_attach(ndev); netif_start_queue(ndev); } @@ -1028,7 +1056,10 @@ static int __maybe_unused xcan_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume); +static const struct dev_pm_ops xcan_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume) + SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) +}; /** * xcan_probe - Platform registration call @@ -1069,7 +1100,7 @@ static 
int xcan_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(ndev); - priv->dev = ndev; + priv->dev = &pdev->dev; priv->can.bittiming_const = &xcan_bittiming_const; priv->can.do_set_mode = xcan_do_set_mode; priv->can.do_get_berr_counter = xcan_get_berr_counter; @@ -1111,21 +1142,17 @@ static int xcan_probe(struct platform_device *pdev) } } - ret = clk_prepare_enable(priv->can_clk); - if (ret) { - dev_err(&pdev->dev, "unable to enable device clock\n"); - goto err_free; - } - - ret = clk_prepare_enable(priv->bus_clk); - if (ret) { - dev_err(&pdev->dev, "unable to enable bus clock\n"); - goto err_unprepare_disable_dev; - } - priv->write_reg = xcan_write_reg_le; priv->read_reg = xcan_read_reg_le; + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + goto err_pmdisable; + } + if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) { priv->write_reg = xcan_write_reg_be; priv->read_reg = xcan_read_reg_be; @@ -1138,22 +1165,23 @@ static int xcan_probe(struct platform_device *pdev) ret = register_candev(ndev); if (ret) { dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret); - goto err_unprepare_disable_busclk; + goto err_disableclks; } devm_can_led_init(ndev); - clk_disable_unprepare(priv->bus_clk); - clk_disable_unprepare(priv->can_clk); + + pm_runtime_put(&pdev->dev); + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, priv->tx_max); return 0; -err_unprepare_disable_busclk: - clk_disable_unprepare(priv->bus_clk); -err_unprepare_disable_dev: - clk_disable_unprepare(priv->can_clk); +err_disableclks: + pm_runtime_put(priv->dev); +err_pmdisable: + pm_runtime_disable(&pdev->dev); err_free: free_candev(ndev); err: @@ -1172,10 +1200,8 @@ static int xcan_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct xcan_priv *priv = netdev_priv(ndev); - if (set_reset_mode(ndev) < 0) - netdev_err(ndev, "mode resetting failed!\n"); - unregister_candev(ndev); + pm_runtime_disable(&pdev->dev); netif_napi_del(&priv->napi); free_candev(ndev); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index b06dba05594a..9fe33fc3c2b9 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -19,6 +19,7 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/gpio/consumer.h> #include <linux/phy.h> #include <net/dsa.h> #include <net/switchdev.h> @@ -616,98 +617,112 @@ static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val) } static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = { - { "in_good_octets", 8, 0x00, }, - { "in_bad_octets", 4, 0x02, }, - { "in_unicast", 4, 0x04, }, - { "in_broadcasts", 4, 0x06, }, - { "in_multicasts", 4, 0x07, }, - { "in_pause", 4, 0x16, }, - { "in_undersize", 4, 0x18, }, - { "in_fragments", 4, 0x19, }, - { "in_oversize", 4, 0x1a, }, - { "in_jabber", 4, 0x1b, }, - { "in_rx_error", 4, 0x1c, }, - { "in_fcs_error", 4, 0x1d, }, - { "out_octets", 8, 0x0e, }, - { "out_unicast", 4, 0x10, }, - { "out_broadcasts", 4, 0x13, }, - { "out_multicasts", 4, 0x12, }, - { "out_pause", 4, 0x15, }, - { "excessive", 4, 0x11, }, - { "collisions", 4, 0x1e, }, - { "deferred", 4, 0x05, }, - { "single", 4, 0x14, }, - { "multiple", 4, 0x17, }, - { "out_fcs_error", 4, 0x03, }, - { "late", 4, 0x1f, }, - { "hist_64bytes", 4, 0x08, }, - { "hist_65_127bytes", 4, 0x09, }, - 
{ "hist_128_255bytes", 4, 0x0a, }, - { "hist_256_511bytes", 4, 0x0b, }, - { "hist_512_1023bytes", 4, 0x0c, }, - { "hist_1024_max_bytes", 4, 0x0d, }, - /* Not all devices have the following counters */ - { "sw_in_discards", 4, 0x110, }, - { "sw_in_filtered", 2, 0x112, }, - { "sw_out_filtered", 2, 0x113, }, - + { "in_good_octets", 8, 0x00, BANK0, }, + { "in_bad_octets", 4, 0x02, BANK0, }, + { "in_unicast", 4, 0x04, BANK0, }, + { "in_broadcasts", 4, 0x06, BANK0, }, + { "in_multicasts", 4, 0x07, BANK0, }, + { "in_pause", 4, 0x16, BANK0, }, + { "in_undersize", 4, 0x18, BANK0, }, + { "in_fragments", 4, 0x19, BANK0, }, + { "in_oversize", 4, 0x1a, BANK0, }, + { "in_jabber", 4, 0x1b, BANK0, }, + { "in_rx_error", 4, 0x1c, BANK0, }, + { "in_fcs_error", 4, 0x1d, BANK0, }, + { "out_octets", 8, 0x0e, BANK0, }, + { "out_unicast", 4, 0x10, BANK0, }, + { "out_broadcasts", 4, 0x13, BANK0, }, + { "out_multicasts", 4, 0x12, BANK0, }, + { "out_pause", 4, 0x15, BANK0, }, + { "excessive", 4, 0x11, BANK0, }, + { "collisions", 4, 0x1e, BANK0, }, + { "deferred", 4, 0x05, BANK0, }, + { "single", 4, 0x14, BANK0, }, + { "multiple", 4, 0x17, BANK0, }, + { "out_fcs_error", 4, 0x03, BANK0, }, + { "late", 4, 0x1f, BANK0, }, + { "hist_64bytes", 4, 0x08, BANK0, }, + { "hist_65_127bytes", 4, 0x09, BANK0, }, + { "hist_128_255bytes", 4, 0x0a, BANK0, }, + { "hist_256_511bytes", 4, 0x0b, BANK0, }, + { "hist_512_1023bytes", 4, 0x0c, BANK0, }, + { "hist_1024_max_bytes", 4, 0x0d, BANK0, }, + { "sw_in_discards", 4, 0x10, PORT, }, + { "sw_in_filtered", 2, 0x12, PORT, }, + { "sw_out_filtered", 2, 0x13, PORT, }, + { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, }, }; -static bool have_sw_in_discards(struct dsa_switch *ds) +static bool mv88e6xxx_has_stat(struct dsa_switch *ds, + struct mv88e6xxx_hw_stat *stat) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - switch (ps->id) { - case PORT_SWITCH_ID_6095: case 
PORT_SWITCH_ID_6161: - case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171: - case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176: - case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185: - case PORT_SWITCH_ID_6352: + switch (stat->type) { + case BANK0: return true; - default: - return false; - } -} - -static void _mv88e6xxx_get_strings(struct dsa_switch *ds, - int nr_stats, - struct mv88e6xxx_hw_stat *stats, - int port, uint8_t *data) -{ - int i; - - for (i = 0; i < nr_stats; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - stats[i].string, ETH_GSTRING_LEN); + case BANK1: + return mv88e6xxx_6320_family(ds); + case PORT: + return mv88e6xxx_6095_family(ds) || + mv88e6xxx_6185_family(ds) || + mv88e6xxx_6097_family(ds) || + mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || + mv88e6xxx_6352_family(ds); } + return false; } static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, - int stat, - struct mv88e6xxx_hw_stat *stats, + struct mv88e6xxx_hw_stat *s, int port) { - struct mv88e6xxx_hw_stat *s = stats + stat; u32 low; u32 high = 0; int ret; u64 value; - if (s->reg >= 0x100) { - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), - s->reg - 0x100); + switch (s->type) { + case PORT: + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg); if (ret < 0) return UINT64_MAX; low = ret; if (s->sizeof_stat == 4) { ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), - s->reg - 0x100 + 1); + s->reg + 1); if (ret < 0) return UINT64_MAX; high = ret; } - } else { + break; + case BANK0: + case BANK1: _mv88e6xxx_stats_read(ds, s->reg, &low); if (s->sizeof_stat == 8) _mv88e6xxx_stats_read(ds, s->reg + 1, &high); @@ -716,61 +731,59 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, return value; } -static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, - int nr_stats, - struct mv88e6xxx_hw_stat *stats, - int port, uint64_t *data) +void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - int i; - - mutex_lock(&ps->smi_mutex); + struct mv88e6xxx_hw_stat *stat; + int i, j; - ret = _mv88e6xxx_stats_snapshot(ds, port); - if (ret < 0) { - mutex_unlock(&ps->smi_mutex); - return; + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(ds, stat)) { + memcpy(data + j * ETH_GSTRING_LEN, stat->string, + ETH_GSTRING_LEN); + j++; + } } - - /* Read each of the counters. 
*/ - for (i = 0; i < nr_stats; i++) - data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port); - - mutex_unlock(&ps->smi_mutex); -} - -/* All the statistics in the table */ -void -mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) -{ - if (have_sw_in_discards(ds)) - _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats), - mv88e6xxx_hw_stats, port, data); - else - _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3, - mv88e6xxx_hw_stats, port, data); } int mv88e6xxx_get_sset_count(struct dsa_switch *ds) { - if (have_sw_in_discards(ds)) - return ARRAY_SIZE(mv88e6xxx_hw_stats); - return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3; + struct mv88e6xxx_hw_stat *stat; + int i, j; + + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(ds, stat)) + j++; + } + return j; } void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { - if (have_sw_in_discards(ds)) - _mv88e6xxx_get_ethtool_stats( - ds, ARRAY_SIZE(mv88e6xxx_hw_stats), - mv88e6xxx_hw_stats, port, data); - else - _mv88e6xxx_get_ethtool_stats( - ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3, - mv88e6xxx_hw_stats, port, data); + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int ret; + int i, j; + + mutex_lock(&ps->smi_mutex); + + ret = _mv88e6xxx_stats_snapshot(ds, port); + if (ret < 0) { + mutex_unlock(&ps->smi_mutex); + return; + } + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(ds, stat)) { + data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port); + j++; + } + } + + mutex_unlock(&ps->smi_mutex); } int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) @@ -2323,6 +2336,7 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 is_reset = (ppu_active ? 0x8800 : 0xc800); + struct gpio_desc *gpiod = ds->pd->reset; unsigned long timeout; int ret; int i; @@ -2336,6 +2350,14 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) /* Wait for transmit queues to drain. */ usleep_range(2000, 4000); + /* If there is a gpio connected to the reset pin, toggle it */ + if (gpiod) { + gpiod_set_value_cansleep(gpiod, 1); + usleep_range(10000, 20000); + gpiod_set_value_cansleep(gpiod, 0); + usleep_range(10000, 20000); + } + /* Reset the switch. Keep the PPU active if requested. The PPU * needs to be active to support indirect phy register access * through global registers 0x18 and 0x19. 
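The ethtool callbacks above (mv88e6xxx_get_strings, mv88e6xxx_get_sset_count, mv88e6xxx_get_ethtool_stats) now walk a single table and skip entries the chip cannot report via mv88e6xxx_has_stat(), instead of trimming the array length by a hard-coded 3. A stand-alone sketch of that single-table-plus-predicate pattern, with made-up capability flags standing in for the real family checks:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum stat_type { BANK0, BANK1, PORT };

struct hw_stat {
	const char *string;
	enum stat_type type;
};

static const struct hw_stat hw_stats[] = {
	{ "in_good_octets", BANK0 },
	{ "in_discards",    BANK1 },
	{ "sw_in_filtered", PORT  },
};

/* Illustrative capability flags; the driver checks device families. */
struct dev_caps {
	bool has_bank1;
	bool has_port_counters;
};

static bool has_stat(const struct dev_caps *caps, const struct hw_stat *s)
{
	switch (s->type) {
	case BANK0:
		return true;
	case BANK1:
		return caps->has_bank1;
	case PORT:
		return caps->has_port_counters;
	}
	return false;
}

/* Count, strings and values all agree because they share the predicate. */
static size_t get_sset_count(const struct dev_caps *caps)
{
	size_t i, n = 0;

	for (i = 0; i < sizeof(hw_stats) / sizeof(hw_stats[0]); i++)
		if (has_stat(caps, &hw_stats[i]))
			n++;
	return n;
}

int main(void)
{
	struct dev_caps caps = { .has_bank1 = false, .has_port_counters = true };

	printf("%zu stats exposed\n", get_sset_count(&caps));	/* prints 2 */
	return 0;
}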
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 21c8daa03f78..ca08f913d302 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -288,6 +288,7 @@ #define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY) #define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY) #define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_BANK_1 BIT(9) #define GLOBAL_STATS_COUNTER_32 0x1e #define GLOBAL_STATS_COUNTER_01 0x1f @@ -420,10 +421,17 @@ struct mv88e6xxx_priv_state { struct work_struct bridge_work; }; +enum stat_type { + BANK0, + BANK1, + PORT, +}; + struct mv88e6xxx_hw_stat { char string[ETH_GSTRING_LEN]; int sizeof_stat; int reg; + enum stat_type type; }; int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active); diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 4547a1b8b958..7677c745fb30 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -562,7 +562,7 @@ static void el3_common_remove (struct net_device *dev) } #ifdef CONFIG_EISA -static int __init el3_eisa_probe (struct device *device) +static int el3_eisa_probe(struct device *device) { short i; int ioaddr, irq, if_port; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 2839af00f20c..1c5f3b273e6a 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -907,7 +907,7 @@ static struct eisa_device_id vortex_eisa_ids[] = { }; MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); -static int __init vortex_eisa_probe(struct device *device) +static int vortex_eisa_probe(struct device *device) { void __iomem *ioaddr; struct eisa_device *edev; diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 0443654f0339..c89b9aeeceb6 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -372,7 +372,7 @@ static int ax_mii_probe(struct net_device *dev) ax->phy_dev = phy_dev; netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); + phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); return 0; } @@ -627,7 +627,7 @@ static int ax_mii_init(struct net_device *dev) struct platform_device *pdev = to_platform_device(dev->dev.parent); struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); - int err, i; + int err; ax->bb_ctrl.ops = &bb_ops; ax->addr_memr = ei_local->mem + AX_MEMR; @@ -642,23 +642,12 @@ static int ax_mii_init(struct net_device *dev) snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!ax->mii_bus->irq) { - err = -ENOMEM; - goto out_free_mdio_bitbang; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - ax->mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(ax->mii_bus); if (err) - goto out_free_irq; + goto out_free_mdio_bitbang; return 0; - out_free_irq: - kfree(ax->mii_bus->irq); out_free_mdio_bitbang: free_mdio_bitbang(ax->mii_bus); out: diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 31c5e476fd64..0b13af8e4070 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -122,6 +122,7 @@ config FEALNX cards. 
<http://www.myson.com.tw/> source "drivers/net/ethernet/natsemi/Kconfig" +source "drivers/net/ethernet/netronome/Kconfig" source "drivers/net/ethernet/8390/Kconfig" config NET_NETX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 071f84eb6f3f..38dc1a776a2b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ +obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 096531a73124..74139cb7f849 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -380,9 +380,8 @@ static void bfin_mac_adjust_link(struct net_device *dev) static int mii_probe(struct net_device *dev, int phy_mode) { struct bfin_mac_local *lp = netdev_priv(dev); - struct phy_device *phydev = NULL; + struct phy_device *phydev; unsigned short sysctl; - int i; u32 sclk, mdc_div; /* Enable PHY output early */ @@ -396,18 +395,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); bfin_write_EMAC_SYSCTL(sysctl); - /* search for connected PHY device */ - for (i = 0; i < PHY_MAX_ADDR; ++i) { - struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i]; - - if (!tmp_phydev) - continue; /* no PHY here... */ - - phydev = tmp_phydev; - break; /* found it */ - } - - /* now we are supposed to have a proper phydev, to attach to... */ + phydev = phy_find_first(lp->mii_bus); if (!phydev) { netdev_err(dev, "no phy device found\n"); return -ENODEV; @@ -419,7 +407,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) return -EINVAL; } - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &bfin_mac_adjust_link, phy_mode); if (IS_ERR(phydev)) { @@ -444,10 +432,8 @@ static int mii_probe(struct net_device *dev, int phy_mode) lp->old_duplex = -1; lp->phydev = phydev; - pr_info("attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq, - MDC_CLK, mdc_div, sclk/1000000); + phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", + MDC_CLK, mdc_div, sclk / 1000000); return 0; } @@ -1840,12 +1826,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (!miibus->irq) - goto out_err_irq_alloc; - - for (i = rc; i < PHY_MAX_ADDR; ++i) - miibus->irq[i] = PHY_POLL; rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR); if (rc != mii_bus_pd->phydev_number) @@ -1864,14 +1844,12 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) rc = mdiobus_register(miibus); if (rc) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); - goto out_err_mdiobus_register; + goto out_err_irq_alloc; } platform_set_drvdata(pdev, miibus); return 0; -out_err_mdiobus_register: - kfree(miibus->irq); out_err_irq_alloc: mdiobus_free(miibus); out_err_alloc: @@ -1887,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev) dev_get_platdata(&pdev->dev); mdiobus_unregister(miibus); - kfree(miibus->irq); 
mdiobus_free(miibus); peripheral_free_list(mii_bus_pd->mac_peripherals); @@ -1912,21 +1889,21 @@ static struct platform_driver bfin_mac_driver = { }, }; +static struct platform_driver * const drivers[] = { + &bfin_mii_bus_driver, + &bfin_mac_driver, +}; + static int __init bfin_mac_init(void) { - int ret; - ret = platform_driver_register(&bfin_mii_bus_driver); - if (!ret) - return platform_driver_register(&bfin_mac_driver); - return -ENODEV; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(bfin_mac_init); static void __exit bfin_mac_cleanup(void) { - platform_driver_unregister(&bfin_mac_driver); - platform_driver_unregister(&bfin_mii_bus_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(bfin_mac_cleanup); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 20bf55dbd76f..b873531c5575 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1337,11 +1337,6 @@ static int greth_mdio_init(struct greth_private *greth) greth->mdio->write = greth_mdio_write; greth->mdio->priv = greth; - greth->mdio->irq = greth->mdio_irqs; - - for (phy = 0; phy < PHY_MAX_ADDR; phy++) - greth->mdio->irq[phy] = PHY_POLL; - ret = mdiobus_register(greth->mdio); if (ret) { goto error; } diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index ae16ac94daf8..92dd918e4a83 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h @@ -125,7 +125,6 @@ struct greth_private { struct phy_device *phy; struct mii_bus *mdio; - int mdio_irqs[PHY_MAX_ADDR]; unsigned int link; unsigned int speed; unsigned int duplex; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index e0f3d197e7f2..3f3bcbea15bd 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -1235,7 +1235,7 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) if (!phydev) return -EIO; - return et131x_phy_mii_read(adapter, phydev->addr, reg, value); + return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value); } static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, @@ -1462,7 +1462,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) data &= ~BMCR_PDOWN; if (down) data |= BMCR_PDOWN; - et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); + et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data); } /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ @@ -1490,7 +1490,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) else lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); - et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); + et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2); } } @@ -3192,14 +3192,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &register18); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18 | 0x4); - et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, phydev->addr, +
et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18); } @@ -3212,8 +3212,8 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_CONFIG, &reg); reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; - et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, - reg); + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_CONFIG, reg); } et131x_set_rx_dma_timer(adapter); @@ -3226,14 +3226,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &register18); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18 | 0x4); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_INDEX_REG, register18 | 0x8402); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_DATA_REG, register18 | 511); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18); } @@ -3265,7 +3265,7 @@ static int et131x_mii_probe(struct net_device *netdev) return -ENODEV; } - phydev = phy_connect(netdev, dev_name(&phydev->dev), + phydev = phy_connect(netdev, phydev_name(phydev), &et131x_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -3289,9 +3289,7 @@ phydev->autoneg = AUTONEG_ENABLE; adapter->phydev = phydev; - dev_info(&adapter->pdev->dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -3327,7 +3325,6 @@ static void et131x_pci_remove(struct pci_dev *pdev) netif_napi_del(&adapter->napi); phy_disconnect(adapter->phydev); mdiobus_unregister(adapter->mii_bus); - kfree(adapter->mii_bus->irq); mdiobus_free(adapter->mii_bus); et131x_adapter_memory_free(adapter); @@ -3946,7 +3943,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, struct net_device *netdev; struct et131x_adapter *adapter; int rc; - int ii; rc = pci_enable_device(pdev); if (rc < 0) { @@ -4036,18 +4032,11 @@ adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; - adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!adapter->mii_bus->irq) - goto err_mdio_free; - - for (ii = 0; ii < PHY_MAX_ADDR; ii++) - adapter->mii_bus->irq[ii] = PHY_POLL; rc = mdiobus_register(adapter->mii_bus); if (rc < 0) { dev_err(&pdev->dev, "failed to register MII bus\n"); - goto err_mdio_free_irq; + goto err_mdio_free; } rc = et131x_mii_probe(netdev); @@ -4087,8 +4076,6 @@ err_phy_disconnect: phy_disconnect(adapter->phydev); err_mdio_unregister: mdiobus_unregister(adapter->mii_bus); -err_mdio_free_irq: - kfree(adapter->mii_bus->irq); err_mdio_free: mdiobus_free(adapter->mii_bus); err_mem_free: diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index fe644823ceaf..17472851674f 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -131,7 +131,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) { struct altera_tse_private *priv = netdev_priv(dev); int ret; - int i; struct device_node *mdio_node = NULL; struct mii_bus *mdio = NULL; struct device_node *child_node = NULL; @@ -161,14 +160,6 @@ static int
altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->write = &altera_tse_mdio_write; snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id); - mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (mdio->irq == NULL) { - ret = -ENOMEM; - goto out_free_mdio; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - mdio->irq[i] = PHY_POLL; - mdio->priv = dev; mdio->parent = priv->device; @@ -176,7 +167,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) if (ret != 0) { netdev_err(dev, "Cannot register MDIO bus %s\n", mdio->id); - goto out_free_mdio_irq; + goto out_free_mdio; } if (netif_msg_drv(priv)) @@ -184,8 +175,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) priv->mdio = mdio; return 0; -out_free_mdio_irq: - kfree(mdio->irq); out_free_mdio: mdiobus_free(mdio); mdio = NULL; @@ -855,7 +844,7 @@ static int init_phy(struct net_device *dev) } netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", - phydev->addr, phydev->phy_id, phydev->link); + phydev->mdio.addr, phydev->phy_id, phydev->link); priv->phydev = phydev; return 0; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 5330bcb8a944..d3977d032b48 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -344,9 +344,6 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr, static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { - /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does - * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) - */ struct net_device *const dev = bus->priv; /* make sure the MAC associated with this @@ -502,7 +499,7 @@ static int au1000_mii_probe(struct net_device *dev) BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); if (aup->phy_addr) - phydev = aup->mii_bus->phy_map[aup->phy_addr]; + phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); else netdev_info(dev, "using PHY-less setup\n"); return 0; @@ -512,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev) * on the current MAC's MII bus */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) - if (aup->mii_bus->phy_map[phy_addr]) { - phydev = aup->mii_bus->phy_map[phy_addr]; + if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) { + phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); if (!aup->phy_search_highest_addr) /* break out with first one found */ break; @@ -531,7 +528,8 @@ static int au1000_mii_probe(struct net_device *dev) */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { struct phy_device *const tmp_phydev = - aup->mii_bus->phy_map[phy_addr]; + mdiobus_get_phy(aup->mii_bus, + phy_addr); if (aup->mac_id == 1) break; @@ -558,7 +556,7 @@ static int au1000_mii_probe(struct net_device *dev) /* now we are supposed to have a proper phydev, to attach to... 
*/ BUG_ON(phydev->attached_dev); - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &au1000_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -583,9 +581,7 @@ static int au1000_mii_probe(struct net_device *dev) aup->old_duplex = -1; aup->phy_dev = phydev; - netdev_info(dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; } @@ -1293,14 +1289,7 @@ static int au1000_probe(struct platform_device *pdev) aup->mii_bus->name = "au1000_eth_mii"; snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, aup->mac_id); - aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (aup->mii_bus->irq == NULL) { - err = -ENOMEM; - goto err_out; - } - for (i = 0; i < PHY_MAX_ADDR; ++i) - aup->mii_bus->irq[i] = PHY_POLL; /* if known, set corresponding PHY IRQs */ if (aup->phy_static_config) if (aup->phy_irq && aup->phy_busid == aup->mac_id) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 970781a9e677..f6a7161e3b85 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata) usleep_range(10, 15); /* Poll Until Poll Condition */ - while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) + while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) usleep_range(500, 600); if (!count) @@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) /* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; - while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, + while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, MTL_Q_TQOMR, FTQ)) usleep_range(500, 600); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 53ce1222b11d..8a9b493566c9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2024,7 +2024,6 @@ read_again: skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); - skb_mark_napi_id(skb, napi); napi_gro_receive(napi, skb); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c31e691d11fc..db55c9f6e8e1 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -869,7 +869,7 @@ void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) pdata->mdio_bus = NULL; } -struct xgene_mac_ops xgene_gmac_ops = { +const struct xgene_mac_ops xgene_gmac_ops = { .init = xgene_gmac_init, .reset = xgene_gmac_reset, .rx_enable = xgene_gmac_rx_enable, @@ -879,7 +879,7 @@ struct xgene_mac_ops xgene_gmac_ops = { .set_mac_addr = xgene_gmac_set_mac_addr, }; -struct xgene_port_ops xgene_gport_ops = { +const struct xgene_port_ops xgene_gport_ops = { .reset = xgene_enet_reset, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_gport_shutdown, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index c153a1dc5ff7..8a9091039ab4 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -340,8 +340,8 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); bool 
xgene_ring_mgr_init(struct xgene_enet_pdata *p); -extern struct xgene_mac_ops xgene_gmac_ops; -extern struct xgene_port_ops xgene_gport_ops; +extern const struct xgene_mac_ops xgene_gmac_ops; +extern const struct xgene_port_ops xgene_gport_ops; extern struct xgene_ring_ops xgene_ring1_ops; #endif /* __XGENE_ENET_HW_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 9147a0107c44..a4799c1fc7d4 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, struct sk_buff *skb) { struct device *dev = ndev_to_dev(tx_ring->ndev); + struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); struct xgene_enet_raw_desc *raw_desc; __le64 *exp_desc = NULL, *exp_bufs = NULL; dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; @@ -419,6 +420,7 @@ out: raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | SET_VAL(USERINFO, tx_ring->tail)); tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; + pdata->tx_level += count; tx_ring->tail = tail; return count; @@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; - struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; - u32 tx_level, cq_level; + u32 tx_level = pdata->tx_level; int count; - tx_level = pdata->ring_ops->len(tx_ring); - cq_level = pdata->ring_ops->len(cp_ring); - if (unlikely(tx_level > pdata->tx_qcnt_hi || - cq_level > pdata->cp_qcnt_hi)) { + if (tx_level < pdata->txc_level) + tx_level += ((typeof(pdata->tx_level))~0U); + + if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; } @@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; - int ret, count = 0, processed = 0; + int ret, desc_count, count = 0, processed = 0; + bool is_completion; do { raw_desc = &ring->raw_desc[head]; + desc_count = 0; + is_completion = false; exp_desc = NULL; if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) break; @@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, } dma_rmb(); count++; + desc_count++; } - if (is_rx_desc(raw_desc)) + if (is_rx_desc(raw_desc)) { ret = xgene_enet_rx_frame(ring, raw_desc); - else + } else { ret = xgene_enet_tx_completion(ring, raw_desc); + is_completion = true; + } xgene_enet_mark_desc_slot_empty(raw_desc); if (exp_desc) xgene_enet_mark_desc_slot_empty(exp_desc); head = (head + 1) & slots; count++; + desc_count++; processed++; + if (is_completion) + pdata->txc_level += desc_count; if (ret) break; @@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, pdata->ring_ops->wr_cmd(ring, -count); ring->head = head; - if (netif_queue_stopped(ring->ndev)) { - if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) - netif_wake_queue(ring->ndev); - } + if (netif_queue_stopped(ring->ndev)) + netif_start_queue(ring->ndev); } return processed; @@ -682,7 +690,7 @@ static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) static int xgene_enet_open(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_mac_ops *mac_ops = pdata->mac_ops; + const struct xgene_mac_ops *mac_ops = 
pdata->mac_ops; int ret; mac_ops->tx_enable(pdata); @@ -706,7 +714,7 @@ static int xgene_enet_open(struct net_device *ndev) static int xgene_enet_close(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_mac_ops *mac_ops = pdata->mac_ops; + const struct xgene_mac_ops *mac_ops = pdata->mac_ops; netif_stop_queue(ndev); @@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) pdata->tx_ring->cp_ring = cp_ring; pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); - pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; - pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; - pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; + pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; return 0; @@ -1084,7 +1090,7 @@ static const struct net_device_ops xgene_ndev_ops = { }; #ifdef CONFIG_ACPI -static int xgene_get_port_id_acpi(struct device *dev, +static void xgene_get_port_id_acpi(struct device *dev, struct xgene_enet_pdata *pdata) { acpi_status status; @@ -1097,24 +1103,19 @@ static int xgene_get_port_id_acpi(struct device *dev, pdata->port_id = temp; } - return 0; + return; } #endif -static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata) +static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata) { u32 id = 0; - int ret; - ret = of_property_read_u32(dev->of_node, "port-id", &id); - if (ret) { - pdata->port_id = 0; - ret = 0; - } else { - pdata->port_id = id & BIT(0); - } + of_property_read_u32(dev->of_node, "port-id", &id); - return ret; + pdata->port_id = id & BIT(0); + + return; } static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata) @@ -1209,13 +1210,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } if (dev->of_node) - ret = xgene_get_port_id_dt(dev, pdata); + xgene_get_port_id_dt(dev, pdata); #ifdef CONFIG_ACPI else - ret = xgene_get_port_id_acpi(dev, pdata); + xgene_get_port_id_acpi(dev, pdata); #endif - if (ret) - return ret; if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) eth_hw_addr_random(ndev); @@ -1423,7 +1422,7 @@ static int xgene_enet_probe(struct platform_device *pdev) struct net_device *ndev; struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; - struct xgene_mac_ops *mac_ops; + const struct xgene_mac_ops *mac_ops; const struct of_device_id *of_id; int ret; @@ -1510,7 +1509,7 @@ err: static int xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; - struct xgene_mac_ops *mac_ops; + const struct xgene_mac_ops *mac_ops; struct net_device *ndev; pdata = platform_get_drvdata(pdev); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index a6e56b88c0a0..70d5b62c125a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -155,11 +155,11 @@ struct xgene_enet_pdata { enum xgene_enet_id enet_id; struct xgene_enet_desc_ring *tx_ring; struct xgene_enet_desc_ring *rx_ring; + u16 tx_level; + u16 txc_level; char *dev_name; u32 rx_buff_cnt; u32 tx_qcnt_hi; - u32 cp_qcnt_hi; - u32 cp_qcnt_low; u32 rx_irq; u32 txc_irq; u8 cq_cnt; @@ -174,8 +174,8 @@ struct xgene_enet_pdata { int phy_mode; enum xgene_enet_rm rm; struct rtnl_link_stats64 stats; - struct xgene_mac_ops *mac_ops; - struct xgene_port_ops *port_ops; + const struct xgene_mac_ops *mac_ops; + const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; struct delayed_work link_work; u32 port_id; diff --git 
a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index 05b817e56fde..78475512b683 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -405,7 +405,7 @@ static void xgene_enet_link_state(struct work_struct *work) schedule_delayed_work(&p->link_work, poll_interval); } -struct xgene_mac_ops xgene_sgmac_ops = { +const struct xgene_mac_ops xgene_sgmac_ops = { .init = xgene_sgmac_init, .reset = xgene_sgmac_reset, .rx_enable = xgene_sgmac_rx_enable, @@ -416,7 +416,7 @@ struct xgene_mac_ops xgene_sgmac_ops = { .link_state = xgene_enet_link_state }; -struct xgene_port_ops xgene_sgport_ops = { +const struct xgene_port_ops xgene_sgport_ops = { .reset = xgene_enet_reset, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_enet_shutdown diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h index de432465009c..29a71b4dcc44 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -35,7 +35,7 @@ #define MPA_IDLE_WITH_QMI_EMPTY BIT(12) #define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc -extern struct xgene_mac_ops xgene_sgmac_ops; -extern struct xgene_port_ops xgene_sgport_ops; +extern const struct xgene_mac_ops xgene_sgmac_ops; +extern const struct xgene_port_ops xgene_sgport_ops; #endif /* __XGENE_ENET_SGMAC_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 7a28a48cb2c7..ba030dc1940b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -326,7 +326,7 @@ static void xgene_enet_link_state(struct work_struct *work) schedule_delayed_work(&pdata->link_work, poll_interval); } -struct xgene_mac_ops xgene_xgmac_ops = { +const struct xgene_mac_ops xgene_xgmac_ops = { .init = xgene_xgmac_init, .reset = xgene_xgmac_reset, .rx_enable = xgene_xgmac_rx_enable, @@ -338,7 +338,7 @@ struct xgene_mac_ops xgene_xgmac_ops = { .link_state = xgene_enet_link_state }; -struct xgene_port_ops xgene_xgport_ops = { +const struct xgene_port_ops xgene_xgport_ops = { .reset = xgene_enet_reset, .cle_bypass = xgene_enet_xgcle_bypass, .shutdown = xgene_enet_shutdown, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index f8f908dbf51c..0a2dca8a1725 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -69,7 +69,7 @@ #define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 #define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 -extern struct xgene_mac_ops xgene_xgmac_ops; -extern struct xgene_port_ops xgene_xgport_ops; +extern const struct xgene_mac_ops xgene_xgmac_ops; +extern const struct xgene_port_ops xgene_xgport_ops; #endif /* __XGENE_ENET_XGMAC_H__ */ diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 52a6b16f57d2..689045186064 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -34,9 +34,9 @@ config EMAC_ROCKCHIP select ARC_EMAC_CORE depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA ---help--- - Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. + Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. This selects Rockchip SoC glue layer support for the - emac device driver. This driver is used for RK3066/RK3188 + emac device driver. 
This driver is used for RK3036/RK3066/RK3188 EMAC ethernet controller. endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c31c7407b753..85e821ccfcd2 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -25,17 +25,13 @@ #include "emac.h" #define DRV_NAME "rockchip_emac" -#define DRV_VERSION "1.0" - -#define GRF_MODE_MII (1UL << 0) -#define GRF_MODE_RMII (0UL << 0) -#define GRF_SPEED_10M (0UL << 1) -#define GRF_SPEED_100M (1UL << 1) -#define GRF_SPEED_ENABLE_BIT (1UL << 17) -#define GRF_MODE_ENABLE_BIT (1UL << 16) +#define DRV_VERSION "1.1" struct emac_rockchip_soc_data { - int grf_offset; + unsigned int grf_offset; + unsigned int grf_mode_offset; + unsigned int grf_speed_offset; + bool need_div_macclk; }; struct rockchip_priv_data { @@ -44,23 +40,22 @@ struct rockchip_priv_data { const struct emac_rockchip_soc_data *soc_data; struct regulator *regulator; struct clk *refclk; + struct clk *macclk; }; static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) { struct rockchip_priv_data *emac = priv; + u32 speed_offset = emac->soc_data->grf_speed_offset; u32 data; int err = 0; - /* write-enable bits */ - data = GRF_SPEED_ENABLE_BIT; - switch(speed) { case 10: - data |= GRF_SPEED_10M; + data = (1 << (speed_offset + 16)) | (0 << speed_offset); break; case 100: - data |= GRF_SPEED_100M; + data = (1 << (speed_offset + 16)) | (1 << speed_offset); break; default: pr_err("speed %u not supported\n", speed); @@ -72,14 +67,25 @@ static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) pr_err("unable to apply speed %u to grf (%d)\n", speed, err); } -static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = { - { .grf_offset = 0x154 }, /* rk3066 */ - { .grf_offset = 0x0a4 }, /* rk3188 */ +static const struct emac_rockchip_soc_data emac_rk3036_emac_data = { + .grf_offset = 0x140, .grf_mode_offset = 8, + .grf_speed_offset = 9, .need_div_macclk = 1, +}; + +static const struct emac_rockchip_soc_data emac_rk3066_emac_data = { + .grf_offset = 0x154, .grf_mode_offset = 0, + .grf_speed_offset = 1, .need_div_macclk = 0, +}; + +static const struct emac_rockchip_soc_data emac_rk3188_emac_data = { + .grf_offset = 0x0a4, .grf_mode_offset = 0, + .grf_speed_offset = 1, .need_div_macclk = 0, }; static const struct of_device_id emac_rockchip_dt_ids[] = { - { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] }, - { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] }, + { .compatible = "rockchip,rk3036-emac", .data = &emac_rk3036_emac_data }, + { .compatible = "rockchip,rk3066-emac", .data = &emac_rk3066_emac_data }, + { .compatible = "rockchip,rk3188-emac", .data = &emac_rk3188_emac_data }, { /* Sentinel */ } }; @@ -110,7 +116,7 @@ static int emac_rockchip_probe(struct platform_device *pdev) interface = of_get_phy_mode(dev->of_node); - /* RK3066 and RK3188 SoCs only support RMII */ + /* RK3036/RK3066/RK3188 SoCs only support RMII */ if (interface != PHY_INTERFACE_MODE_RMII) { dev_err(dev, "unsupported phy interface mode %d\n", interface); err = -ENOTSUPP; @@ -164,15 +170,12 @@ static int emac_rockchip_probe(struct platform_device *pdev) } } - err = arc_emac_probe(ndev, interface); - if (err) - goto out_regulator_disable; - - /* write-enable bits */ - data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT; - - data |= GRF_SPEED_100M; - data |= GRF_MODE_RMII; + /* Set speed 100M */ + data = (1 << 
(priv->soc_data->grf_speed_offset + 16)) | + (1 << priv->soc_data->grf_speed_offset); + /* Set RMII mode */ + data |= (1 << (priv->soc_data->grf_mode_offset + 16)) | + (0 << priv->soc_data->grf_mode_offset); err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); if (err) { @@ -184,6 +187,33 @@ static int emac_rockchip_probe(struct platform_device *pdev) err = clk_set_rate(priv->refclk, 50000000); if (err) dev_err(dev, "failed to change reference clock rate (%d)\n", err); + + if (priv->soc_data->need_div_macclk) { + priv->macclk = devm_clk_get(dev, "macclk"); + if (IS_ERR(priv->macclk)) { + dev_err(dev, "failed to retrieve mac clock (%ld)\n", PTR_ERR(priv->macclk)); + err = PTR_ERR(priv->macclk); + goto out_regulator_disable; + } + + err = clk_prepare_enable(priv->macclk); + if (err) { + dev_err(dev, "failed to enable mac clock (%d)\n", err); + goto out_regulator_disable; + } + + /* RMII TX/RX needs always a rate of 25MHz */ + err = clk_set_rate(priv->macclk, 25000000); + if (err) + dev_err(dev, "failed to change mac clock rate (%d)\n", err); + } + + err = arc_emac_probe(ndev, interface); + if (err) { + dev_err(dev, "failed to probe arc emac (%d)\n", err); + goto out_regulator_disable; + } + return 0; out_regulator_disable: diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index 7712f068f6d4..1fe35e453d43 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -958,13 +958,13 @@ void alx_configure_basic(struct alx_hw *hw) alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); - raw_mtu = hw->mtu + ETH_HLEN; - alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); - if (raw_mtu > ALX_MTU_JUMBO_TH) + raw_mtu = ALX_RAW_MTU(hw->mtu); + alx_write_mem32(hw, ALX_MTU, raw_mtu); + if (raw_mtu > (ALX_MTU_JUMBO_TH + ETH_FCS_LEN + VLAN_HLEN)) hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; - if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) - val = (raw_mtu + 8 + 7) >> 3; + if (raw_mtu < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 7) >> 3; else val = ALX_TXQ1_JUMBO_TSO_TH >> 3; alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index 15548802d6f8..f289c05f5cb4 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -37,6 +37,7 @@ #include <linux/types.h> #include <linux/mdio.h> #include <linux/pci.h> +#include <linux/if_vlan.h> #include "reg.h" /* Transmit Packet Descriptor, contains 4 32-bit words. 
@@ -343,12 +344,14 @@ struct alx_rrd { ALX_RSS_HASH_TYPE_IPV4_TCP | \ ALX_RSS_HASH_TYPE_IPV6 | \ ALX_RSS_HASH_TYPE_IPV6_TCP) -#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_FRAME_PAD 16 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) +#define ALX_MAX_FRAME_LEN(_mtu) (ALIGN((ALX_RAW_MTU(_mtu) + ALX_FRAME_PAD), 8)) +#define ALX_DEF_RXBUF_SIZE ALX_MAX_FRAME_LEN(1500) #define ALX_MAX_JUMBO_PKT_SIZE (9*1024) #define ALX_MAX_TSO_PKT_SIZE (7*1024) #define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE -#define ALX_MIN_FRAME_SIZE 68 -#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) +#define ALX_MIN_FRAME_SIZE (ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) #define ALX_MAX_RX_QUEUES 8 #define ALX_MAX_TX_QUEUES 4 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index bd377a6b067d..55b118e876fd 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -577,7 +577,6 @@ static int alx_alloc_rings(struct alx_priv *alx) alx->int_mask &= ~ALX_ISR_ALL_QUEUES; alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; - alx->tx_ringsz = alx->tx_ringsz; netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); @@ -705,7 +704,7 @@ static int alx_init_sw(struct alx_priv *alx) hw->smb_timer = 400; hw->mtu = alx->dev->mtu; - alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); + alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); alx->tx_ringsz = 256; alx->rx_ringsz = 512; hw->imt = 200; @@ -806,7 +805,7 @@ static void alx_reinit(struct alx_priv *alx) static int alx_change_mtu(struct net_device *netdev, int mtu) { struct alx_priv *alx = netdev_priv(netdev); - int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int max_frame = ALX_MAX_FRAME_LEN(mtu); if ((max_frame < ALX_MIN_FRAME_SIZE) || (max_frame > ALX_MAX_FRAME_SIZE)) @@ -817,8 +816,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) netdev->mtu = mtu; alx->hw.mtu = mtu; - alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? - ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; + alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); if (netif_running(netdev)) alx_reinit(alx); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 2795d6db10e1..8b5988e210d5 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 8 * 4; - ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, - &ring_header->dma); + ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, + &ring_header->dma, GFP_KERNEL); if (unlikely(!ring_header->desc)) { - dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); + dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); goto err_nomem; } - memset(ring_header->desc, 0, ring_header->size); /* init TPD ring */ tpd_ring[0].dma = roundup(ring_header->dma, 8); diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig index a3c7106fdf85..8ba7f8ff3434 100644 --- a/drivers/net/ethernet/aurora/Kconfig +++ b/drivers/net/ethernet/aurora/Kconfig @@ -13,6 +13,7 @@ if NET_VENDOR_AURORA config AURORA_NB8800 tristate "Aurora AU-NB8800 support" + depends on HAS_DMA select PHYLIB help Support for the AU-NB8800 gigabit Ethernet controller. 
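[Editor's note] The alx hunks above replace the fixed 1536-byte receive buffer with sizes derived from the MTU through the new ALX_RAW_MTU()/ALX_MAX_FRAME_LEN() macros (raw MTU plus a 16-byte pad, rounded up to an 8-byte boundary). The following standalone C sketch is not part of the patch; it only walks through that arithmetic, with the standard Ethernet header/FCS/VLAN lengths and a local ALIGN() helper re-declared by hand since in the driver they come from the kernel headers.

/*
 * Userspace sketch of the reworked ALX frame-size arithmetic.
 * ETH_HLEN, ETH_FCS_LEN, ETH_ZLEN, VLAN_HLEN and ALIGN() are
 * re-declared locally so this compiles outside the kernel tree.
 */
#include <stdio.h>

#define ETH_HLEN        14      /* Ethernet header */
#define ETH_FCS_LEN     4       /* frame check sequence */
#define ETH_ZLEN        60      /* minimum frame length without FCS */
#define VLAN_HLEN       4       /* 802.1Q tag */

/* round x up to a multiple of a (a must be a power of two) */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

#define ALX_FRAME_PAD           16
#define ALX_RAW_MTU(_mtu)       ((_mtu) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define ALX_MAX_FRAME_LEN(_mtu) ALIGN(ALX_RAW_MTU(_mtu) + ALX_FRAME_PAD, 8)
#define ALX_DEF_RXBUF_SIZE      ALX_MAX_FRAME_LEN(1500)
#define ALX_MIN_FRAME_SIZE      (ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN)

#define max(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
	int mtu;

	for (mtu = 1500; mtu <= 9000; mtu += 2500) {
		int max_frame = ALX_MAX_FRAME_LEN(mtu);
		/* mirrors alx_change_mtu(): never shrink below the default */
		int rxbuf = max(max_frame, ALX_DEF_RXBUF_SIZE);

		printf("mtu %4d: raw %4d  max_frame %4d  rxbuf %4d\n",
		       mtu, ALX_RAW_MTU(mtu), max_frame, rxbuf);
	}

	printf("ALX_DEF_RXBUF_SIZE = %d (was 1536)\n", ALX_DEF_RXBUF_SIZE);
	printf("ALX_MIN_FRAME_SIZE = %d\n", ALX_MIN_FRAME_SIZE);
	return 0;
}

For the default 1500-byte MTU this works out to a 1544-byte buffer (the new ALX_DEF_RXBUF_SIZE), slightly larger than the old 1536 constant, and the now-symbolic ALX_MIN_FRAME_SIZE still expands to the former literal 68.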
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index a3b1c07ae0af..74f0a37c4eb6 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2263,24 +2263,16 @@ static int b44_register_phy_one(struct b44 *bp) mii_bus->parent = sdev->dev; mii_bus->phy_mask = ~(1 << bp->phy_addr); snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance); - mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!mii_bus->irq) { - dev_err(sdev->dev, "mii_bus irq allocation failed\n"); - err = -ENOMEM; - goto err_out_mdiobus; - } - - memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR); bp->mii_bus = mii_bus; err = mdiobus_register(mii_bus); if (err) { dev_err(sdev->dev, "failed to register MII bus\n"); - goto err_out_mdiobus_irq; + goto err_out_mdiobus; } - if (!bp->mii_bus->phy_map[bp->phy_addr] && + if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) && (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { dev_info(sdev->dev, @@ -2313,19 +2305,15 @@ static int b44_register_phy_one(struct b44 *bp) bp->phydev = phydev; bp->old_link = 0; - bp->phy_addr = phydev->addr; + bp->phy_addr = phydev->mdio.addr; - dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; err_out_mdiobus_unregister: mdiobus_unregister(mii_bus); -err_out_mdiobus_irq: - kfree(mii_bus->irq); - err_out_mdiobus: mdiobus_free(mii_bus); @@ -2339,7 +2327,6 @@ static void b44_unregister_phy_one(struct b44 *bp) phy_disconnect(bp->phydev); mdiobus_unregister(mii_bus); - kfree(mii_bus->irq); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 8b1929e9f698..87c6b5bdd616 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -908,8 +908,7 @@ static int bcm_enet_open(struct net_device *dev) else phydev->advertising &= ~SUPPORTED_Pause; - dev_info(kdev, "attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); + phy_attached_info(phydev); priv->old_link = 0; priv->old_duplex = -1; @@ -1849,17 +1848,8 @@ static int bcm_enet_probe(struct platform_device *pdev) * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto out_free_mdio; - } - if (priv->has_phy_interrupt) bus->irq[priv->phy_id] = priv->phy_interrupt; - else - bus->irq[priv->phy_id] = PHY_POLL; ret = mdiobus_register(bus); if (ret) { @@ -2884,33 +2874,21 @@ struct platform_driver bcm63xx_enet_shared_driver = { }, }; +static struct platform_driver * const drivers[] = { + &bcm63xx_enet_shared_driver, + &bcm63xx_enet_driver, + &bcm63xx_enetsw_driver, +}; + /* entry point */ static int __init bcm_enet_init(void) { - int ret; - - ret = platform_driver_register(&bcm63xx_enet_shared_driver); - if (ret) - return ret; - - ret = platform_driver_register(&bcm63xx_enet_driver); - if (ret) - platform_driver_unregister(&bcm63xx_enet_shared_driver); - - ret = platform_driver_register(&bcm63xx_enetsw_driver); - if (ret) { - platform_driver_unregister(&bcm63xx_enet_driver); - platform_driver_unregister(&bcm63xx_enet_shared_driver); - } - - return ret; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit bcm_enet_exit(void) { - platform_driver_unregister(&bcm63xx_enet_driver); 
- platform_driver_unregister(&bcm63xx_enetsw_driver); - platform_driver_unregister(&bcm63xx_enet_shared_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 858106352ce9..993c780bdfab 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1216,7 +1216,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* Initialize SW view of the ring */ spin_lock_init(&ring->lock); ring->priv = priv; - netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); + netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; ring->alloc_size = ring->size; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 28f7610b03fe..c7798d360512 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1471,7 +1471,7 @@ static int bgmac_mii_register(struct bgmac *bgmac) struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; - int i, err = 0; + int err = 0; if (ci->id == BCMA_CHIP_ID_BCM4707 || ci->id == BCMA_CHIP_ID_BCM53018) @@ -1490,18 +1490,10 @@ static int bgmac_mii_register(struct bgmac *bgmac) mii_bus->parent = &bgmac->core->dev; mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!mii_bus->irq) { - err = -ENOMEM; - goto err_free_bus; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(mii_bus); if (err) { bgmac_err(bgmac, "Registration of mii bus failed\n"); - goto err_free_irq; + goto err_free_bus; } bgmac->mii_bus = mii_bus; @@ -1522,8 +1514,6 @@ static int bgmac_mii_register(struct bgmac *bgmac) err_unregister_bus: mdiobus_unregister(mii_bus); -err_free_irq: - kfree(mii_bus->irq); err_free_bus: mdiobus_free(mii_bus); return err; @@ -1534,7 +1524,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac) struct mii_bus *mii_bus = bgmac->mii_bus; mdiobus_unregister(mii_bus); - kfree(mii_bus->irq); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index b5e64b02200c..cae0956186ce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -540,10 +540,6 @@ struct bnx2x_fastpath { struct napi_struct napi; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned long busy_poll_state; -#endif - union host_hc_status_block status_blk; /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; @@ -594,8 +590,6 @@ struct bnx2x_fastpath { /* The last maximal completed SGE */ u16 last_max_sge; __le16 *rx_cons_sb; - unsigned long rx_pkt, - rx_calls; /* TPA related */ struct bnx2x_agg_info *tpa_info; @@ -617,115 +611,6 @@ struct bnx2x_fastpath { #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) -#ifdef CONFIG_NET_RX_BUSY_POLL - -enum bnx2x_fp_state { - BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */ - - BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */ - BNX2X_STATE_FP_NAPI_REQ = BIT(1), - - BNX2X_STATE_FP_POLL_BIT = 2, - BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */ - - BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */ -}; - -static inline void 
bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) -{ - WRITE_ONCE(fp->busy_poll_state, 0); -} - -/* called from the device poll routine to get ownership of a FP */ -static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) -{ - unsigned long prev, old = READ_ONCE(fp->busy_poll_state); - - while (1) { - switch (old) { - case BNX2X_STATE_FP_POLL: - /* make sure bnx2x_fp_lock_poll() wont starve us */ - set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT, - &fp->busy_poll_state); - /* fallthrough */ - case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI); - if (unlikely(prev != old)) { - old = prev; - continue; - } - return true; - } -} - -static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) -{ - smp_wmb(); - fp->busy_poll_state = 0; -} - -/* called from bnx2x_low_latency_poll() */ -static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) -{ - return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0; -} - -static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) -{ - smp_mb__before_atomic(); - clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state); -} - -/* true if a socket is polling */ -static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) -{ - return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; -} - -/* false if fp is currently owned */ -static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) -{ - set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state); - return !bnx2x_fp_ll_polling(fp); - -} -#else -static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) -{ -} - -static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) -{ - return true; -} - -static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) -{ -} - -static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) -{ - return false; -} - -static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) -{ -} - -static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) -{ - return false; -} -static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f8d7a2f06950..9695a4c4a434 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -46,7 +46,6 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) for_each_rx_queue_cnic(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); - napi_hash_add(&bnx2x_fp(bp, i, napi)); } } @@ -58,7 +57,6 @@ static void bnx2x_add_all_napi(struct bnx2x *bp) for_each_eth_queue(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); - napi_hash_add(&bnx2x_fp(bp, i, napi)); } } @@ -560,10 +558,8 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, put_page(pool->page); pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); - if (unlikely(!pool->page)) { - BNX2X_ERR("Can't alloc sge\n"); + if (unlikely(!pool->page)) return -ENOMEM; - } pool->offset = 0; } @@ -747,7 +743,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); break; default: - BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + 
WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", be16_to_cpu(skb->protocol)); } } @@ -1094,12 +1090,7 @@ reuse_rx: __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - skb_mark_napi_id(skb, &fp->napi); - - if (bnx2x_fp_ll_polling(fp)) - netif_receive_skb(skb); - else - napi_gro_receive(&fp->napi, skb); + napi_gro_receive(&fp->napi, skb); next_rx: rx_buf->data = NULL; @@ -1131,9 +1122,6 @@ next_cqe: bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, fp->rx_sge_prod); - fp->rx_pkt += rx_pkt; - fp->rx_calls++; - return rx_pkt; } @@ -1869,7 +1857,6 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) int i; for_each_rx_queue_cnic(bp, i) { - bnx2x_fp_busy_poll_init(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); } } @@ -1879,7 +1866,6 @@ static void bnx2x_napi_enable(struct bnx2x *bp) int i; for_each_eth_queue(bp, i) { - bnx2x_fp_busy_poll_init(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); } } @@ -1890,8 +1876,6 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_ll_disable(&bp->fp[i])) - usleep_range(1000, 2000); } } @@ -1901,8 +1885,6 @@ static void bnx2x_napi_disable(struct bnx2x *bp) for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_ll_disable(&bp->fp[i])) - usleep_range(1000, 2000); } } @@ -3219,49 +3201,32 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) */ static int bnx2x_poll(struct napi_struct *napi, int budget) { - int work_done = 0; - u8 cos; struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, napi); struct bnx2x *bp = fp->bp; + int rx_work_done; + u8 cos; - while (1) { #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - napi_complete(napi); - return 0; - } + if (unlikely(bp->panic)) { + napi_complete(napi); + return 0; + } #endif - if (!bnx2x_fp_lock_napi(fp)) - return budget; - - for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) - bnx2x_tx_int(bp, fp->txdata_ptr[cos]); - - if (bnx2x_has_rx_work(fp)) { - work_done += bnx2x_rx_int(fp, budget - work_done); - - /* must not complete if we consumed full budget */ - if (work_done >= budget) { - bnx2x_fp_unlock_napi(fp); - break; - } - } - - bnx2x_fp_unlock_napi(fp); + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + bnx2x_tx_int(bp, fp->txdata_ptr[cos]); - /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; - /* No need to update SB for FCoE L2 ring as long as - * it's connected to the default SB and the SB - * has been updated when NAPI was scheduled. - */ - if (IS_FCOE_FP(fp)) { - napi_complete(napi); - break; - } + if (rx_work_done < budget) { + /* No need to update SB for FCoE L2 ring as long as + * it's connected to the default SB and the SB + * has been updated when NAPI was scheduled. 
+ */ + if (IS_FCOE_FP(fp)) { + napi_complete(napi); + } else { bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, * thus we need to ensure that status block indices @@ -3286,40 +3251,15 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1); - break; + } else { + rx_work_done = budget; } } } - return work_done; + return rx_work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -int bnx2x_low_latency_recv(struct napi_struct *napi) -{ - struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, - napi); - struct bnx2x *bp = fp->bp; - int found = 0; - - if ((bp->state == BNX2X_STATE_CLOSED) || - (bp->state == BNX2X_STATE_ERROR) || - (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) - return LL_FLUSH_FAILED; - - if (!bnx2x_fp_lock_poll(fp)) - return LL_FLUSH_BUSY; - - if (bnx2x_has_rx_work(fp)) - found = bnx2x_rx_int(fp, 4); - - bnx2x_fp_unlock_poll(fp); - - return found; -} -#endif - /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs @@ -3430,25 +3370,29 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) return rc; } -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */ +#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4 + +/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ +#define BNX2X_NUM_TSO_WIN_SUB_BDS 3 + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) /* check if packet requires linearization (packet is too fragmented) no need to check fragmentation if page size > 8K (there will be no violation to FW restrictions) */ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, u32 xmit_type) { - int to_copy = 0; - int hlen = 0; - int first_bd_sz = 0; + int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS; + int to_copy = 0, hlen = 0; - /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ - if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { + if (xmit_type & XMIT_GSO_ENC) + num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS; + if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { if (xmit_type & XMIT_GSO) { unsigned short lso_mss = skb_shinfo(skb)->gso_size; - /* Check if LSO packet needs to be copied: - 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ - int wnd_size = MAX_FETCH_BD - 3; + int wnd_size = MAX_FETCH_BD - num_tso_win_sub; /* Number of windows to check */ int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; int wnd_idx = 0; @@ -4490,7 +4434,6 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, /* Limit the CQE producer by the CQE ring size */ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, cqe_ring_prod); - fp->rx_pkt = fp->rx_calls = 0; bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index b7d32e8412f1..4cbb03f87b5a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -570,13 +570,6 @@ int bnx2x_enable_msix(struct bnx2x *bp); int bnx2x_enable_msi(struct bnx2x *bp); /** - * bnx2x_low_latency_recv - LL callback - * - * @napi: napi structure - */ -int bnx2x_low_latency_recv(struct napi_struct *napi); - -/** * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure * 
* @bp: driver handle diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index d84efcd34fac..820b7e04bb5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -52,7 +52,7 @@ static const struct { { Q_STATS_OFFSET32(rx_skb_alloc_failed), 4, "[%s]: rx_skb_alloc_discard" }, { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, - + { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" }, { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, /* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "[%s]: tx_ucast_packets" }, @@ -74,117 +74,115 @@ static const struct { static const struct { long offset; int size; - u32 flags; -#define STATS_FLAGS_PORT 1 -#define STATS_FLAGS_FUNC 2 -#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) + bool is_port_stat; char string[ETH_GSTRING_LEN]; } bnx2x_stats_arr[] = { /* 1 */ { STATS_OFFSET32(total_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bytes" }, + 8, false, "rx_bytes" }, { STATS_OFFSET32(error_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, + 8, false, "rx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, + 8, false, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, + 8, false, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, + 8, false, "rx_bcast_packets" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), - 8, STATS_FLAGS_PORT, "rx_crc_errors" }, + 8, true, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), - 8, STATS_FLAGS_PORT, "rx_align_errors" }, + 8, true, "rx_align_errors" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, + 8, true, "rx_undersize_packets" }, { STATS_OFFSET32(etherstatsoverrsizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, + 8, true, "rx_oversize_packets" }, /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), - 8, STATS_FLAGS_PORT, "rx_fragments" }, + 8, true, "rx_fragments" }, { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), - 8, STATS_FLAGS_PORT, "rx_jabbers" }, + 8, true, "rx_jabbers" }, { STATS_OFFSET32(no_buff_discard_hi), - 8, STATS_FLAGS_BOTH, "rx_discards" }, + 8, false, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), - 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, + 4, true, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), - 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, + 4, true, "rx_mf_tag_discard" }, { STATS_OFFSET32(pfc_frames_received_hi), - 8, STATS_FLAGS_PORT, "pfc_frames_received" }, + 8, true, "pfc_frames_received" }, { STATS_OFFSET32(pfc_frames_sent_hi), - 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, + 8, true, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), - 8, STATS_FLAGS_PORT, "rx_brb_discard" }, + 8, true, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), - 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, + 8, true, "rx_brb_truncate" }, { STATS_OFFSET32(pause_frames_received_hi), - 8, STATS_FLAGS_PORT, "rx_pause_frames" }, + 8, true, "rx_pause_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, + 8, true, "rx_mac_ctrl_frames" }, { STATS_OFFSET32(nig_timer_max), - 4, STATS_FLAGS_PORT, "rx_constant_pause_events" 
}, + 4, true, "rx_constant_pause_events" }, /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), - 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, + 4, false, "rx_phy_ip_err_discards"}, { STATS_OFFSET32(rx_skb_alloc_failed), - 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, + 4, false, "rx_skb_alloc_discard" }, { STATS_OFFSET32(hw_csum_err), - 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, - + 4, false, "rx_csum_offload_errors" }, + { STATS_OFFSET32(driver_xoff), + 4, false, "tx_exhaustion_events" }, { STATS_OFFSET32(total_bytes_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bytes" }, + 8, false, "tx_bytes" }, { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), - 8, STATS_FLAGS_PORT, "tx_error_bytes" }, + 8, true, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, + 8, false, "tx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, + 8, false, "tx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, + 8, false, "tx_bcast_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), - 8, STATS_FLAGS_PORT, "tx_mac_errors" }, + 8, true, "tx_mac_errors" }, { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), - 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, + 8, true, "tx_carrier_errors" }, /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_single_collisions" }, + 8, true, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, + 8, true, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), - 8, STATS_FLAGS_PORT, "tx_deferred" }, + 8, true, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, + 8, true, "tx_excess_collisions" }, { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_late_collisions" }, + 8, true, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), - 8, STATS_FLAGS_PORT, "tx_total_collisions" }, + 8, true, "tx_total_collisions" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), - 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, + 8, true, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), - 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, + 8, true, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), - 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, + 8, true, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), - 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, + 8, true, "tx_256_to_511_byte_packets" }, /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), - 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, + 8, true, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, + 8, true, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, + 8, true, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), - 8, STATS_FLAGS_PORT, "tx_pause_frames" }, + 8, true, "tx_pause_frames" }, { STATS_OFFSET32(total_tpa_aggregations_hi), - 8, STATS_FLAGS_FUNC, 
"tpa_aggregations" }, + 8, false, "tpa_aggregations" }, { STATS_OFFSET32(total_tpa_aggregated_frames_hi), - 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, + 8, false, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), - 8, STATS_FLAGS_FUNC, "tpa_bytes"}, + 8, false, "tpa_bytes"}, { STATS_OFFSET32(recoverable_error), - 4, STATS_FLAGS_FUNC, "recoverable_errors" }, + 4, false, "recoverable_errors" }, { STATS_OFFSET32(unrecoverable_error), - 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, + 4, false, "unrecoverable_errors" }, { STATS_OFFSET32(driver_filtered_tx_pkt), - 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" }, + 4, false, "driver_filtered_tx_pkt" }, { STATS_OFFSET32(eee_tx_lpi), - 4, STATS_FLAGS_PORT, "Tx LPI entry count"} + 4, true, "Tx LPI entry count"} }; #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) @@ -3065,12 +3063,8 @@ static void bnx2x_self_test(struct net_device *dev, } } -#define IS_PORT_STAT(i) \ - ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) -#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define HIDE_PORT_STAT(bp) \ - ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ - IS_VF(bp)) +#define IS_PORT_STAT(i) (bnx2x_stats_arr[i].is_port_stat) +#define HIDE_PORT_STAT(bp) IS_VF(bp) /* ethtool statistics are displayed for all regular ethernet queues and the * fcoe L2 queue if not disabled @@ -3094,7 +3088,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) num_strings = 0; if (HIDE_PORT_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) - if (IS_FUNC_STAT(i)) + if (!IS_PORT_STAT(i)) num_strings++; } else num_strings += BNX2X_NUM_STATS; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index cafd5de675cf..27aa0802d87d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3013,8 +3013,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 12 -#define BCM_5710_FW_REVISION_VERSION 30 +#define BCM_5710_FW_MINOR_VERSION 13 +#define BCM_5710_FW_REVISION_VERSION 1 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3583,7 +3583,7 @@ enum classify_rule { CLASSIFY_RULE_OPCODE_MAC, CLASSIFY_RULE_OPCODE_VLAN, CLASSIFY_RULE_OPCODE_PAIR, - CLASSIFY_RULE_OPCODE_VXLAN, + CLASSIFY_RULE_OPCODE_IMAC_VNI, MAX_CLASSIFY_RULE }; @@ -3826,6 +3826,17 @@ struct eth_classify_header { __le32 echo; }; +/* + * Command for adding/removing a Inner-MAC/VNI classification rule + */ +struct eth_classify_imac_vni_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 imac_lsb; + __le16 imac_mid; + __le16 imac_msb; + __le16 reserved1; +}; /* * Command for adding/removing a MAC classification rule @@ -3869,14 +3880,6 @@ struct eth_classify_vlan_cmd { /* * Command for adding/removing a VXLAN classification rule */ -struct eth_classify_vxlan_cmd { - struct eth_classify_cmd_header header; - __le32 vni; - __le16 inner_mac_lsb; - __le16 inner_mac_mid; - __le16 inner_mac_msb; - __le16 reserved1; -}; /* * union for eth classification rule @@ -3885,7 +3888,7 @@ union eth_classify_rule_cmd { struct eth_classify_mac_cmd mac; struct eth_classify_vlan_cmd vlan; struct eth_classify_pair_cmd pair; - struct eth_classify_vxlan_cmd vxlan; + struct eth_classify_imac_vni_cmd imac_vni; }; /* @@ -5623,6 +5626,14 @@ enum igu_mode { MAX_IGU_MODE }; +/* + * Inner Headers Classification Type + */ +enum inner_clss_type { + 
INNER_CLSS_DISABLED, + INNER_CLSS_USE_VLAN, + INNER_CLSS_USE_VNI, + MAX_INNER_CLSS_TYPE}; /* * IP versions @@ -5953,14 +5964,6 @@ enum ts_offset_cmd { MAX_TS_OFFSET_CMD }; -/* Tunnel Mode */ -enum tunnel_mode { - TUNN_MODE_NONE, - TUNN_MODE_VXLAN, - TUNN_MODE_GRE, - MAX_TUNNEL_MODE -}; - /* zone A per-queue data */ struct ustorm_queue_zone_data { struct ustorm_eth_rx_producers eth_rx_producers; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2e611dc5f162..6c4e3a69976f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13004,9 +13004,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = bnx2x_low_latency_recv, -#endif .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bdf094fb6ef9..df835f5e46d8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -72,8 +72,10 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define BNXT_TX_PUSH_THRESH 92 enum board_idx { + BCM57301, BCM57302, BCM57304, + BCM57402, BCM57404, BCM57406, BCM57304_VF, @@ -84,17 +86,21 @@ enum board_idx { static const struct { char *name; } board_info[] = { - { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" }, + { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, #ifdef CONFIG_BNXT_SRIOV @@ -173,7 +179,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) u32 len, free_size, vlan_tag_flags, cfa_action, flags; u16 prod, last_frag; struct pci_dev *pdev = bp->pdev; - struct bnxt_napi *bnapi; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; @@ -183,8 +188,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; + txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(dev, i); prod = txr->tx_prod; @@ -417,8 +421,8 @@ tx_dma_error: static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) { - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; - int index = bnapi->index; + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + int index = txr - &bp->tx_ring[0]; struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); u16 cons = 
txr->tx_cons; struct pci_dev *pdev = bp->pdev; @@ -596,7 +600,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, { struct bnxt *bp = bnapi->bp; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; u32 i; @@ -675,7 +679,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, { struct pci_dev *pdev = bp->pdev; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u32 i; @@ -856,8 +860,13 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, struct tcphdr *th; int payload_off, tcp_opt_len = 0; int len, nw_off; + u16 segs; + + segs = TPA_END_TPA_SEGS(tpa_end); + if (segs == 1) + return skb; - NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end); + NAPI_GRO_CB(skb)->count = segs; skb_shinfo(skb)->gso_size = le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); skb_shinfo(skb)->gso_type = tpa_info->gso_type; @@ -929,7 +938,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, bool *agg_event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u8 agg_id = TPA_END_AGG_ID(tpa_end); u8 *data, agg_bufs; u16 cp_cons = RING_CMP(*raw_cons); @@ -1045,7 +1054,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, bool *agg_event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; struct net_device *dev = bp->dev; struct rx_cmp *rxcmp; struct rx_cmp_ext *rxcmp1; @@ -1187,8 +1196,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb->csum_level = RX_CMP_ENCAP(rxcmp1); } } else { - if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) - cpr->rx_l4_csum_errors++; + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { + if (dev->features & NETIF_F_RXCSUM) + cpr->rx_l4_csum_errors++; + } } skb_record_rx_queue(skb, bnapi->index); @@ -1377,7 +1388,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnxt_tx_int(bp, bnapi, tx_pkts); if (rx_event) { - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); @@ -1446,19 +1457,14 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) int i, max_idx; struct pci_dev *pdev = bp->pdev; - if (!bp->bnapi) + if (!bp->tx_ring) return; max_idx = bp->tx_nr_pages * TX_DESC_CNT; for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; int j; - if (!bnapi) - continue; - - txr = &bnapi->tx_ring; for (j = 0; j < max_idx;) { struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct sk_buff *skb = tx_buf->skb; @@ -1504,21 +1510,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) int i, max_idx, max_agg_idx; struct pci_dev *pdev = bp->pdev; - if (!bp->bnapi) + if (!bp->rx_ring) return; max_idx = bp->rx_nr_pages * RX_DESC_CNT; max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - 
struct bnxt_rx_ring_info *rxr; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; int j; - if (!bnapi) - continue; - - rxr = &bnapi->rx_ring; - if (rxr->rx_tpa) { for (j = 0; j < MAX_TPA; j++) { struct bnxt_tpa_info *tpa_info = @@ -1646,19 +1646,13 @@ static void bnxt_free_rx_rings(struct bnxt *bp) { int i; - if (!bp->bnapi) + if (!bp->rx_ring) return; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - rxr = &bnapi->rx_ring; - kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -1677,6 +1671,9 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) { int i, rc, agg_rings = 0, tpa_rings = 0; + if (!bp->rx_ring) + return -ENOMEM; + if (bp->flags & BNXT_FLAG_AGG_RINGS) agg_rings = 1; @@ -1684,14 +1681,9 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) tpa_rings = 1; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - rxr = &bnapi->rx_ring; ring = &rxr->rx_ring_struct; rc = bnxt_alloc_ring(bp, ring); @@ -1729,19 +1721,13 @@ static void bnxt_free_tx_rings(struct bnxt *bp) int i; struct pci_dev *pdev = bp->pdev; - if (!bp->bnapi) + if (!bp->tx_ring) return; for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - txr = &bnapi->tx_ring; - if (txr->tx_push) { dma_free_coherent(&pdev->dev, bp->tx_push_size, txr->tx_push, txr->tx_push_mapping); @@ -1775,14 +1761,9 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) } for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - txr = &bnapi->tx_ring; ring = &txr->tx_ring_struct; rc = bnxt_alloc_ring(bp, ring); @@ -1885,7 +1866,10 @@ static void bnxt_init_ring_struct(struct bnxt *bp) ring->dma_arr = cpr->cp_desc_mapping; ring->vmem_size = 0; - rxr = &bnapi->rx_ring; + rxr = bnapi->rx_ring; + if (!rxr) + goto skip_rx; + ring = &rxr->rx_ring_struct; ring->nr_pages = bp->rx_nr_pages; ring->page_size = HW_RXBD_RING_SIZE; @@ -1902,7 +1886,11 @@ static void bnxt_init_ring_struct(struct bnxt *bp) ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; ring->vmem = (void **)&rxr->rx_agg_ring; - txr = &bnapi->tx_ring; +skip_rx: + txr = bnapi->tx_ring; + if (!txr) + continue; + ring = &txr->tx_ring_struct; ring->nr_pages = bp->tx_nr_pages; ring->page_size = HW_RXBD_RING_SIZE; @@ -1938,22 +1926,18 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) { struct net_device *dev = bp->dev; - struct bnxt_napi *bnapi = bp->bnapi[ring_nr]; struct bnxt_rx_ring_info *rxr; struct bnxt_ring_struct *ring; u32 prod, type; int i; - if (!bnapi) - return -EINVAL; - type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; if (NET_IP_ALIGN == 2) type |= RX_BD_FLAGS_SOP; - rxr = &bnapi->rx_ring; + rxr = &bp->rx_ring[ring_nr]; ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); @@ -1969,11 +1953,12 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) rxr->rx_prod 
= prod; ring->fw_ring_id = INVALID_HW_RING_ID; + ring = &rxr->rx_agg_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) return 0; - ring = &rxr->rx_agg_ring_struct; - type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; @@ -1989,7 +1974,6 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) prod = NEXT_RX_AGG(prod); } rxr->rx_agg_prod = prod; - ring->fw_ring_id = INVALID_HW_RING_ID; if (bp->flags & BNXT_FLAG_TPA) { if (rxr->rx_tpa) { @@ -2035,8 +2019,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp) MAX_SKB_FRAGS + 1); for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; @@ -2423,14 +2406,18 @@ static void bnxt_clear_ring_indices(struct bnxt *bp) cpr = &bnapi->cp_ring; cpr->cp_raw_cons = 0; - txr = &bnapi->tx_ring; - txr->tx_prod = 0; - txr->tx_cons = 0; + txr = bnapi->tx_ring; + if (txr) { + txr->tx_prod = 0; + txr->tx_cons = 0; + } - rxr = &bnapi->rx_ring; - rxr->rx_prod = 0; - rxr->rx_agg_prod = 0; - rxr->rx_sw_agg_prod = 0; + rxr = bnapi->rx_ring; + if (rxr) { + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + } } } @@ -2496,6 +2483,10 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); + kfree(bp->tx_ring); + bp->tx_ring = NULL; + kfree(bp->rx_ring); + bp->rx_ring = NULL; kfree(bp->bnapi); bp->bnapi = NULL; } else { @@ -2505,7 +2496,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) { - int i, rc, size, arr_size; + int i, j, rc, size, arr_size; void *bnapi; if (irq_re_init) { @@ -2527,6 +2518,33 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) bp->bnapi[i]->bp = bp; } + bp->rx_ring = kcalloc(bp->rx_nr_rings, + sizeof(struct bnxt_rx_ring_info), + GFP_KERNEL); + if (!bp->rx_ring) + return -ENOMEM; + + for (i = 0; i < bp->rx_nr_rings; i++) { + bp->rx_ring[i].bnapi = bp->bnapi[i]; + bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; + } + + bp->tx_ring = kcalloc(bp->tx_nr_rings, + sizeof(struct bnxt_tx_ring_info), + GFP_KERNEL); + if (!bp->tx_ring) + return -ENOMEM; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + j = 0; + else + j = bp->rx_nr_rings; + + for (i = 0; i < bp->tx_nr_rings; i++, j++) { + bp->tx_ring[i].bnapi = bp->bnapi[j]; + bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; + } + rc = bnxt_alloc_stats(bp); if (rc) goto alloc_mem_err; @@ -2596,6 +2614,9 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) /* Write request msg to hwrm channel */ __iowrite32_copy(bp->bar0, data, msg_len / 4); + for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) + writel(0, bp->bar0 + i); + /* currently supports only one outstanding message */ if (intr_process) bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & @@ -2693,17 +2714,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) req.ver_upd = DRV_VER_UPD; if (BNXT_PF(bp)) { - unsigned long vf_req_snif_bmap[4]; + DECLARE_BITMAP(vf_req_snif_bmap, 256); u32 *data = (u32 *)vf_req_snif_bmap; - memset(vf_req_snif_bmap, 0, 32); + memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); - for (i = 0; i < 8; i++) { - req.vf_req_fwd[i] = 
cpu_to_le32(*data); - data++; - } + for (i = 0; i < 8; i++) + req.vf_req_fwd[i] = cpu_to_le32(data[i]); + req.enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } @@ -2711,6 +2731,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) +{ + struct hwrm_func_drv_unrgtr_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) { u32 rc = 0; @@ -2773,7 +2801,7 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); - req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id); + req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); @@ -2806,7 +2834,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ - CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID) + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) @@ -2825,7 +2853,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.ethertype = htons(ETH_P_IP); memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); - req.ipaddr_type = 4; + req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; req.ip_protocol = keys->basic.ip_proto; req.src_ipaddr[0] = keys->addrs.v4addrs.src; @@ -2838,7 +2866,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.dst_port = keys->ports.dst; req.dst_port_mask = cpu_to_be16(0xffff); - req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req.dst_id = cpu_to_le16(vnic->fw_vnic_id); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) @@ -2858,10 +2886,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); - req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | - CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); memcpy(req.l2_addr, mac_addr, ETH_ALEN); req.l2_addr_mask[0] = 0xff; @@ -2931,7 +2959,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) req.enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | - VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS); + VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | + VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); /* Number of segs are log2 units, and first packet is not * included as part of this units. 
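For reference, two short illustrative sketches of kernel idioms used in the hunks above; neither is part of the patch, and the request-type value shown is hypothetical.

The bnxt_hwrm_func_drv_rgtr() change replaces a hard-coded unsigned long array with DECLARE_BITMAP(vf_req_snif_bmap, 256), so the memset() and the copy into the eight 32-bit vf_req_fwd words are sized from the bitmap itself rather than from magic numbers. A minimal sketch of that idiom:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustrative only: DECLARE_BITMAP(map, 256) expands to
 * unsigned long map[BITS_TO_LONGS(256)], i.e. 32 bytes on both
 * 32-bit and 64-bit builds, so sizeof()-based clearing is always
 * correct.
 */
static void example_fill_vf_req_bitmap(void)
{
	DECLARE_BITMAP(vf_req_bmap, 256);
	static const u16 req_types[] = { 0x2a };	/* hypothetical HWRM request type */
	u32 *data = (u32 *)vf_req_bmap;
	int i;

	bitmap_zero(vf_req_bmap, 256);
	for (i = 0; i < ARRAY_SIZE(req_types); i++)
		__set_bit(req_types[i], vf_req_bmap);

	/* the first eight 32-bit words cover request types 0..255 */
	for (i = 0; i < 8; i++)
		pr_info("vf_req_fwd[%d] = %08x\n", i, data[i]);
}

The TPA hunk just above enables VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN, and its comment notes that max_agg_segs is carried in log2 units. A minimal sketch of that encoding, using the same ilog2() helper the following hunk calls (the segment budget here is made up):

#include <linux/log2.h>
#include <linux/types.h>

/* Illustrative only: a hypothetical budget of 32 aggregation segments
 * is sent to firmware as ilog2(32) == 5.
 */
static u16 example_encode_max_agg_segs(u32 nsegs)
{
	return (u16)ilog2(nsegs);
}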
@@ -2949,6 +2978,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) segs = ilog2(nsegs); req.max_agg_segs = cpu_to_le16(segs); req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + + req.min_agg_len = cpu_to_le32(512); } req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -3059,7 +3090,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { - int grp_idx = 0; + unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input req = {0}; @@ -3070,10 +3101,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); req.cos_rule = cpu_to_le16(0xffff); if (vnic->flags & BNXT_VNIC_RSS_FLAG) - grp_idx = 0; + ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) - grp_idx = vnic_id - 1; + ring = vnic_id - 1; + grp_idx = bp->rx_ring[ring].bnapi->index; req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); @@ -3114,22 +3146,25 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp) bnxt_hwrm_vnic_free_one(bp, i); } -static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id, - u16 end_grp_id) +static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, + unsigned int start_rx_ring_idx, + unsigned int nr_rings) { - u32 rc = 0, i, j; + int rc = 0; + unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; struct hwrm_vnic_alloc_input req = {0}; struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; /* map ring groups to this vnic */ - for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) { - if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) { + for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { + grp_idx = bp->rx_ring[i].bnapi->index; + if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", - j, (end_grp_id - start_grp_id)); + j, nr_rings); break; } bp->vnic_info[vnic_id].fw_grp_ids[j] = - bp->grp_info[i].fw_grp_id; + bp->grp_info[grp_idx].fw_grp_id; } bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; @@ -3156,20 +3191,22 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) struct hwrm_ring_grp_alloc_input req = {0}; struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; + unsigned int grp_idx = bp->rx_ring[i].bnapi->index; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); - req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); - req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id); - req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id); - req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx); + req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) break; - bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id); + bp->grp_info[grp_idx].fw_grp_id = + le32_to_cpu(resp->ring_group_id); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -3294,75 +3331,66 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { int i, rc = 0; - if (bp->cp_nr_rings) { - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_ring_struct *ring = 
&cpr->cp_ring_struct; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_CMPL, i, - INVALID_STATS_CTX_ID); - if (rc) - goto err_out; - cpr->cp_doorbell = bp->bar1 + i * 0x80; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; - } + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + cpr->cp_doorbell = bp->bar1 + i * 0x80; + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; } - if (bp->tx_nr_rings) { - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx; + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 map_idx = txr->bnapi->index; + u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_TX, i, - fw_stats_ctx); - if (rc) - goto err_out; - txr->tx_doorbell = bp->bar1 + i * 0x80; - } + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, + map_idx, fw_stats_ctx); + if (rc) + goto err_out; + txr->tx_doorbell = bp->bar1 + map_idx * 0x80; } - if (bp->rx_nr_rings) { - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 map_idx = rxr->bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_RX, i, - INVALID_STATS_CTX_ID); - if (rc) - goto err_out; - rxr->rx_doorbell = bp->bar1 + i * 0x80; - writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); - bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; - } + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, + map_idx, INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; } if (bp->flags & BNXT_FLAG_AGG_RINGS) { for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 map_idx = grp_idx + bp->rx_nr_rings; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_AGG, - bp->rx_nr_rings + i, + map_idx, INVALID_STATS_CTX_ID); if (rc) goto err_out; - rxr->rx_agg_doorbell = - bp->bar1 + (bp->rx_nr_rings + i) * 0x80; + rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); - bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id; + bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } err_out: @@ -3409,91 +3437,75 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, return 0; } -static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) +static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { 
- int i, rc = 0; + int i; if (!bp->bnapi) - return 0; + return; - if (bp->tx_nr_rings) { - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_TX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - } + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 grp_idx = txr->bnapi->index; + u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_TX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; } } - if (bp->rx_nr_rings) { - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_RX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[i].rx_fw_ring_id = - INVALID_HW_RING_ID; - } + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].rx_fw_ring_id = + INVALID_HW_RING_ID; } } - if (bp->rx_agg_nr_pages) { - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; - struct bnxt_ring_struct *ring = - &rxr->rx_agg_ring_struct; - u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_RX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[i].agg_fw_ring_id = - INVALID_HW_RING_ID; - } + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? 
cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].agg_fw_ring_id = + INVALID_HW_RING_ID; } } - if (bp->cp_nr_rings) { - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_CMPL, - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[i].cp_fw_ring_id = - INVALID_HW_RING_ID; - } + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; } } - - return rc; } int bnxt_hwrm_set_coal(struct bnxt *bp) @@ -3605,7 +3617,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) return 0; } -static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {0}; @@ -3629,9 +3641,10 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); - pf->max_pf_tx_rings = pf->max_tx_rings; pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); - pf->max_pf_rx_rings = pf->max_rx_rings; + pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); + if (!pf->max_hw_ring_grps) + pf->max_hw_ring_grps = pf->max_tx_rings; pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); pf->max_vnics = le16_to_cpu(resp->max_vnics); pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); @@ -3659,6 +3672,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); + if (!vf->max_hw_ring_grps) + vf->max_hw_ring_grps = vf->max_tx_rings; vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); vf->max_vnics = le16_to_cpu(resp->max_vnics); vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); @@ -3735,14 +3751,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); - if (req.hwrm_intf_maj != resp->hwrm_intf_maj || - req.hwrm_intf_min != resp->hwrm_intf_min || - req.hwrm_intf_upd != resp->hwrm_intf_upd) { - netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n", + if (resp->hwrm_intf_maj < 1) { + netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", resp->hwrm_intf_maj, resp->hwrm_intf_min, - resp->hwrm_intf_upd, req.hwrm_intf_maj, - req.hwrm_intf_min, req.hwrm_intf_upd); - netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n"); + resp->hwrm_intf_upd); + netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, @@ -3868,7 +3881,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) break; bp->vnic_info[vnic_id].flags |= 
BNXT_VNIC_RFS_FLAG; - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1); + rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", vnic_id, rc); @@ -3945,8 +3958,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) } bp->vnic_info[0].uc_filter_count = 1; - bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST | - CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) bp->vnic_info[0].rx_mask |= @@ -4027,20 +4039,42 @@ static int bnxt_set_real_num_queues(struct bnxt *bp) return rc; #ifdef CONFIG_RFS_ACCEL - if (bp->rx_nr_rings) + if (bp->flags & BNXT_FLAG_RFS) dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); - if (!dev->rx_cpu_rmap) - rc = -ENOMEM; #endif return rc; } +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared) +{ + int _rx = *rx, _tx = *tx; + + if (shared) { + *rx = min_t(int, _rx, max); + *tx = min_t(int, _tx, max); + } else { + if (max < 2) + return -ENOMEM; + + while (_rx + _tx > max) { + if (_rx > _tx && _rx > 1) + _rx--; + else if (_tx > 1) + _tx--; + } + *rx = _rx; + *tx = _tx; + } + return 0; +} + static int bnxt_setup_msix(struct bnxt *bp) { struct msix_entry *msix_ent; struct net_device *dev = bp->dev; - int i, total_vecs, rc = 0; + int i, total_vecs, rc = 0, min = 1; const int len = sizeof(bp->irq_tbl[0].name); bp->flags &= ~BNXT_FLAG_USING_MSIX; @@ -4055,7 +4089,10 @@ static int bnxt_setup_msix(struct bnxt *bp) msix_ent[i].vector = 0; } - total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs); + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + min = 2; + + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); if (total_vecs < 0) { rc = -ENODEV; goto msix_setup_exit; @@ -4066,8 +4103,11 @@ static int bnxt_setup_msix(struct bnxt *bp) int tcs; /* Trim rings based upon num of vectors allocated */ - bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings); - bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings); + rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, + total_vecs, min == 1); + if (rc) + goto msix_setup_exit; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; tcs = netdev_get_num_tc(dev); if (tcs > 1) { @@ -4086,12 +4126,21 @@ static int bnxt_setup_msix(struct bnxt *bp) } } } - bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); + bp->cp_nr_rings = total_vecs; for (i = 0; i < bp->cp_nr_rings; i++) { + char *attr; + bp->irq_tbl[i].vector = msix_ent[i].vector; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + attr = "TxRx"; + else if (i < bp->rx_nr_rings) + attr = "rx"; + else + attr = "tx"; + snprintf(bp->irq_tbl[i].name, len, - "%s-%s-%d", dev->name, "TxRx", i); + "%s-%s-%d", dev->name, attr, i); bp->irq_tbl[i].handler = bnxt_msix; } rc = bnxt_set_real_num_queues(bp); @@ -4129,6 +4178,7 @@ static int bnxt_setup_inta(struct bnxt *bp) bp->tx_nr_rings = 1; bp->cp_nr_rings = 1; bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->flags |= BNXT_FLAG_SHARED_RINGS; bp->irq_tbl[0].vector = bp->pdev->irq; snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 0); @@ -4177,7 +4227,7 @@ static void bnxt_free_irq(struct bnxt *bp) static int bnxt_request_irq(struct bnxt *bp) { - int i, rc = 0; + int i, j, rc = 0; unsigned long flags = 0; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; @@ -4186,14 +4236,15 @@ static int bnxt_request_irq(struct bnxt *bp) if (!(bp->flags & 
BNXT_FLAG_USING_MSIX)) flags = IRQF_SHARED; - for (i = 0; i < bp->cp_nr_rings; i++) { + for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { struct bnxt_irq *irq = &bp->irq_tbl[i]; #ifdef CONFIG_RFS_ACCEL - if (rmap && (i < bp->rx_nr_rings)) { + if (rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); if (rc) netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", - i); + j); + j++; } #endif rc = request_irq(irq->vector, irq->handler, flags, irq->name, @@ -4231,12 +4282,10 @@ static void bnxt_init_napi(struct bnxt *bp) bnapi = bp->bnapi[i]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); - napi_hash_add(&bnapi->napi); } } else { bnapi = bp->bnapi[0]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); - napi_hash_add(&bnapi->napi); } } @@ -4266,14 +4315,12 @@ static void bnxt_enable_napi(struct bnxt *bp) static void bnxt_tx_disable(struct bnxt *bp) { int i; - struct bnxt_napi *bnapi; struct bnxt_tx_ring_info *txr; struct netdev_queue *txq; - if (bp->bnapi) { + if (bp->tx_ring) { for (i = 0; i < bp->tx_nr_rings; i++) { - bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; + txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(bp->dev, i); __netif_tx_lock(txq, smp_processor_id()); txr->dev_state = BNXT_DEV_STATE_CLOSING; @@ -4288,13 +4335,11 @@ static void bnxt_tx_disable(struct bnxt *bp) static void bnxt_tx_enable(struct bnxt *bp) { int i; - struct bnxt_napi *bnapi; struct bnxt_tx_ring_info *txr; struct netdev_queue *txq; for (i = 0; i < bp->tx_nr_rings; i++) { - bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; + txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = 0; } @@ -4356,7 +4401,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; link_info->force_pause_setting = resp->force_pause; - link_info->duplex_setting = resp->duplex_setting; + link_info->duplex_setting = resp->duplex; if (link_info->phy_link_status == BNXT_LINK_LINK) link_info->link_speed = le16_to_cpu(resp->link_speed); else @@ -4603,7 +4648,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bp->nge_port_cnt = 1; } - bp->state = BNXT_STATE_OPEN; + set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); /* Enable TX queues */ bnxt_tx_enable(bp); @@ -4679,8 +4724,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Change device state to avoid TX queue wake up's */ bnxt_tx_disable(bp); - bp->state = BNXT_STATE_CLOSED; - cancel_work_sync(&bp->sp_task); + clear_bit(BNXT_STATE_OPEN, &bp->state); + smp_mb__after_atomic(); + while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) + msleep(20); /* Flush rings before disabling interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -4931,9 +4978,32 @@ skip_uc: return rc; } +static bool bnxt_rfs_capable(struct bnxt *bp) +{ +#ifdef CONFIG_RFS_ACCEL + struct bnxt_pf_info *pf = &bp->pf; + int vnics; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) + return false; + + vnics = 1 + bp->rx_nr_rings; + if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) + return false; + + return true; +#else + return false; +#endif +} + static netdev_features_t bnxt_fix_features(struct net_device *dev, netdev_features_t features) { + struct bnxt *bp = netdev_priv(dev); + + if (!bnxt_rfs_capable(bp)) + features &= ~NETIF_F_NTUPLE; return features; } @@ -4974,7 +5044,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) bp->flags = flags; - if 
(!netif_running(dev)) { + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { if (update_tpa) bnxt_set_ring_params(bp); return rc; @@ -4998,31 +5068,53 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } +static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + int i = bnapi->index; + + if (!txr) + return; + + netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", + i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, + txr->tx_cons); +} + +static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + int i = bnapi->index; + + if (!rxr) + return; + + netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", + i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, + rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, + rxr->rx_sw_agg_prod); +} + +static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i = bnapi->index; + + netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); +} + static void bnxt_dbg_dump_states(struct bnxt *bp) { int i; struct bnxt_napi *bnapi; - struct bnxt_tx_ring_info *txr; - struct bnxt_rx_ring_info *rxr; - struct bnxt_cp_ring_info *cpr; for (i = 0; i < bp->cp_nr_rings; i++) { bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; - rxr = &bnapi->rx_ring; - cpr = &bnapi->cp_ring; if (netif_msg_drv(bp)) { - netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", - i, txr->tx_ring_struct.fw_ring_id, - txr->tx_prod, txr->tx_cons); - netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", - i, rxr->rx_ring_struct.fw_ring_id, - rxr->rx_prod, - rxr->rx_agg_ring_struct.fw_ring_id, - rxr->rx_agg_prod, rxr->rx_sw_agg_prod); - netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", - i, cpr->cp_ring_struct.fw_ring_id, - cpr->cp_raw_cons); + bnxt_dump_tx_sw_state(bnapi); + bnxt_dump_rx_sw_state(bnapi); + bnxt_dump_cp_sw_state(bnapi); } } } @@ -5030,8 +5122,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) static void bnxt_reset_task(struct bnxt *bp) { bnxt_dbg_dump_states(bp); - if (netif_running(bp->dev)) - bnxt_tx_disable(bp); /* prevent tx timout again */ + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + bnxt_open_nic(bp, false, false); + } } static void bnxt_tx_timeout(struct net_device *dev) @@ -5081,8 +5175,12 @@ static void bnxt_sp_task(struct work_struct *work) struct bnxt *bp = container_of(work, struct bnxt, sp_task); int rc; - if (bp->state != BNXT_STATE_OPEN) + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); return; + } if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) bnxt_cfg_rx_mode(bp); @@ -5106,8 +5204,19 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); } - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { + /* bnxt_reset_task() calls bnxt_close_nic() which waits + * for BNXT_STATE_IN_SP_TASK to clear. 
+ */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); bnxt_reset_task(bp); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); + } + + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) @@ -5186,7 +5295,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->timer.function = bnxt_timer; bp->current_interval = BNXT_TIMER_INTERVAL; - bp->state = BNXT_STATE_CLOSED; + clear_bit(BNXT_STATE_OPEN, &bp->state); return 0; @@ -5276,10 +5385,14 @@ static int bnxt_setup_tc(struct net_device *dev, u8 tc) return 0; if (tc) { - int max_rx_rings, max_tx_rings; + int max_rx_rings, max_tx_rings, rc; + bool sh = false; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); - if (bp->tx_nr_rings_per_tc * tc > max_tx_rings) + rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); + if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) return -ENOMEM; } @@ -5533,6 +5646,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) cancel_work_sync(&bp->sp_task); bp->sp_event = 0; + bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); pci_iounmap(pdev, bp->bar2); pci_iounmap(pdev, bp->bar1); @@ -5592,28 +5706,64 @@ static int bnxt_get_max_irq(struct pci_dev *pdev) return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; } -void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx) +static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, + int *max_cp) { - int max_rings = 0; + int max_ring_grps = 0; - if (BNXT_PF(bp)) { - *max_tx = bp->pf.max_pf_tx_rings; - *max_rx = bp->pf.max_pf_rx_rings; - max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); - max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs); - } else { #ifdef CONFIG_BNXT_SRIOV + if (!BNXT_PF(bp)) { *max_tx = bp->vf.max_tx_rings; *max_rx = bp->vf.max_rx_rings; - max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); - max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs); + *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); + *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); + max_ring_grps = bp->vf.max_hw_ring_grps; + } else #endif + { + *max_tx = bp->pf.max_tx_rings; + *max_rx = bp->pf.max_rx_rings; + *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); + *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); + max_ring_grps = bp->pf.max_hw_ring_grps; } + if (bp->flags & BNXT_FLAG_AGG_RINGS) *max_rx >>= 1; + *max_rx = min_t(int, *max_rx, max_ring_grps); +} + +int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) +{ + int rx, tx, cp; + + _bnxt_get_max_rings(bp, &rx, &tx, &cp); + if (!rx || !tx || !cp) + return -ENOMEM; - *max_rx = min_t(int, *max_rx, max_rings); - *max_tx = min_t(int, *max_tx, max_rings); + *max_rx = rx; + *max_tx = tx; + return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); +} + +static int bnxt_set_dflt_rings(struct bnxt *bp) +{ + int dflt_rings, max_rx_rings, max_tx_rings, rc; + bool sh = true; + + if (sh) + bp->flags |= BNXT_FLAG_SHARED_RINGS; + dflt_rings = netif_get_num_default_rss_queues(); + rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); + if (rc) + return rc; + bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); + bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : + bp->tx_nr_rings + bp->rx_nr_rings; + bp->num_stat_ctxs = bp->cp_nr_rings; + return rc; } static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -5621,7 +5771,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; struct net_device *dev; struct bnxt *bp; - int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings; + int rc, max_irqs; if (version_printed++ == 0) pr_info("%s", version); @@ -5636,11 +5786,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (bnxt_vf_pciid(ent->driver_data)) bp->flags |= BNXT_FLAG_VF; - if (pdev->msix_cap) { + if (pdev->msix_cap) bp->flags |= BNXT_FLAG_MSIX_CAP; - if (BNXT_PF(bp)) - bp->flags |= BNXT_FLAG_RFS; - } rc = bnxt_init_board(pdev, dev); if (rc < 0) @@ -5659,9 +5806,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; - if (bp->flags & BNXT_FLAG_RFS) - dev->hw_features |= NETIF_F_NTUPLE; - dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | @@ -5706,19 +5850,21 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); - dflt_rings = netif_get_num_default_rss_queues(); if (BNXT_PF(bp)) bp->pf.max_irqs = max_irqs; #if defined(CONFIG_BNXT_SRIOV) else bp->vf.max_irqs = max_irqs; #endif - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); - bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); - bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; - bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); - bp->num_stat_ctxs = bp->cp_nr_rings; + bnxt_set_dflt_rings(bp); + + if (BNXT_PF(bp)) { + dev->hw_features |= NETIF_F_NTUPLE; + if (bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + dev->features |= NETIF_F_NTUPLE; + } + } if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 674bc5159b91..8af3ca8efcef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -11,11 +11,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "0.1.24" +#define DRV_MODULE_VERSION "1.0.0" -#define DRV_VER_MAJ 0 -#define DRV_VER_MIN 1 -#define DRV_VER_UPD 24 +#define DRV_VER_MAJ 1 +#define DRV_VER_MIN 0 +#define DRV_VER_UPD 0 struct tx_bd { __le32 tx_bd_len_flags_type; @@ -528,6 +528,7 @@ struct tx_push_bd { }; struct bnxt_tx_ring_info { + struct bnxt_napi *bnapi; u16 tx_prod; u16 tx_cons; void __iomem *tx_doorbell; @@ -558,6 +559,7 @@ struct bnxt_tpa_info { }; struct bnxt_rx_ring_info { + struct bnxt_napi *bnapi; u16 rx_prod; u16 rx_agg_prod; u16 rx_sw_agg_prod; @@ -604,8 +606,8 @@ struct bnxt_napi { int index; struct bnxt_cp_ring_info cp_ring; - struct bnxt_rx_ring_info rx_ring; - struct bnxt_tx_ring_info tx_ring; + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_tx_ring_info *tx_ring; #ifdef CONFIG_NET_RX_BUSY_POLL atomic_t poll_state; @@ -695,6 +697,7 @@ struct bnxt_vf_info { u16 max_cp_rings; u16 max_tx_rings; u16 max_rx_rings; + u16 max_hw_ring_grps; u16 max_l2_ctxs; u16 max_irqs; u16 max_vnics; @@ -722,9 +725,8 @@ struct bnxt_pf_info { u16 max_rsscos_ctxs; u16 max_cp_rings; u16 max_tx_rings; /* HW assigned max 
tx rings for this PF */ - u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */ u16 max_rx_rings; /* HW assigned max rx rings for this PF */ - u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */ + u16 max_hw_ring_grps; u16 max_irqs; u16 max_l2_ctxs; u16 max_vnics; @@ -875,6 +877,8 @@ struct bnxt { #define BNXT_FLAG_USING_MSIX 0x40 #define BNXT_FLAG_MSIX_CAP 0x80 #define BNXT_FLAG_RFS 0x100 + #define BNXT_FLAG_SHARED_RINGS 0x200 + #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ BNXT_FLAG_STRIP_VLAN) @@ -884,6 +888,9 @@ struct bnxt { struct bnxt_napi **bnapi; + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_tx_ring_info *tx_ring; + u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u32 rx_ring_size; @@ -913,6 +920,8 @@ struct bnxt { int cp_nr_rings; int num_stat_ctxs; + + /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; int nr_vnics; @@ -925,9 +934,9 @@ struct bnxt { struct timer_list timer; - int state; -#define BNXT_STATE_CLOSED 0 -#define BNXT_STATE_OPEN 1 + unsigned long state; +#define BNXT_STATE_OPEN 0 +#define BNXT_STATE_IN_SP_TASK 1 struct bnxt_irq *irq_tbl; u8 mac_addr[ETH_ALEN]; @@ -1084,9 +1093,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message(struct bnxt *, void *, u32, int); int bnxt_hwrm_set_coal(struct bnxt *); +int bnxt_hwrm_func_qcaps(struct bnxt *); int bnxt_hwrm_set_pause(struct bnxt *); int bnxt_hwrm_set_link_setting(struct bnxt *, bool); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); -void bnxt_get_max_rings(struct bnxt *, int *, int *); +int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 45bd628eaf3a..922b898e7a32 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -211,7 +211,10 @@ static void bnxt_get_channels(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); + channel->max_combined = max_rx_rings; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false); tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; @@ -219,9 +222,12 @@ static void bnxt_get_channels(struct net_device *dev, channel->max_rx = max_rx_rings; channel->max_tx = max_tx_rings; channel->max_other = 0; - channel->max_combined = 0; - channel->rx_count = bp->rx_nr_rings; - channel->tx_count = bp->tx_nr_rings_per_tc; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) { + channel->combined_count = bp->rx_nr_rings; + } else { + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; + } } static int bnxt_set_channels(struct net_device *dev, @@ -230,19 +236,35 @@ static int bnxt_set_channels(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; u32 rc = 0; + bool sh = false; + + if (channel->other_count) + return -EINVAL; + + if (!channel->combined_count && + (!channel->rx_count || !channel->tx_count)) + return -EINVAL; - if (channel->other_count || channel->combined_count || - !channel->rx_count || !channel->tx_count) + if (channel->combined_count && + (channel->rx_count || channel->tx_count)) return 
-EINVAL; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + if (channel->combined_count) + sh = true; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); + tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; - if (channel->rx_count > max_rx_rings || - channel->tx_count > max_tx_rings) - return -EINVAL; + if (sh && (channel->combined_count > max_rx_rings || + channel->combined_count > max_tx_rings)) + return -ENOMEM; + + if (!sh && (channel->rx_count > max_rx_rings || + channel->tx_count > max_tx_rings)) + return -ENOMEM; if (netif_running(dev)) { if (BNXT_PF(bp)) { @@ -258,14 +280,27 @@ static int bnxt_set_channels(struct net_device *dev, } } - bp->rx_nr_rings = channel->rx_count; - bp->tx_nr_rings_per_tc = channel->tx_count; + if (sh) { + bp->flags |= BNXT_FLAG_SHARED_RINGS; + bp->rx_nr_rings = channel->combined_count; + bp->tx_nr_rings_per_tc = channel->combined_count; + } else { + bp->flags &= ~BNXT_FLAG_SHARED_RINGS; + bp->rx_nr_rings = channel->rx_count; + bp->tx_nr_rings_per_tc = channel->tx_count; + } + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; if (tcs > 1) bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; - bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + + bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : + bp->tx_nr_rings + bp->rx_nr_rings; + bp->num_stat_ctxs = bp->cp_nr_rings; + /* After changing number of rx channels, update NTUPLE feature. */ + netdev_update_features(dev); if (netif_running(dev)) { rc = bnxt_open_nic(bp, true, false); if ((!rc) && BNXT_PF(bp)) { @@ -802,6 +837,45 @@ static int bnxt_flash_nvram(struct net_device *dev, return rc; } +static int bnxt_firmware_reset(struct net_device *dev, + u16 dir_type) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + + /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */ + /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ + /* (e.g. 
when firmware isn't already running) */ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + /* Self-reset ChiMP upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + break; + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + req.embedded_proc_type = + FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + break; + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; + break; + default: + return -EINVAL; + } + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static int bnxt_flash_firmware(struct net_device *dev, u16 dir_type, const u8 *fw_data, @@ -818,6 +892,9 @@ static int bnxt_flash_firmware(struct net_device *dev, case BNX_DIR_TYPE_BOOTCODE_2: code_type = CODE_BOOT; break; + case BNX_DIR_TYPE_APE_FW: + code_type = CODE_MCTP_PASSTHRU; + break; default: netdev_err(dev, "Unsupported directory entry type: %u\n", dir_type); @@ -856,10 +933,9 @@ static int bnxt_flash_firmware(struct net_device *dev, /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 0, 0, fw_data, fw_size); - if (rc == 0) { /* Firmware update successful */ - /* TODO: Notify processor it needs to reset itself - */ - } + if (rc == 0) /* Firmware update successful */ + rc = bnxt_firmware_reset(dev, dir_type); + return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 70fc8253c07f..4badbedcb421 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -103,19 +103,22 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) __le32 event_data2; u8 opaque_v; #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; }; @@ -132,9 +135,16 @@ struct hwrm_async_event_cmpl_link_status_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; - #define 
HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 }; /* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */ @@ -150,7 +160,8 @@ struct hwrm_async_event_cmpl_link_mtu_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 @@ -169,7 +180,8 @@ struct hwrm_async_event_cmpl_link_speed_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL @@ -200,7 +212,8 @@ struct hwrm_async_event_cmpl_dcb_config_change { #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 @@ -219,7 +232,8 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed { #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 @@ -238,7 +252,8 @@ struct hwrm_async_event_cmpl_func_drvr_unload { #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 @@ -257,7 +272,8 @@ struct hwrm_async_event_cmpl_func_drvr_load { #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define 
HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 @@ -276,10 +292,13 @@ struct hwrm_async_event_cmpl_pf_drvr_unload { #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 }; /* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */ @@ -289,16 +308,19 @@ struct hwrm_async_event_cmpl_pf_drvr_load { #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) __le16 event_id; - #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0) __le32 event_data2; u8 opaque_v; #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 }; /* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */ @@ -314,7 +336,8 @@ struct hwrm_async_event_cmpl_vf_flr { #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 @@ -333,7 +356,8 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change { #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 @@ -357,18 +381,20 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL }; -/* HW Resource Manager Specification 0.7.8 */ -#define HWRM_VERSION_MAJOR 0 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 8 +/* HW Resource Manager Specification 1.0.0 */ +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 0 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_STR "0.7.8" -/* Following is the signature for HWRM message field that indicates not +#define 
HWRM_VERSION_STR "1.0.0" +/* + * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. */ #define HWRM_NA_SIGNATURE ((__le32)(-1)) @@ -398,7 +424,9 @@ struct output { struct cmd_nums { __le16 req_type; #define HWRM_VER_GET (0x0UL) - #define HWRM_FUNC_DISABLE (0x10UL) + #define HWRM_FUNC_BUF_UNRGTR (0xeUL) + #define HWRM_FUNC_VF_CFG (0xfUL) + #define RESERVED1 (0x10UL) #define HWRM_FUNC_RESET (0x11UL) #define HWRM_FUNC_GETFID (0x12UL) #define HWRM_FUNC_VF_ALLOC (0x13UL) @@ -414,10 +442,9 @@ struct cmd_nums { #define HWRM_FUNC_DRV_RGTR (0x1dUL) #define HWRM_FUNC_DRV_QVER (0x1eUL) #define HWRM_FUNC_BUF_RGTR (0x1fUL) - #define HWRM_FUNC_VF_CFG (0x20UL) #define HWRM_PORT_PHY_CFG (0x20UL) #define HWRM_PORT_MAC_CFG (0x21UL) - #define HWRM_PORT_ENABLE (0x22UL) + #define RESERVED2 (0x22UL) #define HWRM_PORT_QSTATS (0x23UL) #define HWRM_PORT_LPBK_QSTATS (0x24UL) #define HWRM_PORT_CLR_STATS (0x25UL) @@ -455,13 +482,11 @@ struct cmd_nums { #define HWRM_RING_GRP_FREE (0x61UL) #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL) #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL) - #define HWRM_ARB_GRP_ALLOC (0x80UL) - #define HWRM_ARB_GRP_CFG (0x81UL) #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL) #define HWRM_CFA_L2_FILTER_FREE (0x91UL) #define HWRM_CFA_L2_FILTER_CFG (0x92UL) #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL) + #define RESERVED3 (0x94UL) #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) @@ -469,6 +494,9 @@ struct cmd_nums { #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL) #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL) #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL) + #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL) + #define HWRM_CFA_EM_FLOW_FREE (0x9dUL) + #define HWRM_CFA_EM_FLOW_CFG (0x9eUL) #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL) #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL) #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL) @@ -483,8 +511,6 @@ struct cmd_nums { #define HWRM_FWD_RESP (0xd2UL) #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) - #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL) - #define HWRM_MGMT_L2_FILTER_FREE (0x101UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) @@ -505,7 +531,6 @@ struct cmd_nums { __le16 unused_0[3]; }; -/* Return Codes (8 bytes) */ struct ret_codes { __le16 error_code; #define HWRM_ERR_CODE_SUCCESS (0x0UL) @@ -529,7 +554,7 @@ struct hwrm_err_output { __le16 resp_len; __le32 opaque_0; __le16 opaque_1; - u8 opaque_2; + u8 cmd_err; u8 valid; }; @@ -686,65 +711,38 @@ struct hwrm_ver_get_output { u8 hwrm_fw_min; u8 hwrm_fw_bld; u8 hwrm_fw_rsvd; - u8 ape_fw_maj; - u8 ape_fw_min; - u8 ape_fw_bld; - u8 ape_fw_rsvd; - u8 kong_fw_maj; - u8 kong_fw_min; - u8 kong_fw_bld; - u8 kong_fw_rsvd; - u8 tang_fw_maj; - u8 tang_fw_min; - u8 tang_fw_bld; - u8 tang_fw_rsvd; - u8 bono_fw_maj; - u8 bono_fw_min; - u8 bono_fw_bld; - u8 bono_fw_rsvd; + u8 mgmt_fw_maj; + u8 mgmt_fw_min; + u8 mgmt_fw_bld; + u8 mgmt_fw_rsvd; + u8 netctrl_fw_maj; + u8 netctrl_fw_min; + u8 netctrl_fw_bld; + u8 netctrl_fw_rsvd; + __le32 reserved1; + u8 roce_fw_maj; + u8 roce_fw_min; + u8 roce_fw_bld; + u8 roce_fw_rsvd; char hwrm_fw_name[16]; - char ape_fw_name[16]; - char kong_fw_name[16]; - char tang_fw_name[16]; - char bono_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + __le32 
reserved2[4]; + char roce_fw_name[16]; __le16 chip_num; u8 chip_rev; u8 chip_metal; u8 chip_bond_id; - u8 unused_0; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC (0x0UL << 0) + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0) + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0) __le16 max_req_win_len; __le16 max_resp_len; __le16 def_req_timeout; + u8 unused_0; u8 unused_1; u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_func_disable */ -/* Input (24 bytes) */ -struct hwrm_func_disable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL - __le16 vf_id; - __le16 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_func_disable_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; u8 valid; }; @@ -759,7 +757,12 @@ struct hwrm_func_reset_input { __le32 enables; #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL __le16 vf_id; - __le16 unused_0; + u8 func_reset_level; + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0) + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0) + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0) + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0) + u8 unused_0; }; /* Output (16 bytes) */ @@ -861,7 +864,7 @@ struct hwrm_func_vf_free_output { }; /* hwrm_func_vf_cfg */ -/* Input (24 bytes) */ +/* Input (32 bytes) */ struct hwrm_func_vf_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -871,8 +874,11 @@ struct hwrm_func_vf_cfg_input { __le32 enables; #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL __le16 mtu; __le16 guest_vlan; + __le16 async_event_cr; + __le16 unused_0[3]; }; /* Output (16 bytes) */ @@ -944,7 +950,7 @@ struct hwrm_func_cfg_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le16 vf_id; + __le16 fid; u8 unused_0; u8 unused_1; __le32 flags; @@ -1000,10 +1006,6 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0) #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) u8 allowed_vlan_pris; - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0) - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0) - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0) - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) u8 evb_mode; #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0) #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0) @@ -1166,6 +1168,15 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL __le16 os_type; + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0) u8 ver_maj; u8 ver_min; u8 ver_upd; @@ -1276,9 +1287,7 @@ struct hwrm_func_drv_qver_input { __le16 seq_id; __le16 
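Every HWRM request in this header starts with the same 16-byte preamble (req_type, cmpl_ring, seq_id, target_id, resp_addr), and every response ends with a valid byte the driver polls before trusting the payload. The userspace model below only shows the header layout and byte order; the 0xffff placeholder values for "no completion ring" and "target the firmware" are assumptions for the sketch, not something taken from the driver.

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Userspace model of the common HWRM request header; all fields travel
 * little-endian on the wire. */
struct hwrm_req_hdr {
	uint16_t req_type;
	uint16_t cmpl_ring;
	uint16_t seq_id;
	uint16_t target_id;
	uint64_t resp_addr;
} __attribute__((packed));

static void hwrm_hdr_init(struct hwrm_req_hdr *hdr, uint16_t req_type,
			  uint16_t seq_id, uint64_t resp_dma)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->req_type = htole16(req_type);
	hdr->cmpl_ring = htole16(0xffff);	/* assumed: no completion ring */
	hdr->seq_id = htole16(seq_id);
	hdr->target_id = htole16(0xffff);	/* assumed: target the firmware */
	hdr->resp_addr = htole64(resp_dma);
}

int main(void)
{
	struct hwrm_req_hdr hdr;

	hwrm_hdr_init(&hdr, 0x0, 1, 0x1000);	/* 0x0 = HWRM_VER_GET above */
	return (int)sizeof(hdr);		/* 16 bytes */
}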
target_id; __le64 resp_addr; - __le32 enables; - #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL - #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL + __le32 reserved; __le16 fid; __le16 unused_0; }; @@ -1290,6 +1299,15 @@ struct hwrm_func_drv_qver_output { __le16 seq_id; __le16 resp_len; __le16 os_type; + #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0) u8 ver_maj; u8 ver_min; u8 ver_upd; @@ -1498,9 +1516,7 @@ struct hwrm_port_phy_qcfg_output { u8 force_pause; #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL - u8 duplex_setting; - #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0) - #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0) + u8 reserved1; __le32 preemphasis; u8 phy_maj; u8 phy_min; @@ -1601,33 +1617,6 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; -/* hwrm_port_enable */ -/* Input (24 bytes) */ -struct hwrm_port_enable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL - __le16 port_id; - __le16 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_port_enable_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - /* hwrm_port_qstats */ /* Input (40 bytes) */ struct hwrm_port_qstats_input { @@ -1651,10 +1640,11 @@ struct hwrm_port_qstats_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - __le32 unused_0; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0; u8 unused_1; u8 unused_2; - u8 unused_3; u8 valid; }; @@ -1668,7 +1658,7 @@ struct hwrm_port_lpbk_qstats_input { __le64 resp_addr; }; -/* Output (64 bytes) */ +/* Output (96 bytes) */ struct hwrm_port_lpbk_qstats_output { __le16 error_code; __le16 req_type; @@ -1680,6 +1670,10 @@ struct hwrm_port_lpbk_qstats_output { __le64 lpbk_ucast_bytes; __le64 lpbk_mcast_bytes; __le64 lpbk_bcast_bytes; + __le64 tx_stat_discard; + __le64 tx_stat_error; + __le64 rx_stat_discard; + __le64 rx_stat_error; __le32 unused_0; u8 unused_1; u8 unused_2; @@ -1884,12 +1878,11 @@ struct hwrm_queue_buffers_cfg_input { __le32 enables; #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL __le32 queue_id; __le32 reserved; __le32 shared; @@ -1921,15 +1914,15 @@ struct hwrm_queue_pfcenable_cfg_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 enables; 
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL + __le32 flags; + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL __le16 port_id; __le16 unused_0; }; @@ -1962,14 +1955,14 @@ struct hwrm_queue_pri2cos_cfg_input { #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL __le32 enables; u8 port_id; - u8 pri0_cos; - u8 pri1_cos; - u8 pri2_cos; - u8 pri3_cos; - u8 pri4_cos; - u8 pri5_cos; - u8 pri6_cos; - u8 pri7_cos; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; u8 unused_0[7]; }; @@ -2164,6 +2157,7 @@ struct hwrm_vnic_cfg_input { __le32 flags; #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -2380,18 +2374,16 @@ struct hwrm_ring_alloc_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL - #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL - #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL + #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL + #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL + #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL - #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL + #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0) #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0) #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0) - #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0) - #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0) u8 unused_0; __le16 unused_1; __le64 page_tbl_addr; @@ -2406,17 +2398,17 @@ struct hwrm_ring_alloc_input { __le16 queue_id; u8 unused_4; u8 unused_5; - __le32 arb_grp_id; - __le16 input_number; + __le32 reserved1; + __le16 reserved2; u8 unused_6; u8 unused_7; - __le32 weight; + __le32 reserved3; __le32 stat_ctx_id; - __le32 min_bw; + __le32 reserved4; __le32 max_bw; u8 int_mode; #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0) - #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0) + #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0) #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0) #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0) u8 unused_8[3]; @@ -2448,8 +2440,6 @@ struct hwrm_ring_free_input { #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0) #define RING_FREE_REQ_RING_TYPE_TX 
(0x1UL << 0) #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0) - #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0) - #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0) u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -2550,8 +2540,6 @@ struct hwrm_ring_reset_input { #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0) #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0) #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0) - #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0) - #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0) u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -2622,61 +2610,6 @@ struct hwrm_ring_grp_free_output { u8 valid; }; -/* hwrm_arb_grp_alloc */ -/* Input (24 bytes) */ -struct hwrm_arb_grp_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 input_number; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_arb_grp_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 arb_grp_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_arb_grp_cfg */ -/* Input (32 bytes) */ -struct hwrm_arb_grp_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 arb_grp_id; - __le16 input_number; - __le16 tx_ring; - __le32 weight; - __le32 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_arb_grp_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - /* hwrm_cfa_l2_filter_alloc */ /* Input (96 bytes) */ struct hwrm_cfa_l2_filter_alloc_input { @@ -2708,7 +2641,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL u8 l2_addr[6]; u8 unused_0; @@ -2751,7 +2684,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_7; - __le16 dst_vnic_id; + __le16 dst_id; __le16 mirror_vnic_id; u8 pri_hint; #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0) @@ -2816,10 +2749,11 @@ struct hwrm_cfa_l2_filter_cfg_input { #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL __le32 enables; - #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL __le64 l2_filter_id; - __le32 dst_vnic_id; - __le32 unused_0; + __le32 dst_id; + __le32 new_mirror_vnic_id; }; /* Output (16 bytes) */ @@ -2843,9 +2777,9 @@ struct hwrm_cfa_l2_set_rx_mask_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 dflt_vnic_id; + __le32 vnic_id; __le32 mask; - #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL @@ -2869,46 +2803,6 @@ struct hwrm_cfa_l2_set_rx_mask_output { u8 valid; }; -/* hwrm_cfa_l2_set_bcastmcast_mirroring */ -/* 
Input (32 bytes) */ -struct hwrm_cfa_l2_set_bcastmcast_mirroring_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 dflt_vnic_id; - __le32 mirroring_flags; - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL - __le16 vlan_id; - u8 bcast_domain; - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) - u8 mcast_domain; - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) - __le32 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_cfa_l2_set_bcastmcast_mirroring_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - /* hwrm_cfa_tunnel_filter_alloc */ /* Input (88 bytes) */ struct hwrm_cfa_tunnel_filter_alloc_input { @@ -3017,17 +2911,16 @@ struct hwrm_cfa_encap_record_alloc_input { __le32 encap_data[16]; }; -/* Output (24 bytes) */ +/* Output (16 bytes) */ struct hwrm_cfa_encap_record_alloc_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; - __le64 encap_record_id; - __le32 unused_0; + __le32 encap_record_id; + u8 unused_0; u8 unused_1; u8 unused_2; - u8 unused_3; u8 valid; }; @@ -3039,7 +2932,8 @@ struct hwrm_cfa_encap_record_free_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le64 encap_record_id; + __le32 encap_record_id; + __le32 unused_0; }; /* Output (16 bytes) */ @@ -3083,14 +2977,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL __le64 l2_filter_id; u8 src_macaddr[6]; __be16 ethertype; - u8 ipaddr_type; + u8 ip_addr_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0) u8 ip_protocol; - __le16 dst_vnic_id; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0) + __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) @@ -3104,6 +3005,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 
(0xffUL << 0) u8 pri_hint; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0) __be32 src_ipaddr[4]; __be32 src_ipaddr_mask[4]; __be32 dst_ipaddr[4]; @@ -3162,11 +3068,11 @@ struct hwrm_cfa_ntuple_filter_cfg_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL __le32 unused_0; __le64 ntuple_filter_id; - __le32 new_dst_vnic_id; + __le32 new_dst_id; __le32 new_mirror_vnic_id; }; @@ -3192,16 +3098,8 @@ struct hwrm_tunnel_dst_port_query_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_0[7]; }; @@ -3228,16 +3126,8 @@ struct hwrm_tunnel_dst_port_alloc_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_0; __be16 tunnel_dst_port_val; __le32 unused_1; @@ -3267,16 +3157,8 @@ struct hwrm_tunnel_dst_port_free_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_0; __le16 tunnel_dst_port_id; __le32 unused_1; @@ -3416,68 +3298,145 @@ struct hwrm_stat_ctx_clr_stats_output { u8 valid; }; -/* hwrm_mgmt_l2_filter_alloc */ -/* 
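The tunnel destination-port requests above drop every tunnel type except VXLAN (0x1) and GENEVE (0x5), and tunnel_dst_port_val stays big-endian (__be16). The sketch below is not driver code; the struct is a simplified stand-in and 4789 is simply the IANA VXLAN UDP port used as sample data.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define TUNNEL_TYPE_VXLAN	0x1
#define TUNNEL_TYPE_GENEVE	0x5

struct tunnel_dst_port_req {
	uint8_t tunnel_type;
	uint16_t tunnel_dst_port_val;	/* big-endian on the wire */
};

int main(void)
{
	struct tunnel_dst_port_req req = {
		.tunnel_type = TUNNEL_TYPE_VXLAN,
		.tunnel_dst_port_val = htobe16(4789),	/* IANA VXLAN port */
	};

	printf("type=0x%x udp_port=%u\n", req.tunnel_type,
	       be16toh(req.tunnel_dst_port_val));
	return 0;
}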
Input (56 bytes) */ -struct hwrm_mgmt_l2_filter_alloc_input { +/* hwrm_fw_reset */ +/* Input (24 bytes) */ +struct hwrm_fw_reset_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 flags; - #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL - #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) - #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) - __le32 enables; - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL - u8 l2_address[6]; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0) + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_exec_fwd_resp */ +/* Input (128 bytes) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; u8 unused_1; - u8 l2_address_mask[6]; - __le16 ovlan; - __le16 ovlan_mask; - __le16 ivlan; - __le16 ivlan_mask; u8 unused_2; u8 unused_3; - __le32 action_id; - u8 action_bypass; - #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL - u8 unused_5[3]; + u8 valid; +}; + +/* hwrm_reject_fwd_resp */ +/* Input (128 bytes) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; }; /* Output (16 bytes) */ -struct hwrm_mgmt_l2_filter_alloc_output { +struct hwrm_reject_fwd_resp_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; - __le16 mgmt_l2_filter_id; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fwd_resp */ +/* Input (40 bytes) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; u8 unused_0; u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* Output (16 bytes) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; u8 unused_2; u8 unused_3; - u8 unused_4; u8 valid; }; -/* hwrm_mgmt_l2_filter_free */ -/* Input (24 bytes) 
*/ -struct hwrm_mgmt_l2_filter_free_input { +/* hwrm_fwd_async_event_cmpl */ +/* Input (32 bytes) */ +struct hwrm_fwd_async_event_cmpl_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le16 mgmt_l2_filter_id; - __le16 unused_0[3]; + __le16 encap_async_event_target_id; + u8 unused_0; + u8 unused_1; + u8 unused_2[3]; + u8 unused_3; + __le32 encap_async_event_cmpl[4]; }; /* Output (16 bytes) */ -struct hwrm_mgmt_l2_filter_free_output { +struct hwrm_fwd_async_event_cmpl_output { __le16 error_code; __le16 req_type; __le16 seq_id; @@ -3489,6 +3448,31 @@ struct hwrm_mgmt_l2_filter_free_output { u8 valid; }; +/* hwrm_temp_monitor_query */ +/* Input (16 bytes) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + /* hwrm_nvm_raw_write_blk */ /* Input (32 bytes) */ struct hwrm_nvm_raw_write_blk_input { @@ -3621,7 +3605,7 @@ struct hwrm_nvm_get_dir_info_output { }; /* hwrm_nvm_write */ -/* Input (40 bytes) */ +/* Input (48 bytes) */ struct hwrm_nvm_write_input { __le16 req_type; __le16 cmpl_ring; @@ -3637,6 +3621,8 @@ struct hwrm_nvm_write_input { __le16 option; __le16 flags; #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + __le32 dir_item_length; + __le32 unused_0; }; /* Output (16 bytes) */ @@ -3645,10 +3631,9 @@ struct hwrm_nvm_write_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; u8 valid; }; @@ -3833,214 +3818,4 @@ struct hwrm_nvm_verify_update_output { u8 valid; }; -/* hwrm_exec_fwd_resp */ -/* Input (120 bytes) */ -struct hwrm_exec_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_request[24]; - __le16 encap_resp_target_id; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_exec_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_reject_fwd_resp */ -/* Input (120 bytes) */ -struct hwrm_reject_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_request[24]; - __le16 encap_resp_target_id; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_reject_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fwd_resp */ -/* Input (40 bytes) */ -struct hwrm_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 encap_resp_target_id; - __le16 encap_resp_cmpl_ring; - __le16 encap_resp_len; - u8 unused_0; - u8 unused_1; - __le64 encap_resp_addr; - __le32 encap_resp[24]; -}; - -/* Output (16 bytes) */ -struct hwrm_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fwd_async_event_cmpl */ -/* Input (32 bytes) */ -struct hwrm_fwd_async_event_cmpl_input { - 
__le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 encap_async_event_target_id; - u8 unused_0; - u8 unused_1; - u8 unused_2[3]; - u8 unused_3; - __le32 encap_async_event_cmpl[4]; -}; - -/* Output (16 bytes) */ -struct hwrm_fwd_async_event_cmpl_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fw_reset */ -/* Input (24 bytes) */ -struct hwrm_fw_reset_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) - u8 selfrst_status; - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_fw_reset_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 selfrst_status; - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_fw_qstatus */ -/* Input (24 bytes) */ -struct hwrm_fw_qstatus_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 embedded_proc_type; - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ -struct hwrm_fw_qstatus_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 selfrst_status; - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_temp_monitor_query */ -/* Input (16 bytes) */ -struct hwrm_temp_monitor_query_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; -}; - -/* Output (16 bytes) */ -struct hwrm_temp_monitor_query_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 temp; - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7a9af2887d8e..c1cc83d7e38c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -21,7 +21,7 @@ #ifdef CONFIG_BNXT_SRIOV static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) { - if (bp->state != BNXT_STATE_OPEN) { + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { netdev_err(bp->dev, 
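The bnxt_vf_ndo_prep() change just above switches bp->state from a scalar compare to a bit test, so several independent state bits can coexist in one word. The userspace analogue below uses plain shifts in place of the kernel's test_bit()/set_bit() helpers; the bit number 0 for the open state is an assumption made only for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define STATE_OPEN_BIT	0	/* hypothetical bit number */

struct bnxt_state_model {
	unsigned long state;	/* bitmap of state bits */
};

static bool vf_ndo_allowed(const struct bnxt_state_model *bp)
{
	/* was: bp->state == BNXT_STATE_OPEN (scalar compare) */
	return (bp->state >> STATE_OPEN_BIT) & 1UL;	/* test_bit() stand-in */
}

int main(void)
{
	struct bnxt_state_model bp = { .state = 1UL << STATE_OPEN_BIT };

	printf("vf ndo allowed: %d\n", vf_ndo_allowed(&bp));
	return 0;
}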
"vf ndo called though PF is down\n"); return -EINVAL; } @@ -64,7 +64,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) * the spoof check should also include vlan anti-spoofing */ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(func_flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { @@ -128,7 +128,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) memcpy(vf->mac_addr, mac, ETH_ALEN); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); memcpy(req.dflt_mac_addr, mac, ETH_ALEN); @@ -159,7 +159,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.dflt_vlan = cpu_to_le16(vlan_tag); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); @@ -198,7 +198,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); req.max_bw = cpu_to_le32(max_tx_rate); @@ -363,10 +363,11 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) } /* only call by PF to reserve resources for VF */ -static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) +static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; + u16 vf_ring_grps; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; @@ -378,18 +379,18 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) * be removed once new HWRM provides HW ring groups capability in * hwrm_func_qcap. 
*/ - vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs); - vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs; + vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs); + vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs; /* TODO: restore this logic below once the WA above is removed */ - /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */ - vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs; + /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */ + vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; if (bp->flags & BNXT_FLAG_AGG_RINGS) - vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) / - *num_vfs; + vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) / + num_vfs; else - vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) / - *num_vfs; - vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs; + vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; + vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; + vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | FUNC_CFG_REQ_ENABLES_MRU | @@ -399,7 +400,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS); + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; req.mru = cpu_to_le16(mtu); @@ -409,6 +411,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); req.num_tx_rings = cpu_to_le16(vf_tx_rings); req.num_rx_rings = cpu_to_le16(vf_rx_rings); + req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.num_l2_ctxs = cpu_to_le16(4); vf_vnics = 1; @@ -417,22 +420,24 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); mutex_lock(&bp->hwrm_cmd_lock); - for (i = 0; i < *num_vfs; i++) { - req.vf_id = cpu_to_le16(pf->first_vf_id + i); + for (i = 0; i < num_vfs; i++) { + req.fid = cpu_to_le16(pf->first_vf_id + i); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) break; - bp->pf.active_vfs = i + 1; - bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id); + pf->active_vfs = i + 1; + pf->vf[i].fw_fid = le16_to_cpu(req.fid); } mutex_unlock(&bp->hwrm_cmd_lock); if (!rc) { - bp->pf.max_pf_tx_rings = bp->tx_nr_rings; - if (bp->flags & BNXT_FLAG_AGG_RINGS) - bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2; - else - bp->pf.max_pf_rx_rings = bp->rx_nr_rings; + pf->max_tx_rings -= vf_tx_rings * num_vfs; + pf->max_rx_rings -= vf_rx_rings * num_vfs; + pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; + pf->max_cp_rings -= vf_cp_rings * num_vfs; + pf->max_rsscos_ctxs -= num_vfs; + pf->max_stat_ctxs -= vf_stat_ctx * num_vfs; + pf->max_vnics -= vf_vnics * num_vfs; } return rc; } @@ -492,7 +497,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) goto err_out1; /* Reserve resources for VFs */ - rc = bnxt_hwrm_func_cfg(bp, num_vfs); + rc = bnxt_hwrm_func_cfg(bp, *num_vfs); if (rc) goto err_out2; @@ -536,8 +541,8 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); bp->pf.active_vfs = 0; - bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings; - bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings; + /* Reclaim all resources for the PF. 
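The PF-side reservation in bnxt_hwrm_func_cfg() above divides whatever the PF is not using itself (completion rings, stat contexts, rx/tx rings, ring groups) evenly across the requested VFs, then subtracts the handed-out amounts from the PF maxima. A small numeric model of that arithmetic follows; all capability values are invented, since the real driver reads them from hwrm_func_qcaps.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical PF capabilities and current PF usage. */
	unsigned int max_cp_rings = 128, max_stat_ctxs = 128;
	unsigned int max_rx_rings = 64, max_tx_rings = 64, max_hw_ring_grps = 64;
	unsigned int cp_nr_rings = 8, rx_nr_rings = 8, tx_nr_rings = 8;
	unsigned int num_stat_ctxs = 9, num_vfs = 4;
	int agg_rings = 1;	/* BNXT_FLAG_AGG_RINGS set: rx uses 2 rings each */

	unsigned int vf_cp = (min_u(max_cp_rings, max_stat_ctxs) - cp_nr_rings) / num_vfs;
	unsigned int vf_stat = (max_stat_ctxs - num_stat_ctxs) / num_vfs;
	unsigned int vf_rx = (max_rx_rings - rx_nr_rings * (agg_rings ? 2 : 1)) / num_vfs;
	unsigned int vf_grps = (max_hw_ring_grps - rx_nr_rings) / num_vfs;
	unsigned int vf_tx = (max_tx_rings - tx_nr_rings) / num_vfs;

	printf("per-VF: cp=%u stat=%u rx=%u grps=%u tx=%u\n",
	       vf_cp, vf_stat, vf_rx, vf_grps, vf_tx);
	return 0;
}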
*/ + bnxt_hwrm_func_qcaps(bp); } int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) @@ -595,6 +600,7 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); req.encap_resp_len = cpu_to_le16(msg_size); req.encap_resp_addr = encap_resp_addr; req.encap_resp_cmpl_ring = encap_resp_cpr; @@ -629,6 +635,7 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); mutex_lock(&bp->hwrm_cmd_lock); @@ -660,6 +667,7 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); mutex_lock(&bp->hwrm_cmd_lock); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 17f017ab4dac..b15a60d787c7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2041,11 +2041,11 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) for (i = 0; i < priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); } ring = &priv->tx_rings[DESC_INDEX]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); } static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 8bdfe53754ba..0d775964b060 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -401,9 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; - else - priv->mii_bus->irq[phydev->addr] = PHY_POLL; + priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT; return 0; } @@ -477,12 +475,6 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", priv->pdev->name, priv->pdev->id); - bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!bus->irq) { - mdiobus_free(priv->mii_bus); - return -ENOMEM; - } - return 0; } @@ -581,7 +573,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) } if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdio->phy_map[pd->phy_address]; + phydev = mdiobus_get_phy(mdio, pd->phy_address); else phydev = phy_find_first(mdio); @@ -648,7 +640,6 @@ int bcmgenet_mii_init(struct net_device *dev) out: of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return ret; } @@ -659,6 +650,5 @@ void bcmgenet_mii_exit(struct net_device *dev) of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c 
b/drivers/net/ethernet/broadcom/sb1250-mac.c index f557a2aaec23..eacc559679bf 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -238,7 +238,6 @@ struct sbmac_softc { struct napi_struct napi; struct phy_device *phy_dev; /* the associated PHY device */ struct mii_bus *mii_bus; /* the MII bus */ - int phy_irq[PHY_MAX_ADDR]; spinlock_t sbm_lock; /* spin lock */ int sbm_devflags; /* current device flags */ @@ -2250,9 +2249,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) sc->mii_bus->priv = sc; sc->mii_bus->read = sbmac_mii_read; sc->mii_bus->write = sbmac_mii_write; - sc->mii_bus->irq = sc->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - sc->mii_bus->irq[i] = SBMAC_PHY_INT; sc->mii_bus->parent = &pldev->dev; /* @@ -2358,20 +2354,15 @@ static int sbmac_mii_probe(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev; - int i; - for (i = 0; i < PHY_MAX_ADDR; i++) { - phy_dev = sc->mii_bus->phy_map[i]; - if (phy_dev) - break; - } + phy_dev = phy_find_first(sc->mii_bus); if (!phy_dev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENXIO; } - phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, - PHY_INTERFACE_MODE_GMII); + phy_dev = phy_connect(dev, dev_name(&phy_dev->mdio.dev), + &sbmac_mii_poll, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); return PTR_ERR(phy_dev); @@ -2388,11 +2379,10 @@ static int sbmac_mii_probe(struct net_device *dev) SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; - pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phy_dev->drv->name, - dev_name(&phy_dev->dev), phy_dev->irq); + phy_attached_info(phy_dev); + + phy_dev->advertising = phy_dev->supported; sc->phy_dev = phy_dev; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 79789d8e52da..9293675df7ba 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1406,7 +1406,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) u32 val; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M: @@ -1538,10 +1538,6 @@ static int tg3_mdio_init(struct tg3 *tp) tp->mdio_bus->read = &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); - tp->mdio_bus->irq = &tp->mdio_irq[0]; - - for (i = 0; i < PHY_MAX_ADDR; i++) - tp->mdio_bus->irq[i] = PHY_POLL; /* The bus registration will look for all the PHYs on the mdio bus. 
* Unfortunately, it does not ensure the PHY is powered up before @@ -1558,7 +1554,7 @@ static int tg3_mdio_init(struct tg3 *tp) return i; } - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (!phydev || !phydev->drv) { dev_warn(&tp->pdev->dev, "No PHY devices\n"); @@ -1968,7 +1964,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) u32 old_tx_mode = tp->tx_mode; if (tg3_flag(tp, USE_PHYLIB)) - autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; + autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; else autoneg = tp->link_config.autoneg; @@ -2004,7 +2000,7 @@ static void tg3_adjust_link(struct net_device *dev) u8 oldflowctrl, linkmesg = 0; u32 mac_mode, lcl_adv, rmt_adv; struct tg3 *tp = netdev_priv(dev); - struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); spin_lock_bh(&tp->lock); @@ -2093,10 +2089,10 @@ static int tg3_phy_init(struct tg3 *tp) /* Bring the PHY back to a known state. */ tg3_bmcr_reset(tp); - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); /* Attach the MAC to the PHY. */ - phydev = phy_connect(tp->dev, dev_name(&phydev->dev), + phydev = phy_connect(tp->dev, phydev_name(phydev), tg3_adjust_link, phydev->interface); if (IS_ERR(phydev)) { dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); @@ -2120,7 +2116,7 @@ static int tg3_phy_init(struct tg3 *tp) SUPPORTED_Asym_Pause); break; default: - phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); return -EINVAL; } @@ -2128,6 +2124,8 @@ static int tg3_phy_init(struct tg3 *tp) phydev->advertising = phydev->supported; + phy_attached_info(phydev); + return 0; } @@ -2138,7 +2136,7 @@ static void tg3_phy_start(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; @@ -2158,13 +2156,13 @@ static void tg3_phy_stop(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); } static void tg3_phy_fini(struct tg3 *tp) { if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } @@ -4048,7 +4046,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) struct phy_device *phydev; u32 phyid, advertising; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; @@ -12076,7 +12074,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); return phy_ethtool_gset(phydev, cmd); } @@ -12143,7 +12141,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 
return phy_ethtool_sset(phydev, cmd); } @@ -12298,7 +12296,7 @@ static int tg3_nway_reset(struct net_device *dev) if (tg3_flag(tp, USE_PHYLIB)) { if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); + r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); } else { u32 bmcr; @@ -12416,7 +12414,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam u32 newadv; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && @@ -13926,7 +13924,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); return phy_mii_ioctl(phydev, ifr, cmd); } @@ -17898,13 +17896,7 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_bus_string(tp, str), dev->dev_addr); - if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); - } else { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) { char *ethtype; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 31c9f8295953..3b5e98ecba00 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3254,7 +3254,6 @@ struct tg3 { int pcie_readrq; struct mii_bus *mdio_bus; - int mdio_irq[PHY_MAX_ADDR]; int old_link; u8 phy_addr; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 169059c92f80..c56347536f6b 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -28,6 +29,7 @@ #include <linux/phy.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/of_mdio.h> #include <linux/of_net.h> @@ -439,12 +441,6 @@ static int macb_mii_init(struct macb *bp) bp->mii_bus->parent = &bp->dev->dev; pdata = dev_get_platdata(&bp->pdev->dev); - bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (!bp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; @@ -469,9 +465,6 @@ static int macb_mii_init(struct macb *bp) goto err_out_unregister_bus; } } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - if (pdata) bp->mii_bus->phy_mask = pdata->phy_mask; @@ -479,7 +472,7 @@ static int macb_mii_init(struct macb *bp) } if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; err = macb_mii_probe(bp->dev); if (err) @@ -489,8 +482,6 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); -err_out_free_mdio_irq: - kfree(bp->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(bp->mii_bus); err_out: @@ -2122,7 +2113,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs_buff[10] = 
macb_tx_dma(&bp->queues[0], tail); regs_buff[11] = macb_tx_dma(&bp->queues[0], head); - regs_buff[12] = macb_or_gem_readl(bp, USRIO); + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) + regs_buff[12] = macb_or_gem_readl(bp, USRIO); if (macb_is_gem(bp)) { regs_buff[13] = gem_readl(bp, DMACFG); } @@ -2401,19 +2393,21 @@ static int macb_init(struct platform_device *pdev) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; - val = 0; - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) - val = GEM_BIT(RGMII); - else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) - val = MACB_BIT(RMII); - else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) - val = MACB_BIT(MII); + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { + val = 0; + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + val = GEM_BIT(RGMII); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(RMII); + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(MII); - if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) - val |= MACB_BIT(CLKEN); + if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) + val |= MACB_BIT(CLKEN); - macb_or_gem_writel(bp, USRIO, val); + macb_or_gem_writel(bp, USRIO, val); + } /* Set MII management clock divider */ val = macb_mdc_clk_div(bp); @@ -2776,6 +2770,11 @@ static const struct macb_config emac_config = { .init = at91ether_init, }; +static const struct macb_config np4_config = { + .caps = MACB_CAPS_USRIO_DISABLED, + .clk_init = macb_clk_init, + .init = macb_init, +}; static const struct macb_config zynqmp_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, @@ -2796,6 +2795,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,at32ap7000-macb" }, { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, { .compatible = "cdns,macb" }, + { .compatible = "cdns,np4-macb", .data = &np4_config }, { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, @@ -2817,6 +2817,7 @@ static int macb_probe(struct platform_device *pdev) = macb_clk_init; int (*init)(struct platform_device *) = macb_init; struct device_node *np = pdev->dev.of_node; + struct device_node *phy_node; const struct macb_config *macb_config = NULL; struct clk *pclk, *hclk, *tx_clk; unsigned int queue_mask, num_queues; @@ -2904,6 +2905,16 @@ static int macb_probe(struct platform_device *pdev) else macb_get_hwaddr(bp); + /* Power up the PHY if there is a GPIO reset */ + phy_node = of_get_next_available_child(np, NULL); + if (phy_node) { + int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); + if (gpio_is_valid(gpio)) + bp->reset_gpio = gpio_to_desc(gpio); + gpiod_set_value(bp->reset_gpio, GPIOD_OUT_HIGH); + } + of_node_put(phy_node); + err = of_get_phy_mode(np); if (err < 0) { pdata = dev_get_platdata(&pdev->dev); @@ -2937,8 +2948,7 @@ static int macb_probe(struct platform_device *pdev) dev->base_addr, dev->irq, dev->dev_addr); phydev = bp->phy_dev; - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; @@ -2968,8 +2978,11 @@ static int macb_remove(struct platform_device *pdev) if (bp->phy_dev) phy_disconnect(bp->phy_dev); mdiobus_unregister(bp->mii_bus); - kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); + + /* Shutdown 
the PHY if there is a GPIO reset */ + gpiod_set_value(bp->reset_gpio, GPIOD_OUT_LOW); + unregister_netdev(dev); clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d83b0db77821..0d4ecfcd60b7 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -400,6 +400,7 @@ #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 #define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 +#define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -829,6 +830,7 @@ struct macb { unsigned int dma_burst_length; phy_interface_t phy_interface; + struct gpio_desc *reset_gpio; /* AT91RM9200 transmit */ struct sk_buff *skb; /* holds skb until xmit interrupt completes */ diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 39ca6744a4e6..688828865c48 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -265,6 +265,7 @@ struct nicvf { u8 tns_mode:1; u8 sqs_mode:1; u8 loopback_supported:1; + bool hw_tso; u16 mtu; struct queue_set *qs; #define MAX_SQS_PER_VF_SINGLE_NODE 5 @@ -489,6 +490,11 @@ static inline int nic_get_node_id(struct pci_dev *pdev) return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK); } +static inline bool pass1_silicon(struct pci_dev *pdev) +{ + return pdev->revision < 8; +} + int nicvf_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); int nicvf_open(struct net_device *netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 4b7fd63ae57c..4dded90076c8 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -37,7 +37,6 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 vf_lmac_map[MAX_LMAC]; - u8 lmac_cnt; struct delayed_work dwork; struct workqueue_struct *check_link; u8 link[MAX_LMAC]; @@ -55,11 +54,6 @@ struct nicpf { bool irq_allocated[NIC_PF_MSIX_VECTORS]; }; -static inline bool pass1_silicon(struct nicpf *nic) -{ - return nic->pdev->revision < 8; -} - /* Supported devices */ static const struct pci_device_id nic_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, @@ -123,7 +117,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) * when PF writes to MBOX(1), in next revisions when * PF writes to MBOX(0) */ - if (pass1_silicon(nic)) { + if (pass1_silicon(nic->pdev)) { /* see the comment for nic_reg_write()/nic_reg_read() * functions above */ @@ -280,7 +274,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) u64 lmac_credit; nic->num_vf_en = 0; - nic->lmac_cnt = 0; for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { if (!(bgx_map & (1 << bgx))) @@ -290,7 +283,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic->vf_lmac_map[next_bgx_lmac++] = NIC_SET_VF_LMAC_MAP(bgx, lmac); nic->num_vf_en += lmac_cnt; - nic->lmac_cnt += lmac_cnt; /* Program LMAC credits */ lmac_credit = (1ull << 1); /* channel credit enable */ @@ -400,7 +392,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ /* Leave RSS_SIZE as '0' to disable RSS */ - if 
(pass1_silicon(nic)) { + if (pass1_silicon(nic->pdev)) { nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), (vnic << 24) | (padd << 16) | (rssi_base + rssi)); @@ -470,7 +462,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) } cpi_base = nic->cpi_base[cfg->vf_id]; - if (pass1_silicon(nic)) + if (pass1_silicon(nic->pdev)) idx_addr = NIC_PF_CPI_0_2047_CFG; else idx_addr = NIC_PF_MPI_0_2047_CFG; @@ -618,6 +610,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) return 0; } +static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) +{ + int bgx, lmac; + + nic->vf_enabled[vf] = enable; + + if (vf >= nic->num_vf_en) + return; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -717,29 +724,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ - nic->vf_enabled[vf] = true; - if (vf >= nic->lmac_cnt) - goto unlock; - - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - - bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); + nic_enable_vf(nic, vf, true); goto unlock; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ - nic->vf_enabled[vf] = false; if (vf >= nic->num_vf_en) nic->sqs_used[vf - nic->num_vf_en] = false; nic->pqs_vf[vf] = 0; - - if (vf >= nic->lmac_cnt) - break; - - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - - bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); + nic_enable_vf(nic, vf, false); break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, &mbx.sqs_alloc); @@ -958,7 +950,7 @@ static void nic_poll_for_link(struct work_struct *work) mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - for (vf = 0; vf < nic->lmac_cnt; vf++) { + for (vf = 0; vf < nic->num_vf_en; vf++) { /* Poll only if VF is UP */ if (!nic->vf_enabled[vf]) continue; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index dde8dc720cd3..c24cb2a86a42 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -525,14 +525,22 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); - nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; - /* For TSO offloaded packets only one head SKB needs to be freed */ + /* For TSO offloaded packets only one SQE will have a valid SKB */ if (skb) { + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); dev_consume_skb_any(skb); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; + } else { + /* In case of HW TSO, HW sends a CQE for each segment of a TSO + * packet instead of a single CQE for the whole TSO packet + * transmitted. Each of this CQE points to the same SQE, so + * avoid freeing same SQE multiple times. 
+ */ + if (!nic->hw_tso) + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } } @@ -1549,6 +1557,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + if (!pass1_silicon(nic->pdev)) + nic->hw_tso = true; + netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 206b6a71a545..d0d1b5490061 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -18,14 +18,6 @@ #include "q_struct.h" #include "nicvf_queues.h" -struct rbuf_info { - struct page *page; - void *data; - u64 offset; -}; - -#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) - /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, u64 reg, int bit_pos, int bits, int val) @@ -86,8 +78,6 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, u32 buf_len, u64 **rbuf) { - u64 data; - struct rbuf_info *rinfo; int order = get_order(buf_len); /* Check if request can be accomodated in previous allocated page */ @@ -113,46 +103,28 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page_offset = 0; } - data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; - - /* Align buffer addr to cache line i.e 128 bytes */ - rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); - /* Save page address for reference updation */ - rinfo->page = nic->rb_page; - /* Store start address for later retrieval */ - rinfo->data = (void *)data; - /* Store alignment offset */ - rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); + *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); - data += rinfo->offset; - - /* Give next aligned address to hw for DMA */ - *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); return 0; } -/* Retrieve actual buffer start address and build skb for received packet */ +/* Build skb around receive buffer */ static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, u64 rb_ptr, int len) { + void *data; struct sk_buff *skb; - struct rbuf_info *rinfo; - rb_ptr = (u64)phys_to_virt(rb_ptr); - /* Get buffer start address and alignment offset */ - rinfo = GET_RBUF_INFO(rb_ptr); + data = phys_to_virt(rb_ptr); /* Now build an skb to give to stack */ - skb = build_skb(rinfo->data, RCV_FRAG_LEN); + skb = build_skb(data, RCV_FRAG_LEN); if (!skb) { - put_page(rinfo->page); + put_page(virt_to_page(data)); return NULL; } - /* Set correct skb->data */ - skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); - - prefetch((void *)rb_ptr); + prefetch(skb->data); return skb; } @@ -196,7 +168,6 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) int head, tail; u64 buf_addr; struct rbdr_entry_t *desc; - struct rbuf_info *rinfo; if (!rbdr) return; @@ -212,16 +183,14 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; - rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); - put_page(rinfo->page); + put_page(virt_to_page(phys_to_virt(buf_addr))); head++; head &= (rbdr->dmem.q_len - 1); } /* Free SKB of tail desc */ desc = GET_RBDR_DESC(rbdr, tail); buf_addr = desc->buf_addr << 
NICVF_RCV_BUF_ALIGN; - rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); - put_page(rinfo->page); + put_page(virt_to_page(phys_to_virt(buf_addr))); /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); @@ -330,7 +299,7 @@ static int nicvf_init_cmp_queue(struct nicvf *nic, return err; cq->desc = cq->dmem.base; - cq->thresh = CMP_QUEUE_CQE_THRESH; + cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH; nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; return 0; @@ -956,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) { int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; - if (skb_shinfo(skb)->gso_size) { + if (skb_shinfo(skb)->gso_size && !nic->hw_tso) { subdesc_cnt = nicvf_tso_count_subdescs(skb); return subdesc_cnt; } @@ -971,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) * First subdescriptor for every send descriptor. */ static inline void -nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, +nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, int subdesc_cnt, struct sk_buff *skb, int len) { int proto; @@ -1007,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, break; } } + + if (nic->hw_tso && skb_shinfo(skb)->gso_size) { + hdr->tso = 1; + hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb); + hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; + /* For non-tunneled pkts, point this to L2 ethertype */ + hdr->inner_l3_offset = skb_network_offset(skb) - 2; + nic->drv_stats.tx_tso++; + } } /* SQ GATHER subdescriptor @@ -1076,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, data_left -= size; tso_build_data(skb, &tso, size); } - nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, + nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry, seg_subdescs - 1, skb, seg_len); sq->skbuff[hdr_qentry] = (u64)NULL; qentry = nicvf_get_nxt_sqentry(sq, qentry); @@ -1129,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) qentry = nicvf_get_sq_desc(sq, subdesc_cnt); /* Check if its a TSO packet */ - if (skb_shinfo(skb)->gso_size) + if (skb_shinfo(skb)->gso_size && !nic->hw_tso) return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); /* Add SQ header subdesc */ - nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); + nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, + skb, skb->len); /* Add SQ gather subdescs */ qentry = nicvf_get_nxt_sqentry(sq, qentry); @@ -1234,153 +1213,93 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) return skb; } -/* Enable interrupt */ -void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) +static u64 nicvf_int_type_to_mask(int int_type, int q_idx) { u64 reg_val; - reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); - switch (int_type) { case NICVF_INTR_CQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: - reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: - reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); break; 
case NICVF_INTR_MBOX: - reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); + reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: - reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT); break; default: - netdev_err(nic->netdev, - "Failed to enable interrupt: unknown type\n"); - break; + reg_val = 0; } - nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); + return reg_val; +} + +/* Enable interrupt */ +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); + + if (!mask) { + netdev_dbg(nic->netdev, + "Failed to enable interrupt: unknown type\n"); + return; + } + nicvf_reg_write(nic, NIC_VF_ENA_W1S, + nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask); } /* Disable interrupt */ void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) { - u64 reg_val = 0; + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); - switch (int_type) { - case NICVF_INTR_CQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); - break; - case NICVF_INTR_SQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); - break; - case NICVF_INTR_RBDR: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); - break; - case NICVF_INTR_PKT_DROP: - reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); - break; - case NICVF_INTR_TCP_TIMER: - reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); - break; - case NICVF_INTR_MBOX: - reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); - break; - case NICVF_INTR_QS_ERR: - reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); - break; - default: - netdev_err(nic->netdev, + if (!mask) { + netdev_dbg(nic->netdev, "Failed to disable interrupt: unknown type\n"); - break; + return; } - nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); + nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask); } /* Clear interrupt */ void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) { - u64 reg_val = 0; + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); - switch (int_type) { - case NICVF_INTR_CQ: - reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); - break; - case NICVF_INTR_SQ: - reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); - break; - case NICVF_INTR_RBDR: - reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); - break; - case NICVF_INTR_PKT_DROP: - reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); - break; - case NICVF_INTR_TCP_TIMER: - reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); - break; - case NICVF_INTR_MBOX: - reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); - break; - case NICVF_INTR_QS_ERR: - reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); - break; - default: - netdev_err(nic->netdev, + if (!mask) { + netdev_dbg(nic->netdev, "Failed to clear interrupt: unknown type\n"); - break; + return; } - nicvf_reg_write(nic, NIC_VF_INT, reg_val); + nicvf_reg_write(nic, NIC_VF_INT, mask); } /* Check if interrupt is enabled */ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) { - u64 reg_val; - u64 mask = 0xff; - - reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); - - switch (int_type) { - case NICVF_INTR_CQ: - mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); - break; - case NICVF_INTR_SQ: - mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); - break; - case NICVF_INTR_RBDR: - mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); - break; - case NICVF_INTR_PKT_DROP: - mask = NICVF_INTR_PKT_DROP_MASK; - break; - case NICVF_INTR_TCP_TIMER: - mask = NICVF_INTR_TCP_TIMER_MASK; - break; - case NICVF_INTR_MBOX: - mask = NICVF_INTR_MBOX_MASK; - break; - case NICVF_INTR_QS_ERR: - mask = NICVF_INTR_QS_ERR_MASK; - 
break; - default: - netdev_err(nic->netdev, + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); + /* If interrupt type is unknown, we treat it disabled. */ + if (!mask) { + netdev_dbg(nic->netdev, "Failed to check interrupt enable: unknown type\n"); - break; + return 0; } - return (reg_val & mask); + return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S); } void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 033e8306e91c..c5030a7f213a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -75,7 +75,7 @@ */ #define CMP_QSIZE CMP_QUEUE_SIZE2 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) -#define CMP_QUEUE_CQE_THRESH 0 +#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2) #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ #define RBDR_SIZE RBDR_SIZE0 @@ -83,10 +83,8 @@ #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) #define RBDR_THRESH (RCV_BUF_COUNT / 2) #define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ -#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ - (NICVF_RCV_BUF_ALIGN_BYTES * 2)) -#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES +#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) @@ -108,10 +106,6 @@ #define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */ #define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES) -#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\ - (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES) -#define NICVF_RCV_BUF_ALIGN_LEN(X)\ - (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X) /* Queue enable/disable */ #define NICVF_SQ_EN BIT_ULL(19) diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h index 3c1de97b1add..9e6d9876bfd0 100644 --- a/drivers/net/ethernet/cavium/thunder/q_struct.h +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h @@ -545,25 +545,28 @@ struct sq_hdr_subdesc { u64 subdesc_cnt:8; u64 csum_l4:2; u64 csum_l3:1; - u64 rsvd0:5; + u64 csum_inner_l4:2; + u64 csum_inner_l3:1; + u64 rsvd0:2; u64 l4_offset:8; u64 l3_offset:8; u64 rsvd1:4; u64 tot_len:20; /* W0 */ - u64 tso_sdc_cont:8; - u64 tso_sdc_first:8; - u64 tso_l4_offset:8; - u64 tso_flags_last:12; - u64 tso_flags_first:12; - u64 rsvd2:2; + u64 rsvd2:24; + u64 inner_l4_offset:8; + u64 inner_l3_offset:8; + u64 tso_start:8; + u64 rsvd3:2; u64 tso_max_paysize:14; /* W1 */ #elif defined(__LITTLE_ENDIAN_BITFIELD) u64 tot_len:20; u64 rsvd1:4; u64 l3_offset:8; u64 l4_offset:8; - u64 rsvd0:5; + u64 rsvd0:2; + u64 csum_inner_l3:1; + u64 csum_inner_l4:2; u64 csum_l3:1; u64 csum_l4:2; u64 subdesc_cnt:8; @@ -574,12 +577,11 @@ struct sq_hdr_subdesc { u64 subdesc_type:4; /* W0 */ u64 tso_max_paysize:14; - u64 rsvd2:2; - u64 tso_flags_first:12; - u64 tso_flags_last:12; - u64 tso_l4_offset:8; - u64 tso_sdc_first:8; - u64 tso_sdc_cont:8; /* W1 */ + u64 rsvd3:2; + u64 tso_start:8; + u64 inner_l3_offset:8; + u64 inner_l4_offset:8; + u64 rsvd2:24; /* W1 */ #endif }; diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index a79813a17b6e..4d187f22c48b 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -65,13 +65,14 @@ config CHELSIO_T3 will be 
called cxgb3. config CHELSIO_T4 - tristate "Chelsio Communications T4/T5 Ethernet support" + tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) select FW_LOADER select MDIO ---help--- - This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet - adapter and T5 based 40Gb Ethernet adapter. + This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet + adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb + Ethernet adapters. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. @@ -85,7 +86,7 @@ config CHELSIO_T4 will be called cxgb4. config CHELSIO_T4_DCB - bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards" + bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards" default n depends on CHELSIO_T4 && DCB ---help--- @@ -107,12 +108,12 @@ config CHELSIO_T4_FCOE If unsure, say N. config CHELSIO_T4VF - tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" + tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support" depends on PCI ---help--- - This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet - adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual - Functions. + This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet + adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb + Ethernet adapters with PCI-E SR-IOV Virtual Functions. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h index a4d2a4c08d3f..bf43da6c6a63 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cphy.h +++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h @@ -137,7 +137,7 @@ static inline int simple_mdio_write(struct cphy *cphy, int reg, /* Convenience initializer */ static inline void cphy_init(struct cphy *phy, struct net_device *dev, - int phy_addr, struct cphy_ops *phy_ops, + int phy_addr, const struct cphy_ops *phy_ops, const struct mdio_ops *mdio_ops) { struct adapter *adapter = netdev_priv(dev); diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c index 71018a4fdf15..76ce6e538326 100644 --- a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c +++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c @@ -337,7 +337,7 @@ static void mv88e1xxx_destroy(struct cphy *cphy) kfree(cphy); } -static struct cphy_ops mv88e1xxx_ops = { +static const struct cphy_ops mv88e1xxx_ops = { .destroy = mv88e1xxx_destroy, .reset = mv88e1xxx_reset, .interrupt_enable = mv88e1xxx_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c index d0cf611551a1..7ddb301bcba0 100644 --- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c +++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c @@ -195,7 +195,7 @@ static void mv88x201x_destroy(struct cphy *cphy) kfree(cphy); } -static struct cphy_ops mv88x201x_ops = { +static const struct cphy_ops mv88x201x_ops = { .destroy = mv88x201x_destroy, .reset = mv88x201x_reset, .interrupt_enable = mv88x201x_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c index a683fd3bb624..d546f46c8ef7 100644 --- a/drivers/net/ethernet/chelsio/cxgb/my3126.c +++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c @@ -154,7 +154,7 @@ static void my3126_destroy(struct cphy *cphy) kfree(cphy); } 
-static struct cphy_ops my3126_ops = { +static const struct cphy_ops my3126_ops = { .destroy = my3126_destroy, .reset = my3126_reset, .interrupt_enable = my3126_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c index ec5e05052d99..eb462d7db427 100644 --- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -570,7 +570,7 @@ static void pm3393_destroy(struct cmac *cmac) kfree(cmac); } -static struct cmac_ops pm3393_ops = { +static const struct cmac_ops pm3393_ops = { .destroy = pm3393_destroy, .reset = pm3393_reset, .interrupt_enable = pm3393_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c index b0cb388f5e12..6f30b6f78553 100644 --- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -666,7 +666,7 @@ static void mac_destroy(struct cmac *mac) kfree(mac); } -static struct cmac_ops vsc7326_ops = { +static const struct cmac_ops vsc7326_ops = { .destroy = mac_destroy, .reset = mac_reset, .interrupt_handler = mac_intr_handler, diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c index 2028da95afa1..dadf11e3dddb 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c +++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c @@ -198,7 +198,7 @@ static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed, return 0; } -static struct cphy_ops ael1002_ops = { +static const struct cphy_ops ael1002_ops = { .reset = ael1002_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, @@ -224,7 +224,7 @@ static int ael1006_reset(struct cphy *phy, int wait) return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait); } -static struct cphy_ops ael1006_ops = { +static const struct cphy_ops ael1006_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, @@ -495,7 +495,7 @@ static int ael2005_intr_handler(struct cphy *phy) return ret ? ret : cphy_cause_link_change; } -static struct cphy_ops ael2005_ops = { +static const struct cphy_ops ael2005_ops = { .reset = ael2005_reset, .intr_enable = ael2005_intr_enable, .intr_disable = ael2005_intr_disable, @@ -801,7 +801,7 @@ static int ael2020_intr_handler(struct cphy *phy) return ret ? 
ret : cphy_cause_link_change; } -static struct cphy_ops ael2020_ops = { +static const struct cphy_ops ael2020_ops = { .reset = ael2020_reset, .intr_enable = ael2020_intr_enable, .intr_disable = ael2020_intr_disable, @@ -856,7 +856,7 @@ static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, return 0; } -static struct cphy_ops qt2045_ops = { +static const struct cphy_ops qt2045_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, @@ -921,7 +921,7 @@ static int xaui_direct_power_down(struct cphy *phy, int enable) return 0; } -static struct cphy_ops xaui_direct_ops = { +static const struct cphy_ops xaui_direct_ops = { .reset = xaui_direct_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c index 341b7ef1508f..6af5d200e44f 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c +++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c @@ -247,7 +247,7 @@ static int aq100x_get_link_status(struct cphy *phy, int *link_ok, return 0; } -static struct cphy_ops aq100x_ops = { +static const struct cphy_ops aq100x_ops = { .reset = aq100x_reset, .intr_enable = aq100x_intr_enable, .intr_disable = aq100x_intr_disable, diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h index 442480982d3f..1bd7d89666c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/common.h +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@ -575,7 +575,7 @@ static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg, /* Convenience initializer */ static inline void cphy_init(struct cphy *phy, struct adapter *adapter, - int phy_addr, struct cphy_ops *phy_ops, + int phy_addr, const struct cphy_ops *phy_ops, const struct mdio_ops *mdio_ops, unsigned int caps, const char *desc) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 8f7aa53a4c4b..60908eab3b3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -701,15 +701,16 @@ static ssize_t attr_store(struct device *d, ssize_t(*set) (struct net_device *, unsigned int), unsigned int min_val, unsigned int max_val) { - char *endp; ssize_t ret; unsigned int val; if (!capable(CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val < min_val || val > max_val) + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val < min_val || val > max_val) return -EINVAL; rtnl_lock(); @@ -829,14 +830,15 @@ static ssize_t tm_attr_store(struct device *d, struct port_info *pi = netdev_priv(to_net_dev(d)); struct adapter *adap = pi->adapter; unsigned int val; - char *endp; ssize_t ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val > 10000000) + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val > 10000000) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index a22768c94200..ee04caa6c4d8 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -709,11 +709,21 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) return ret; } - p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); - p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10); - p->uclk = 
simple_strtoul(vpd.uclk_data, NULL, 10); - p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); - p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); + ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); + if (ret) + return ret; + ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); + if (ret) + return ret; + ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); + if (ret) + return ret; + ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); + if (ret) + return ret; + ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); + if (ret) + return ret; memcpy(p->sn, vpd.sn_data, SERNUM_LEN); /* Old eeproms didn't have port information */ @@ -723,8 +733,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) } else { p->port_type[0] = hex_to_bin(vpd.port0_data[0]); p->port_type[1] = hex_to_bin(vpd.port1_data[0]); - p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); - p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); + ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); + if (ret) + return ret; + ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); + if (ret) + return ret; } ret = hex2bin(p->eth_base, vpd.na_data, 6); diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c index 4f9a1c2724f4..8638ad42bf60 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c +++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c @@ -336,7 +336,7 @@ static int vsc8211_intr_handler(struct cphy *cphy) return cphy_cause; } -static struct cphy_ops vsc8211_ops = { +static const struct cphy_ops vsc8211_ops = { .reset = vsc8211_reset, .intr_enable = vsc8211_intr_enable, .intr_disable = vsc8211_intr_disable, @@ -350,7 +350,7 @@ static struct cphy_ops vsc8211_ops = { .power_down = vsc8211_power_down, }; -static struct cphy_ops vsc8211_fiber_ops = { +static const struct cphy_ops vsc8211_fiber_ops = { .reset = vsc8211_reset, .intr_enable = vsc8211_intr_enable, .intr_disable = vsc8211_intr_disable, diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index c308429dd9c7..7ad43af6bde1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -118,6 +118,11 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); if (ret) { write_unlock_bh(&ctbl->lock); + dev_err(adap->pdev_dev, + "CLIP FW cmd failed with error %d, " + "Connections using %pI6c wont be " + "offloaded", + ret, ce->addr6.sin6_addr.s6_addr); return ret; } } else { @@ -127,6 +132,9 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) } } else { write_unlock_bh(&ctbl->lock); + dev_info(adap->pdev_dev, "CLIP table overflow, " + "Connections using %pI6c wont be offloaded", + (void *)lip); return -ENOMEM; } write_unlock_bh(&ctbl->lock); @@ -146,6 +154,9 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) int hash; int ret = -1; + if (!ctbl) + return; + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); @@ -295,6 +306,10 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, INIT_LIST_HEAD(&ctbl->hash_list[i]); cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry)); + if (!cl_list) { + t4_free_mem(ctbl); + return NULL; + } ctbl->cl_list = (void *)cl_list; for (i = 0; i < clipt_size; i++) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 55a47de544ea..ec6e849676c1 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -301,6 +301,8 @@ struct devlog_params { /* Stores chip specific parameters */ struct arch_specific_params { u8 nchan; + u8 pm_stats_cnt; + u8 cng_ch_bits_log; /* congestion channel map bits width */ u16 mps_rplc_size; u16 vfcount; u32 sge_fl_db; @@ -398,11 +400,10 @@ struct link_config { enum { MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ - MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */ - MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */ }; enum { @@ -420,7 +421,7 @@ enum { INGQ_EXTRAS = 2, /* firmware event queue and */ /* forwarded interrupts */ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES - + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, + + MAX_RDMA_CIQS + INGQ_EXTRAS, }; struct adapter; @@ -483,6 +484,8 @@ struct sge_fl { /* SGE free-buffer queue state */ unsigned int pidx; /* producer index */ unsigned long alloc_failed; /* # of times buffer allocation failed */ unsigned long large_alloc_failed; + unsigned long mapping_err; /* # of RX Buffer DMA Mapping failures */ + unsigned long low; /* # of times momentarily starving */ unsigned long starving; /* RO fields */ unsigned int cntxt_id; /* SGE context id for the free list */ @@ -618,6 +621,7 @@ struct sge_ofld_txq { /* state for an SGE offload Tx queue */ struct adapter *adap; struct sk_buff_head sendq; /* list of backpressured packets */ struct tasklet_struct qresume_tsk; /* restarts the queue */ + bool service_ofldq_running; /* service_ofldq() is processing sendq */ u8 full; /* the Tx ring is full */ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ } ____cacheline_aligned_in_smp; @@ -636,7 +640,7 @@ struct sge { struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; - struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS]; struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; @@ -647,10 +651,10 @@ struct sge { u16 max_ethqsets; /* # of available Ethernet queue sets */ u16 ethqsets; /* # of active Ethernet queue sets */ u16 ethtxq_rover; /* Tx queue to clean up next */ - u16 ofldqsets; /* # of active offload queue sets */ + u16 iscsiqsets; /* # of active iSCSI queue sets */ u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ - u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 iscsi_rxq[MAX_OFLD_QSETS]; u16 rdma_rxq[MAX_RDMA_QUEUES]; u16 rdma_ciq[MAX_RDMA_CIQS]; u16 timer_val[SGE_NTIMERS]; @@ -676,7 +680,7 @@ struct sge { }; #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) -#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) +#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++) #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) #define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++) @@ -1253,6 +1257,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver); int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op); int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force); +int 
t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); int t4_get_fw_version(struct adapter *adapter, u32 *vers); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 4269944c5db5..e6a4072b494b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -757,8 +757,8 @@ static int pm_stats_show(struct seq_file *seq, void *v) }; int i; - u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS]; - u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS]; + u32 tx_cnt[T6_PM_NSTATS], rx_cnt[T6_PM_NSTATS]; + u64 tx_cyc[T6_PM_NSTATS], rx_cyc[T6_PM_NSTATS]; struct adapter *adap = seq->private; t4_pmtx_get_stats(adap, tx_cnt, tx_cyc); @@ -773,6 +773,32 @@ static int pm_stats_show(struct seq_file *seq, void *v) for (i = 0; i < PM_NSTATS - 1; i++) seq_printf(seq, "%-13s %10u %20llu\n", rx_pm_stats[i], rx_cnt[i], rx_cyc[i]); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + /* In T5 the granularity of the total wait is too fine. + * It is not useful as it reaches the max value too fast. + * Hence display this Input FIFO wait for T6 onwards. + */ + seq_printf(seq, "%13s %10s %20s\n", + " ", "Total wait", "Total Occupancy"); + seq_printf(seq, "Tx FIFO wait %10u %20llu\n", + tx_cnt[i], tx_cyc[i]); + seq_printf(seq, "Rx FIFO wait %10u %20llu\n", + rx_cnt[i], rx_cyc[i]); + + /* Skip index 6 as there is nothing useful ihere */ + i += 2; + + /* At index 7, a new stat for read latency (count, total wait) + * is added. + */ + seq_printf(seq, "%13s %10s %20s\n", + " ", "Reads", "Total wait"); + seq_printf(seq, "Tx latency %10u %20llu\n", + tx_cnt[i], tx_cyc[i]); + seq_printf(seq, "Rx latency %10u %20llu\n", + rx_cnt[i], rx_cyc[i]); + } return 0; } @@ -1559,25 +1585,35 @@ static int mps_tcam_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); - if (v == SEQ_START_TOKEN) { - if (adap->params.arch.mps_rplc_size > 128) + if (chip_ver > CHELSIO_T5) { seq_puts(seq, "Idx Ethernet address Mask " + " VNI Mask IVLAN Vld " + "DIP_Hit Lookup Port " "Vld Ports PF VF " "Replication " " P0 P1 P2 P3 ML\n"); - else - seq_puts(seq, "Idx Ethernet address Mask " - "Vld Ports PF VF Replication" - " P0 P1 P2 P3 ML\n"); + } else { + if (adap->params.arch.mps_rplc_size > 128) + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF " + "Replication " + " P0 P1 P2 P3 ML\n"); + else + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF Replication" + " P0 P1 P2 P3 ML\n"); + } } else { u64 mask; u8 addr[ETH_ALEN]; - bool replicate; + bool replicate, dip_hit = false, vlan_vld = false; unsigned int idx = (uintptr_t)v - 2; u64 tcamy, tcamx, val; - u32 cls_lo, cls_hi, ctl; + u32 cls_lo, cls_hi, ctl, data2, vnix = 0, vniy = 0; u32 rplc[8] = {0}; + u8 lookup_type = 0, port_num = 0; + u16 ivlan = 0; if (chip_ver > CHELSIO_T5) { /* CtlCmdType - 0: Read, 1: Write @@ -1596,6 +1632,22 @@ static int mps_tcam_show(struct seq_file *seq, void *v) val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); tcamy = DMACH_G(val) << 32; tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A); + lookup_type = DATALKPTYPE_G(data2); + /* 0 - Outer header, 1 - Inner header + * [71:48] bit locations are overloaded for + * outer vs. inner lookup types. 
+ */ + if (lookup_type && (lookup_type != DATALKPTYPE_M)) { + /* Inner header VNI */ + vniy = ((data2 & DATAVIDH2_F) << 23) | + (DATAVIDH1_G(data2) << 16) | VIDL_G(val); + dip_hit = data2 & DATADIPHIT_F; + } else { + vlan_vld = data2 & DATAVIDH2_F; + ivlan = VIDL_G(val); + } + port_num = DATAPORTNUM_G(data2); /* Read tcamx. Change the control param */ ctl |= CTLXYBITSEL_V(1); @@ -1603,6 +1655,12 @@ static int mps_tcam_show(struct seq_file *seq, void *v) val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); tcamx = DMACH_G(val) << 32; tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A); + if (lookup_type && (lookup_type != DATALKPTYPE_M)) { + /* Inner header VNI mask */ + vnix = ((data2 & DATAVIDH2_F) << 23) | + (DATAVIDH1_G(data2) << 16) | VIDL_G(val); + } } else { tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); @@ -1662,17 +1720,47 @@ static int mps_tcam_show(struct seq_file *seq, void *v) } tcamxy2valmask(tcamx, tcamy, addr, &mask); - if (chip_ver > CHELSIO_T5) - seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " - "%012llx%3c %#x%4u%4d", - idx, addr[0], addr[1], addr[2], addr[3], - addr[4], addr[5], (unsigned long long)mask, - (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', - PORTMAP_G(cls_hi), - T6_PF_G(cls_lo), - (cls_lo & T6_VF_VALID_F) ? - T6_VF_G(cls_lo) : -1); - else + if (chip_ver > CHELSIO_T5) { + /* Inner header lookup */ + if (lookup_type && (lookup_type != DATALKPTYPE_M)) { + seq_printf(seq, + "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx %06x %06x - - %3c" + " 'I' %4x " + "%3c %#x%4u%4d", idx, addr[0], + addr[1], addr[2], addr[3], + addr[4], addr[5], + (unsigned long long)mask, + vniy, vnix, dip_hit ? 'Y' : 'N', + port_num, + (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + T6_PF_G(cls_lo), + (cls_lo & T6_VF_VALID_F) ? + T6_VF_G(cls_lo) : -1); + } else { + seq_printf(seq, + "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx - - ", + idx, addr[0], addr[1], addr[2], + addr[3], addr[4], addr[5], + (unsigned long long)mask); + + if (vlan_vld) + seq_printf(seq, "%4u Y ", ivlan); + else + seq_puts(seq, " - N "); + + seq_printf(seq, + "- %3c %4x %3c %#x%4u%4d", + lookup_type ? 'I' : 'O', port_num, + (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + T6_PF_G(cls_lo), + (cls_lo & T6_VF_VALID_F) ? 
+ T6_VF_G(cls_lo) : -1); + } + } else seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012llx%3c %#x%4u%4d", idx, addr[0], addr[1], addr[2], addr[3], @@ -2245,7 +2333,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); - int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); + int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4); int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); @@ -2325,14 +2413,16 @@ do { \ TL("TxMapErr:", mapping_err); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (iscsi_idx < iscsi_entries) { const struct sge_ofld_rxq *rx = - &adap->sge.ofldrxq[iscsi_idx * 4]; + &adap->sge.iscsirxq[iscsi_idx * 4]; const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[iscsi_idx * 4]; - int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx); + int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx); S("QType:", "iSCSI"); T("TxQ ID:", q.cntxt_id); @@ -2359,6 +2449,8 @@ do { \ RL("RxNoMem:", stats.nomem); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (rdma_idx < rdma_entries) { @@ -2388,6 +2480,8 @@ do { \ RL("RxNoMem:", stats.nomem); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (ciq_idx < ciq_entries) { @@ -2448,7 +2542,7 @@ do { \ static int sge_queue_entries(const struct adapter *adap) { return DIV_ROUND_UP(adap->sge.ethqsets, 4) + - DIV_ROUND_UP(adap->sge.ofldqsets, 4) + + DIV_ROUND_UP(adap->sge.iscsiqsets, 4) + DIV_ROUND_UP(adap->sge.rdmaqs, 4) + DIV_ROUND_UP(adap->sge.rdmaciqs, 4) + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index a077f9476daf..7a0b92b2f73c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val) } static const char stats_strings[][ETH_GSTRING_LEN] = { - "tx_octets_ok ", - "tx_frames_ok ", - "tx_broadcast_frames ", - "tx_multicast_frames ", - "tx_unicast_frames ", - "tx_error_frames ", - - "tx_frames_64 ", - "tx_frames_65_to_127 ", - "tx_frames_128_to_255 ", - "tx_frames_256_to_511 ", - "tx_frames_512_to_1023 ", - "tx_frames_1024_to_1518 ", - "tx_frames_1519_to_max ", - - "tx_frames_dropped ", - "tx_pause_frames ", - "tx_ppp0_frames ", - "tx_ppp1_frames ", - "tx_ppp2_frames ", - "tx_ppp3_frames ", - "tx_ppp4_frames ", - "tx_ppp5_frames ", - "tx_ppp6_frames ", - "tx_ppp7_frames ", - - "rx_octets_ok ", - "rx_frames_ok ", - "rx_broadcast_frames ", - "rx_multicast_frames ", - "rx_unicast_frames ", - - "rx_frames_too_long ", - "rx_jabber_errors ", - "rx_fcs_errors ", - "rx_length_errors ", - "rx_symbol_errors ", - "rx_runt_frames ", - - "rx_frames_64 ", - "rx_frames_65_to_127 ", - "rx_frames_128_to_255 ", - "rx_frames_256_to_511 ", - "rx_frames_512_to_1023 ", - "rx_frames_1024_to_1518 ", - "rx_frames_1519_to_max ", - - "rx_pause_frames ", - "rx_ppp0_frames ", - "rx_ppp1_frames ", - "rx_ppp2_frames ", - 
"rx_ppp3_frames ", - "rx_ppp4_frames ", - "rx_ppp5_frames ", - "rx_ppp6_frames ", - "rx_ppp7_frames ", - - "rx_bg0_frames_dropped ", - "rx_bg1_frames_dropped ", - "rx_bg2_frames_dropped ", - "rx_bg3_frames_dropped ", - "rx_bg0_frames_trunc ", - "rx_bg1_frames_trunc ", - "rx_bg2_frames_trunc ", - "rx_bg3_frames_trunc ", - - "tso ", - "tx_csum_offload ", - "rx_csum_good ", - "vlan_extractions ", - "vlan_insertions ", - "gro_packets ", - "gro_merged ", + "tx_octets_ok ", + "tx_frames_ok ", + "tx_broadcast_frames ", + "tx_multicast_frames ", + "tx_unicast_frames ", + "tx_error_frames ", + + "tx_frames_64 ", + "tx_frames_65_to_127 ", + "tx_frames_128_to_255 ", + "tx_frames_256_to_511 ", + "tx_frames_512_to_1023 ", + "tx_frames_1024_to_1518 ", + "tx_frames_1519_to_max ", + + "tx_frames_dropped ", + "tx_pause_frames ", + "tx_ppp0_frames ", + "tx_ppp1_frames ", + "tx_ppp2_frames ", + "tx_ppp3_frames ", + "tx_ppp4_frames ", + "tx_ppp5_frames ", + "tx_ppp6_frames ", + "tx_ppp7_frames ", + + "rx_octets_ok ", + "rx_frames_ok ", + "rx_broadcast_frames ", + "rx_multicast_frames ", + "rx_unicast_frames ", + + "rx_frames_too_long ", + "rx_jabber_errors ", + "rx_fcs_errors ", + "rx_length_errors ", + "rx_symbol_errors ", + "rx_runt_frames ", + + "rx_frames_64 ", + "rx_frames_65_to_127 ", + "rx_frames_128_to_255 ", + "rx_frames_256_to_511 ", + "rx_frames_512_to_1023 ", + "rx_frames_1024_to_1518 ", + "rx_frames_1519_to_max ", + + "rx_pause_frames ", + "rx_ppp0_frames ", + "rx_ppp1_frames ", + "rx_ppp2_frames ", + "rx_ppp3_frames ", + "rx_ppp4_frames ", + "rx_ppp5_frames ", + "rx_ppp6_frames ", + "rx_ppp7_frames ", + + "rx_bg0_frames_dropped ", + "rx_bg1_frames_dropped ", + "rx_bg2_frames_dropped ", + "rx_bg3_frames_dropped ", + "rx_bg0_frames_trunc ", + "rx_bg1_frames_trunc ", + "rx_bg2_frames_trunc ", + "rx_bg3_frames_trunc ", + + "tso ", + "tx_csum_offload ", + "rx_csum_good ", + "vlan_extractions ", + "vlan_insertions ", + "gro_packets ", + "gro_merged ", }; static char adapter_stats_strings[][ETH_GSTRING_LEN] = { @@ -686,7 +686,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (netif_running(dev)) - return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, + return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0d147610a06f..b8a5fb0c32d4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -162,19 +162,8 @@ MODULE_FIRMWARE(FW6_FNAME); static uint force_init; module_param(force_init, uint, 0644); -MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter"); - -/* - * Normally if the firmware we connect to has Configuration File support, we - * use that and only fall back to the old Driver-based initialization if the - * Configuration File fails for some reason. If force_old_init is set, then - * we'll always use the old Driver-based initialization sequence. 
- */ -static uint force_old_init; - -module_param(force_old_init, uint, 0644); -MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated" - " parameter"); +MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter," + "deprecated parameter"); static int dflt_msg_enable = DFLT_MSG_ENABLE; @@ -196,23 +185,6 @@ module_param(msi, int, 0644); MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); /* - * Queue interrupt hold-off timer values. Queues default to the first of these - * upon creation. - */ -static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; - -module_param_array(intr_holdoff, uint, NULL, 0644); -MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " - "0..4 in microseconds, deprecated parameter"); - -static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; - -module_param_array(intr_cnt, uint, NULL, 0644); -MODULE_PARM_DESC(intr_cnt, - "thresholds 1..3 for queue interrupt packet counters, " - "deprecated parameter"); - -/* * Normally we tell the chip to deliver Ingress Packets into our DMA buffers * offset by 2 bytes in order to have the IP headers line up on 4-byte * boundaries. This is a requirement for many architectures which will throw @@ -226,13 +198,7 @@ MODULE_PARM_DESC(intr_cnt, */ static int rx_dma_offset = 2; -static bool vf_acls; - #ifdef CONFIG_PCI_IOV -module_param(vf_acls, bool, 0644); -MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, " - "deprecated parameter"); - /* Configure the number of PCI-E Virtual Function which are to be instantiated * on SR-IOV Capable Physical Functions. */ @@ -253,12 +219,6 @@ module_param(select_queue, int, 0644); MODULE_PARM_DESC(select_queue, "Select between kernel provided method of selecting or driver method of selecting TX queue. 
Default is kernel method."); -static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC; - -module_param(tp_vlan_pri_map, uint, 0644); -MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, " - "deprecated parameter"); - static struct dentry *cxgb4_debugfs_root; static LIST_HEAD(adapter_list); @@ -766,8 +726,8 @@ static void name_msix_vecs(struct adapter *adap) } /* offload queues */ - for_each_ofldrxq(&adap->sge, i) - snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", + for_each_iscsirxq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d", adap->port[0]->name, i); for_each_rdmarxq(&adap->sge, i) @@ -782,7 +742,7 @@ static void name_msix_vecs(struct adapter *adap) static int request_msix_queue_irqs(struct adapter *adap) { struct sge *s = &adap->sge; - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; + int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; int msi_index = 2; err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, @@ -799,11 +759,11 @@ static int request_msix_queue_irqs(struct adapter *adap) goto unwind; msi_index++; } - for_each_ofldrxq(s, ofldqidx) { + for_each_iscsirxq(s, iscsiqidx) { err = request_irq(adap->msix_info[msi_index].vec, t4_sge_intr_msix, 0, adap->msix_info[msi_index].desc, - &s->ofldrxq[ofldqidx].rspq); + &s->iscsirxq[iscsiqidx].rspq); if (err) goto unwind; msi_index++; @@ -835,9 +795,9 @@ unwind: while (--rdmaqidx >= 0) free_irq(adap->msix_info[--msi_index].vec, &s->rdmarxq[rdmaqidx].rspq); - while (--ofldqidx >= 0) + while (--iscsiqidx >= 0) free_irq(adap->msix_info[--msi_index].vec, - &s->ofldrxq[ofldqidx].rspq); + &s->iscsirxq[iscsiqidx].rspq); while (--ethqidx >= 0) free_irq(adap->msix_info[--msi_index].vec, &s->ethrxq[ethqidx].rspq); @@ -853,8 +813,9 @@ static void free_msix_queue_irqs(struct adapter *adap) free_irq(adap->msix_info[1].vec, &s->fw_evtq); for_each_ethrxq(s, i) free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); - for_each_ofldrxq(s, i) - free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); + for_each_iscsirxq(s, i) + free_irq(adap->msix_info[msi_index++].vec, + &s->iscsirxq[i].rspq); for_each_rdmarxq(s, i) free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); for_each_rdmaciq(s, i) @@ -1093,8 +1054,8 @@ freeout: t4_free_sge_resources(adap); } } - j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ - for_each_ofldrxq(s, i) { + j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */ + for_each_iscsirxq(s, i) { err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], adap->port[i / j], s->fw_evtq.cntxt_id); @@ -1110,7 +1071,7 @@ freeout: t4_free_sge_resources(adap); msi_idx += nq; \ } while (0) - ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq); + ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq); ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq); j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq); @@ -1181,16 +1142,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx) */ if (f->fs.newdmac || f->fs.newvlan) { /* allocate L2T entry for new filter */ - f->l2t = t4_l2t_alloc_switching(adapter->l2t); + f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan, + f->fs.eport, f->fs.dmac); if (f->l2t == NULL) { kfree_skb(skb); - return -EAGAIN; - } - if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, - f->fs.eport, f->fs.dmac)) { - cxgb4_l2t_release(f->l2t); - f->l2t = NULL; - kfree_skb(skb); 
return -ENOMEM; } } @@ -1511,7 +1466,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) else stid = -1; } else { - stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1); if (stid < 0) stid = -1; } @@ -1525,7 +1480,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) if (family == PF_INET) t->stids_in_use++; else - t->stids_in_use += 4; + t->stids_in_use += 2; } spin_unlock_bh(&t->stid_lock); return stid; @@ -1576,13 +1531,13 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) if (family == PF_INET) __clear_bit(stid, t->stid_bmap); else - bitmap_release_region(t->stid_bmap, stid, 2); + bitmap_release_region(t->stid_bmap, stid, 1); t->stid_tab[stid].data = NULL; if (stid < t->nstids) { if (family == PF_INET) t->stids_in_use--; else - t->stids_in_use -= 4; + t->stids_in_use -= 2; } else { t->sftids_in_use--; } @@ -2283,7 +2238,7 @@ static void disable_dbs(struct adapter *adap) for_each_ethrxq(&adap->sge, i) disable_txq_db(&adap->sge.ethtxq[i].q); - for_each_ofldrxq(&adap->sge, i) + for_each_iscsirxq(&adap->sge, i) disable_txq_db(&adap->sge.ofldtxq[i].q); for_each_port(adap, i) disable_txq_db(&adap->sge.ctrlq[i].q); @@ -2295,7 +2250,7 @@ static void enable_dbs(struct adapter *adap) for_each_ethrxq(&adap->sge, i) enable_txq_db(adap, &adap->sge.ethtxq[i].q); - for_each_ofldrxq(&adap->sge, i) + for_each_iscsirxq(&adap->sge, i) enable_txq_db(adap, &adap->sge.ofldtxq[i].q); for_each_port(adap, i) enable_txq_db(adap, &adap->sge.ctrlq[i].q); @@ -2365,7 +2320,7 @@ static void recover_all_queues(struct adapter *adap) for_each_ethrxq(&adap->sge, i) sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); - for_each_ofldrxq(&adap->sge, i) + for_each_iscsirxq(&adap->sge, i) sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); for_each_port(adap, i) sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); @@ -2449,10 +2404,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.nrxq = adap->sge.rdmaqs; lli.nciq = adap->sge.rdmaciqs; } else if (uld == CXGB4_ULD_ISCSI) { - lli.rxq_ids = adap->sge.ofld_rxq; - lli.nrxq = adap->sge.ofldqsets; + lli.rxq_ids = adap->sge.iscsi_rxq; + lli.nrxq = adap->sge.iscsiqsets; } - lli.ntxq = adap->sge.ofldqsets; + lli.ntxq = adap->sge.iscsiqsets; lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; @@ -3146,16 +3101,6 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) if (ret < 0) return ret; - /* select capabilities we'll be using */ - if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { - if (!vf_acls) - c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); - else - c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); - } else if (vf_acls) { - dev_err(adap->pdev_dev, "virtualization ACLs not supported"); - return ret; - } c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); @@ -4348,11 +4293,11 @@ static void cfg_queues(struct adapter *adap) * capped by the number of available cores. 
*/ if (n10g) { - i = min_t(int, ARRAY_SIZE(s->ofldrxq), + i = min_t(int, ARRAY_SIZE(s->iscsirxq), num_online_cpus()); - s->ofldqsets = roundup(i, adap->params.nports); + s->iscsiqsets = roundup(i, adap->params.nports); } else - s->ofldqsets = adap->params.nports; + s->iscsiqsets = adap->params.nports; /* For RDMA one Rx queue per channel suffices */ s->rdmaqs = adap->params.nports; /* Try and allow at least 1 CIQ per cpu rounding down @@ -4383,8 +4328,8 @@ static void cfg_queues(struct adapter *adap) for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) s->ofldtxq[i].q.size = 1024; - for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { - struct sge_ofld_rxq *r = &s->ofldrxq[i]; + for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) { + struct sge_ofld_rxq *r = &s->iscsirxq[i]; init_rspq(adap, &r->rspq, 5, 1, 1024, 64); r->rspq.uld = CXGB4_ULD_ISCSI; @@ -4465,7 +4410,7 @@ static int enable_msix(struct adapter *adap) want = s->max_ethqsets + EXTRA_VECS; if (is_offload(adap)) { - want += s->rdmaqs + s->rdmaciqs + s->ofldqsets; + want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets; /* need nchan for each possible ULD */ ofld_need = 3 * nchan; } @@ -4504,13 +4449,13 @@ static int enable_msix(struct adapter *adap) /* leftovers go to OFLD */ i = allocated - EXTRA_VECS - s->max_ethqsets - s->rdmaqs - s->rdmaciqs; - s->ofldqsets = (i / nchan) * nchan; /* round down */ + s->iscsiqsets = (i / nchan) * nchan; /* round down */ } for (i = 0; i < allocated; ++i) adap->msix_info[i].vec = entries[i].vector; dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", - allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs, + allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs, s->rdmaciqs); kfree(entries); @@ -4538,6 +4483,79 @@ static int init_rss(struct adapter *adap) return 0; } +static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u32 lnkcap1, lnkcap2; + int err1, err2; + +#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP, + &lnkcap1); + err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2, + &lnkcap2); + if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *speed = PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + if (!err1) { + *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; + if (!lnkcap2) { /* pre-r3.0 */ + if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + } + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return err1 ? err1 : err2 ? err2 : -EINVAL; + return 0; +} + +static void cxgb4_check_pcie_caps(struct adapter *adap) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + +#define PCIE_SPEED_STR(speed) \ + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ + speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ + "Unknown") + + if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) { + dev_warn(adap->pdev_dev, + "Unable to determine PCIe device BW capabilities\n"); + return; + } + + if (pcie_get_minimum_link(adap->pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + dev_warn(adap->pdev_dev, + "Unable to determine PCI Express bandwidth.\n"); + return; + } + + dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n", + PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); + dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n", + width, width_cap); + if (speed < speed_cap || width < width_cap) + dev_info(adap->pdev_dev, + "A slot with more lanes and/or higher speed is " + "suggested for optimal performance.\n"); +} + static void print_port_info(const struct net_device *dev) { char buf[80]; @@ -4565,10 +4583,10 @@ static void print_port_info(const struct net_device *dev) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); - netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", + netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n", adap->params.vpd.id, CHELSIO_CHIP_RELEASE(adap->params.chip), buf, - is_offload(adap) ? "R" : "", adap->params.pci.width, spd, + is_offload(adap) ? "R" : "", (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? " MSI" : ""); netdev_info(dev, "S/N: %s, P/N: %s\n", @@ -4787,8 +4805,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) - t4_write_reg(adapter, SGE_STAT_CFG_A, - STATSOURCE_T5_V(7) | STATMODE_V(0)); + t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | + (is_t5(adapter->params.chip) ? 
STATMODE_V(0) : + T6_STATMODE_V(0))); for_each_port(adapter, i) { struct net_device *netdev; @@ -4865,15 +4884,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } #if IS_ENABLED(CONFIG_IPV6) - adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, - adapter->clipt_end); - if (!adapter->clipt) { - /* We tolerate a lack of clip_table, giving up - * some functionality + if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && + (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { + /* CLIP functionality is not present in hardware, + * hence disable all offload features */ dev_warn(&pdev->dev, - "could not allocate Clip table, continuing\n"); + "CLIP not enabled in hardware, continuing\n"); adapter->params.offload = 0; + } else { + adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, + adapter->clipt_end); + if (!adapter->clipt) { + /* We tolerate a lack of clip_table, giving up + * some functionality + */ + dev_warn(&pdev->dev, + "could not allocate Clip table, continuing\n"); + adapter->params.offload = 0; + } } #endif if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { @@ -4904,6 +4933,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else if (msi > 0 && pci_enable_msi(pdev) == 0) adapter->flags |= USING_MSI; + /* check for PCI Express bandwidth capabiltites */ + cxgb4_check_pcie_caps(adapter); + err = init_rss(adapter); if (err) goto out_free_dev; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index ac27898c6ab0..5b0f3ef348e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -66,7 +66,7 @@ struct l2t_data { static inline unsigned int vlan_prio(const struct l2t_entry *e) { - return e->vlan >> 13; + return e->vlan >> VLAN_PRIO_SHIFT; } static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) @@ -161,8 +161,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); - set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - t4_ofld_send(adap, skb); + t4_mgmt_tx(adap, skb); if (sync && e->state != L2T_STATE_SWITCHING) e->state = L2T_STATE_SYNC_WRITE; @@ -175,14 +174,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) */ static void send_pending(struct adapter *adap, struct l2t_entry *e) { - while (e->arpq_head) { - struct sk_buff *skb = e->arpq_head; + struct sk_buff *skb; - e->arpq_head = skb->next; - skb->next = NULL; + while ((skb = __skb_dequeue(&e->arpq)) != NULL) t4_ofld_send(adap, skb); - } - e->arpq_tail = NULL; } /* @@ -222,12 +217,7 @@ void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) */ static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) { - skb->next = NULL; - if (e->arpq_head) - e->arpq_tail->next = skb; - else - e->arpq_head = skb; - e->arpq_tail = skb; + __skb_queue_tail(&e->arpq, skb); } int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, @@ -259,7 +249,8 @@ again: if (e->state == L2T_STATE_RESOLVING && !neigh_event_send(e->neigh, NULL)) { spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + if (e->state == L2T_STATE_RESOLVING && + !skb_queue_empty(&e->arpq)) write_l2e(adap, e, 1); spin_unlock_bh(&e->lock); } @@ -305,12 +296,82 @@ found: return e; } -/* - * Called when an L2T entry has no more users. 
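/* A minimal sketch of the list conversion applied to the L2T ARP queue in
 * this hunk: the hand-rolled arpq_head/arpq_tail pair becomes a struct
 * sk_buff_head driven by __skb_queue_tail()/__skb_dequeue(). The __skb_*
 * variants do no locking themselves, so the caller's spinlock (e->lock in
 * the driver) stands in for the queue lock. Names below are illustrative.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct pending_q {
	spinlock_t lock;
	struct sk_buff_head q;	/* replaces the head/tail pointer pair */
};

static void pending_q_init(struct pending_q *p)
{
	spin_lock_init(&p->lock);
	__skb_queue_head_init(&p->q);	/* external lock, so no internal one */
}

static void pending_q_add(struct pending_q *p, struct sk_buff *skb)
{
	spin_lock_bh(&p->lock);
	__skb_queue_tail(&p->q, skb);
	spin_unlock_bh(&p->lock);
}

static void pending_q_drain(struct pending_q *p)
{
	struct sk_buff *skb;

	spin_lock_bh(&p->lock);
	while ((skb = __skb_dequeue(&p->q)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->lock);
}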
+static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan, + u8 port, u8 *dmac) +{ + struct l2t_entry *end, *e, **p; + struct l2t_entry *first_free = NULL; + + for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) { + if (atomic_read(&e->refcnt) == 0) { + if (!first_free) + first_free = e; + } else { + if (e->state == L2T_STATE_SWITCHING) { + if (ether_addr_equal(e->dmac, dmac) && + (e->vlan == vlan) && (e->lport == port)) + goto exists; + } + } + } + + if (first_free) { + e = first_free; + goto found; + } + + return NULL; + +found: + /* The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + e->state = L2T_STATE_UNUSED; + +exists: + return e; +} + +/* Called when an L2T entry has no more users. The entry is left in the hash + * table since it is likely to be reused but we also bump nfree to indicate + * that the entry can be reallocated for a different neighbor. We also drop + * the existing neighbor reference in case the neighbor is going away and is + * waiting on our reference. + * + * Because entries can be reallocated to other neighbors once their ref count + * drops to 0 we need to take the entry's lock to avoid races with a new + * incarnation. */ +static void _t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + struct sk_buff *skb; + + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + while ((skb = __skb_dequeue(&e->arpq)) != NULL) + kfree_skb(skb); + } + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +/* Locked version of _t4_l2e_free */ static void t4_l2e_free(struct l2t_entry *e) { struct l2t_data *d; + struct sk_buff *skb; spin_lock_bh(&e->lock); if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ @@ -318,13 +379,8 @@ static void t4_l2e_free(struct l2t_entry *e) neigh_release(e->neigh); e->neigh = NULL; } - while (e->arpq_head) { - struct sk_buff *skb = e->arpq_head; - - e->arpq_head = skb->next; + while ((skb = __skb_dequeue(&e->arpq)) != NULL) kfree_skb(skb); - } - e->arpq_tail = NULL; } spin_unlock_bh(&e->lock); @@ -457,18 +513,19 @@ EXPORT_SYMBOL(cxgb4_select_ntuple); * on the arpq head. If a packet specifies a failure handler it is invoked, * otherwise the packet is sent to the device. 
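/* A simplified model of the find_or_alloc_l2e() scan added above: prefer an
 * active switching entry that already matches the (dmac, vlan, port) key so
 * it can be shared, otherwise fall back to the first entry whose refcount
 * has dropped to zero. Types and names are stand-ins; the hash-chain
 * unlinking done by the real function is omitted.
 */
#include <linux/atomic.h>
#include <linux/etherdevice.h>

struct sw_ent {
	atomic_t refcnt;
	u16 vlan;
	u8 port;
	u8 dmac[ETH_ALEN];
};

static struct sw_ent *find_or_reuse(struct sw_ent *tab, unsigned int n,
				    const u8 *dmac, u16 vlan, u8 port)
{
	struct sw_ent *e, *first_free = NULL;
	unsigned int i;

	for (i = 0; i < n; i++) {
		e = &tab[i];
		if (atomic_read(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;		/* reuse candidate */
		} else if (ether_addr_equal(e->dmac, dmac) &&
			   e->vlan == vlan && e->port == port) {
			return e;			/* share existing entry */
		}
	}
	return first_free;	/* may be NULL if the table is full */
}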
*/ -static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e) { - while (arpq) { - struct sk_buff *skb = arpq; + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&e->arpq)) != NULL) { const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - arpq = skb->next; - skb->next = NULL; + spin_unlock(&e->lock); if (cb->arp_err_handler) cb->arp_err_handler(cb->handle, skb); else t4_ofld_send(adap, skb); + spin_lock(&e->lock); } } @@ -479,7 +536,7 @@ static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) { struct l2t_entry *e; - struct sk_buff *arpq = NULL; + struct sk_buff_head *arpq = NULL; struct l2t_data *d = adap->l2t; int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; @@ -506,10 +563,9 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) if (e->state == L2T_STATE_RESOLVING) { if (neigh->nud_state & NUD_FAILED) { - arpq = e->arpq_head; - e->arpq_head = e->arpq_tail = NULL; + arpq = &e->arpq; } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && - e->arpq_head) { + !skb_queue_empty(&e->arpq)) { write_l2e(adap, e, 1); } } else { @@ -519,43 +575,66 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) write_l2e(adap, e, 0); } - spin_unlock_bh(&e->lock); - if (arpq) - handle_failed_resolution(adap, arpq); + handle_failed_resolution(adap, e); + spin_unlock_bh(&e->lock); } /* Allocate an L2T entry for use by a switching rule. Such need to be * explicitly freed and while busy they are not on any hash chain, so normal * address resolution updates do not see them. */ -struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) +struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, + u8 port, u8 *eth_addr) { + struct l2t_data *d = adap->l2t; struct l2t_entry *e; + int ret; write_lock_bh(&d->lock); - e = alloc_l2e(d); + e = find_or_alloc_l2e(d, vlan, port, eth_addr); if (e) { spin_lock(&e->lock); /* avoid race with t4_l2t_free */ - e->state = L2T_STATE_SWITCHING; - atomic_set(&e->refcnt, 1); + if (!atomic_read(&e->refcnt)) { + e->state = L2T_STATE_SWITCHING; + e->vlan = vlan; + e->lport = port; + ether_addr_copy(e->dmac, eth_addr); + atomic_set(&e->refcnt, 1); + ret = write_l2e(adap, e, 0); + if (ret < 0) { + _t4_l2e_free(e); + spin_unlock(&e->lock); + write_unlock_bh(&d->lock); + return NULL; + } + } else { + atomic_inc(&e->refcnt); + } + spin_unlock(&e->lock); } write_unlock_bh(&d->lock); return e; } -/* Sets/updates the contents of a switching L2T entry that has been allocated - * with an earlier call to @t4_l2t_alloc_switching. 
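/* A minimal sketch of the drain pattern handle_failed_resolution() now uses:
 * the entry lock guards the queue, but is released around each callback so
 * the handler can take other locks or push the skb back onto the transmit
 * path without risking a deadlock on the entry lock. Names are illustrative.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drain_with_callbacks(spinlock_t *lock, struct sk_buff_head *q,
				 void (*handler)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	spin_lock(lock);
	while ((skb = __skb_dequeue(q)) != NULL) {
		spin_unlock(lock);	/* never call out with the lock held */
		handler(skb);
		spin_lock(lock);
	}
	spin_unlock(lock);
}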
+/** + * @dev: net_device pointer + * @vlan: VLAN Id + * @port: Associated port + * @dmac: Destination MAC address to add to L2T + * Returns pointer to the allocated l2t entry + * + * Allocates an L2T entry for use by switching rule of a filter */ -int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, - u8 port, u8 *eth_addr) +struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan, + u8 port, u8 *dmac) { - e->vlan = vlan; - e->lport = port; - memcpy(e->dmac, eth_addr, ETH_ALEN); - return write_l2e(adap, e, 0); + struct adapter *adap = netdev2adap(dev); + + return t4_l2t_alloc_switching(adap, vlan, port, dmac); } +EXPORT_SYMBOL(cxgb4_l2t_alloc_switching); struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) { @@ -585,6 +664,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) d->l2tab[i].state = L2T_STATE_UNUSED; spin_lock_init(&d->l2tab[i].lock); atomic_set(&d->l2tab[i].refcnt, 0); + skb_queue_head_init(&d->l2tab[i].arpq); } return d; } @@ -619,7 +699,8 @@ static char l2e_state(const struct l2t_entry *e) case L2T_STATE_VALID: return 'V'; case L2T_STATE_STALE: return 'S'; case L2T_STATE_SYNC_WRITE: return 'W'; - case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_RESOLVING: + return skb_queue_empty(&e->arpq) ? 'R' : 'A'; case L2T_STATE_SWITCHING: return 'X'; default: return 'U'; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index b38dc526aad5..4e2d47ac102b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -76,8 +76,7 @@ struct l2t_entry { struct neighbour *neigh; /* associated neighbour */ struct l2t_entry *first; /* start of hash chain */ struct l2t_entry *next; /* next l2t_entry on chain */ - struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ - struct sk_buff *arpq_tail; + struct sk_buff_head arpq; /* packet queue awaiting resolution */ spinlock_t lock; atomic_t refcnt; /* entry reference count */ u16 hash; /* hash bucket the entry is on */ @@ -114,10 +113,11 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, unsigned int priority); u64 cxgb4_select_ntuple(struct net_device *dev, const struct l2t_entry *l2t); +struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan, + u8 port, u8 *dmac); void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); -struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); -int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, - u8 port, u8 *eth_addr); +struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, + u8 port, u8 *dmac); struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end); void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b7b93e7a643d..b4eb4680a27c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -406,7 +406,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q, */ static inline int reclaimable(const struct sge_txq *q) { - int hw_cidx = ntohs(q->stat->cidx); + int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); hw_cidx -= q->cidx; return hw_cidx < 0 ? 
hw_cidx + q->size : hw_cidx; } @@ -613,6 +613,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { __free_pages(pg, s->fl_pg_order); + q->mapping_err++; goto out; /* do not try small pages for this error */ } mapping |= RX_LARGE_PG_BUF; @@ -642,6 +643,7 @@ alloc_small_pages: PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { put_page(pg); + q->mapping_err++; goto out; } *d++ = cpu_to_be64(mapping); @@ -663,6 +665,7 @@ out: cred = q->avail - cred; if (unlikely(fl_starving(adap, q))) { smp_wmb(); + q->low++; set_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.starving_fl); } @@ -1029,6 +1032,30 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, *p = 0; } +static void *inline_tx_skb_header(const struct sk_buff *skb, + const struct sge_txq *q, void *pos, + int length) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(length <= left)) { + memcpy(pos, skb->data, length); + pos += length; + } else { + memcpy(pos, skb->data, left); + memcpy(q->desc, skb->data + left, length - left); + pos = (void *)q->desc + (length - left); + } + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) { + *p = 0; + return p + 1; + } + return p; +} + /* * Figure out what HW csum a packet wants and return the appropriate control * bits. @@ -1320,7 +1347,7 @@ out_free: dev_kfree_skb_any(skb); */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - int hw_cidx = ntohs(q->stat->cidx); + int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); int reclaim = hw_cidx - q->cidx; if (reclaim < 0) @@ -1542,24 +1569,50 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) } /** - * service_ofldq - restart a suspended offload queue + * service_ofldq - service/restart a suspended offload queue * @q: the offload queue * - * Services an offload Tx queue by moving packets from its packet queue - * to the HW Tx ring. The function starts and ends with the queue locked. + * Services an offload Tx queue by moving packets from its Pending Send + * Queue to the Hardware TX ring. The function starts and ends with the + * Send Queue locked, but drops the lock while putting the skb at the + * head of the Send Queue onto the Hardware TX Ring. Dropping the lock + * allows more skbs to be added to the Send Queue by other threads. + * The packet being processed at the head of the Pending Send Queue is + * left on the queue in case we experience DMA Mapping errors, etc. + * and need to give up and restart later. + * + * service_ofldq() can be thought of as a task which opportunistically + * uses other threads execution contexts. We use the Offload Queue + * boolean "service_ofldq_running" to make sure that only one instance + * is ever running at a time ... */ static void service_ofldq(struct sge_ofld_txq *q) { - u64 *pos; + u64 *pos, *before, *end; int credits; struct sk_buff *skb; + struct sge_txq *txq; + unsigned int left; unsigned int written = 0; unsigned int flits, ndesc; + /* If another thread is currently in service_ofldq() processing the + * Pending Send Queue then there's nothing to do. Otherwise, flag + * that we're doing the work and continue. Examining/modifying + * the Offload Queue boolean "service_ofldq_running" must be done + * while holding the Pending Send Queue Lock. 
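/* A minimal model of the wrap-around copy inline_tx_skb_header() adds: a
 * work-request header may straddle the end of the TX descriptor ring, in
 * which case the remainder of the copy restarts at the ring base. Here
 * ring_end marks the first byte past the usable descriptors (the status
 * page in the real ring); the 16-byte zero-padding step is omitted.
 */
#include <linux/string.h>
#include <linux/types.h>

static void *ring_copy(void *ring_base, void *ring_end, void *pos,
		       const void *src, size_t len)
{
	size_t left = ring_end - pos;

	if (len <= left) {
		memcpy(pos, src, len);
		return pos + len;
	}
	memcpy(pos, src, left);				/* fill to the end ... */
	memcpy(ring_base, src + left, len - left);	/* ... wrap to the base */
	return ring_base + (len - left);
}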
+ */ + if (q->service_ofldq_running) + return; + q->service_ofldq_running = true; + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { - /* - * We drop the lock but leave skb on sendq, thus retaining - * exclusive access to the state of the queue. + /* We drop the lock while we're working with the skb at the + * head of the Pending Send Queue. This allows more skbs to + * be added to the Pending Send Queue while we're working on + * this one. We don't need to lock to guard the TX Ring + * updates because only one thread of execution is ever + * allowed into service_ofldq() at a time. */ spin_unlock(&q->sendq.lock); @@ -1583,9 +1636,32 @@ static void service_ofldq(struct sge_ofld_txq *q) } else { int last_desc, hdr_len = skb_transport_offset(skb); - memcpy(pos, skb->data, hdr_len); - write_sgl(skb, &q->q, (void *)pos + hdr_len, - pos + flits, hdr_len, + /* The WR headers may not fit within one descriptor. + * So we need to deal with wrap-around here. + */ + before = (u64 *)pos; + end = (u64 *)pos + flits; + txq = &q->q; + pos = (void *)inline_tx_skb_header(skb, &q->q, + (void *)pos, + hdr_len); + if (before > (u64 *)pos) { + left = (u8 *)end - (u8 *)txq->stat; + end = (void *)txq->desc + left; + } + + /* If current position is already at the end of the + * ofld queue, reset the current to point to + * start of the queue and update the end ptr as well. + */ + if (pos == (u64 *)txq->stat) { + left = (u8 *)end - (u8 *)txq->stat; + end = (void *)txq->desc + left; + pos = (void *)txq->desc; + } + + write_sgl(skb, &q->q, (void *)pos, + end, hdr_len, (dma_addr_t *)skb->head); #ifdef CONFIG_NEED_DMA_MAP_STATE skb->dev = q->adap->port[0]; @@ -1604,6 +1680,11 @@ static void service_ofldq(struct sge_ofld_txq *q) written = 0; } + /* Reacquire the Pending Send Queue Lock so we can unlink the + * skb we've just successfully transferred to the TX Ring and + * loop for the next skb which may be at the head of the + * Pending Send Queue. + */ spin_lock(&q->sendq.lock); __skb_unlink(skb, &q->sendq); if (is_ofld_imm(skb)) @@ -1611,6 +1692,11 @@ static void service_ofldq(struct sge_ofld_txq *q) } if (likely(written)) ring_tx_db(q->adap, &q->q, written); + + /*Indicate that no thread is processing the Pending Send Queue + * currently. + */ + q->service_ofldq_running = false; } /** @@ -1624,9 +1710,19 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) { skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ spin_lock(&q->sendq.lock); + + /* Queue the new skb onto the Offload Queue's Pending Send Queue. If + * that results in this new skb being the only one on the queue, start + * servicing it. If there are other skbs already on the list, then + * either the queue is currently being processed or it's been stopped + * for some reason and it'll be restarted at a later time. Restart + * paths are triggered by events like experiencing a DMA Mapping Error + * or filling the Hardware TX Ring. 
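/* A condensed model of the single-servicer scheme described in the comment
 * above: producers append under the queue lock and only start draining when
 * no other thread is already doing so, tracked by a boolean that is only
 * read and written with the lock held. The transmit step is a placeholder
 * and the names are stand-ins for the sendq/service_ofldq machinery.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct tx_pending {
	spinlock_t lock;
	struct sk_buff_head sendq;
	bool servicing;		/* true while some thread is draining */
};

/* called with t->lock held; returns with it held */
static void service_pending(struct tx_pending *t)
{
	struct sk_buff *skb;

	if (t->servicing)	/* another thread is already on it */
		return;
	t->servicing = true;

	while ((skb = skb_peek(&t->sendq)) != NULL) {
		spin_unlock(&t->lock);	/* let producers keep enqueueing */
		/* ... hand skb to the hardware ring here ... */
		spin_lock(&t->lock);
		__skb_unlink(skb, &t->sendq);
		kfree_skb(skb);		/* placeholder for "consumed" */
	}
	t->servicing = false;
}

static void queue_and_kick(struct tx_pending *t, struct sk_buff *skb)
{
	spin_lock_bh(&t->lock);
	__skb_queue_tail(&t->sendq, skb);
	if (skb_queue_len(&t->sendq) == 1)	/* queue was idle: drain it */
		service_pending(t);
	spin_unlock_bh(&t->lock);
}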
+ */ __skb_queue_tail(&q->sendq, skb); if (q->sendq.qlen == 1) service_ofldq(q); + spin_unlock(&q->sendq.lock); return NET_XMIT_SUCCESS; } @@ -1864,7 +1960,6 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rxq->rspq.idx); - skb_mark_napi_id(skb, &rxq->rspq.napi); pi = netdev_priv(skb->dev); if (pi->rxtstamp) cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), @@ -2193,7 +2288,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) if (likely(work_done < budget)) { int timer_index; - napi_complete(napi); + napi_complete_done(napi, work_done); timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); if (q->adaptive_rx) { @@ -2460,7 +2555,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->size = roundup(iq->size, 16); iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, - &iq->phys_addr, NULL, 0, NUMA_NO_NODE); + &iq->phys_addr, NULL, 0, + dev_to_node(adap->pdev_dev)); if (!iq->desc) return -ENOMEM; @@ -2500,7 +2596,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->size = roundup(fl->size, 8); fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), sizeof(struct rx_sw_desc), &fl->addr, - &fl->sdesc, s->stat_len, NUMA_NO_NODE); + &fl->sdesc, s->stat_len, + dev_to_node(adap->pdev_dev)); if (!fl->desc) goto fl_nomem; @@ -2528,7 +2625,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, goto err; netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); - napi_hash_add(&iq->napi); iq->cur_desc = iq->desc; iq->cidx = 0; iq->gen = 1; @@ -2574,8 +2670,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * simple (and hopefully less wrong). */ if (!is_t4(adap->params.chip) && cong >= 0) { - u32 param, val; + u32 param, val, ch_map = 0; int i; + u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | @@ -2587,9 +2684,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); for (i = 0; i < 4; i++) { if (cong & (1 << i)) - val |= - CONMCTXT_CNGCHMAP_V(1 << (i << 2)); + ch_map |= 1 << (i << cng_ch_bits_log); } + val |= CONMCTXT_CNGCHMAP_V(ch_map); } ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); @@ -2884,7 +2981,7 @@ void t4_free_sge_resources(struct adapter *adap) } /* clean up RDMA and iSCSI Rx queues */ - t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq); + t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq); t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); @@ -3077,8 +3174,7 @@ static int t4_sge_init_soft(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control, sge_control2, sge_conm_ctrl; - unsigned int ingpadboundary, ingpackboundary; + u32 sge_control, sge_conm_ctrl; int ret, egress_threshold; /* @@ -3089,35 +3185,7 @@ int t4_sge_init(struct adapter *adap) s->pktshift = PKTSHIFT_G(sge_control); s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; - /* T4 uses a single control field to specify both the PCIe Padding and - * Packing Boundary. T5 introduced the ability to specify these - * separately. 
The actual Ingress Packet Data alignment boundary - * within Packed Buffer Mode is the maximum of these two - * specifications. (Note that it makes no real practical sense to - * have the Pading Boudary be larger than the Packing Boundary but you - * could set the chip up that way and, in fact, legacy T4 code would - * end doing this because it would initialize the Padding Boundary and - * leave the Packing Boundary initialized to 0 (16 bytes).) - */ - ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + - INGPADBOUNDARY_SHIFT_X); - if (is_t4(adap->params.chip)) { - s->fl_align = ingpadboundary; - } else { - /* T5 has a different interpretation of one of the PCIe Packing - * Boundary values. - */ - sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); - ingpackboundary = INGPACKBOUNDARY_G(sge_control2); - if (ingpackboundary == INGPACKBOUNDARY_16B_X) - ingpackboundary = 16; - else - ingpackboundary = 1 << (ingpackboundary + - INGPACKBOUNDARY_SHIFT_X); - - s->fl_align = max(ingpadboundary, ingpackboundary); - } - + s->fl_align = t4_fl_pkt_align(adap); ret = t4_sge_init_soft(adap); if (ret < 0) return ret; @@ -3135,10 +3203,21 @@ int t4_sge_init(struct adapter *adap) * buffers. */ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); - if (is_t4(adap->params.chip)) + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T4: egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); - else + break; + case CHELSIO_T5: egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); + break; + case CHELSIO_T6: + egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); + break; + default: + dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; + } s->fl_starve_thres = 2*egress_threshold + 1; t4_idma_monitor_init(adap, &s->idma_monitor); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cf61a5869c6e..636b4691f252 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1942,8 +1942,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1254, - 0x1280, 0x133c, + 0x11fc, 0x1258, + 0x1280, 0x12d4, + 0x12d9, 0x12d9, + 0x12de, 0x12de, + 0x12e3, 0x12e3, + 0x12e8, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, @@ -1973,7 +1977,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5e50, 0x5e94, 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, - 0x5ec8, 0x5ecc, + 0x5ec8, 0x5ed0, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, @@ -2048,7 +2052,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, - 0x19238, 0x192b0, + 0x19238, 0x19290, + 0x192a4, 0x192b0, 0x192bc, 0x192bc, 0x19348, 0x1934c, 0x193f8, 0x19418, @@ -2442,7 +2447,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, - 0x41304, 0x413c8, + 0x41304, 0x413b8, + 0x413c0, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, @@ -5254,7 +5260,7 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) int i; u32 data[2]; - for (i = 0; i < PM_NSTATS; i++) { + for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1); cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A); if (is_t4(adap->params.chip)) { @@ -5281,7 +5287,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) int 
i; u32 data[2]; - for (i = 0; i < PM_NSTATS; i++) { + for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1); cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A); if (is_t4(adap->params.chip)) { @@ -5310,7 +5316,14 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) if (n == 0) return idx == 0 ? 0xf : 0; - if (n == 1) + /* In T6 (which is a 2 port card), + * port 0 is mapped to channel 0 and port 1 is mapped to channel 1. + * For 2 port T4/T5 adapter, + * port 0 is mapped to channel 0 and 1, + * port 1 is mapped to channel 2 and 3. + */ + if ((n == 1) && + (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)) return idx < 2 ? (3 << (2 * idx)) : 0; return 1 << idx; } @@ -5689,6 +5702,39 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) "IDMA_FL_SEND_PADDING", "IDMA_FL_SEND_COMPLETION_TO_IMSG", }; + static const char * const t6_decode[] = { + "IDMA_IDLE", + "IDMA_PUSH_MORE_CPL_FIFO", + "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", + "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", + "IDMA_PHYSADDR_SEND_PAYLOAD", + "IDMA_FL_REQ_DATA_FL", + "IDMA_FL_DROP", + "IDMA_FL_DROP_SEND_INC", + "IDMA_FL_H_REQ_HEADER_FL", + "IDMA_FL_H_SEND_PCIEHDR", + "IDMA_FL_H_PUSH_CPL_FIFO", + "IDMA_FL_H_SEND_CPL", + "IDMA_FL_H_SEND_IP_HDR_FIRST", + "IDMA_FL_H_SEND_IP_HDR", + "IDMA_FL_H_REQ_NEXT_HEADER_FL", + "IDMA_FL_H_SEND_NEXT_PCIEHDR", + "IDMA_FL_H_SEND_IP_HDR_PADDING", + "IDMA_FL_D_SEND_PCIEHDR", + "IDMA_FL_D_SEND_CPL_AND_IP_HDR", + "IDMA_FL_D_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_PCIEHDR", + "IDMA_FL_PUSH_CPL_FIFO", + "IDMA_FL_SEND_CPL", + "IDMA_FL_SEND_PAYLOAD_FIRST", + "IDMA_FL_SEND_PAYLOAD", + "IDMA_FL_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_NEXT_PCIEHDR", + "IDMA_FL_SEND_PADDING", + "IDMA_FL_SEND_COMPLETION_TO_IMSG", + }; static const u32 sge_regs[] = { SGE_DEBUG_DATA_LOW_INDEX_2_A, SGE_DEBUG_DATA_LOW_INDEX_3_A, @@ -5697,6 +5743,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) const char **sge_idma_decode; int sge_idma_decode_nstates; int i; + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + + /* Select the right set of decode strings to dump depending on the + * adapter chip type. + */ + switch (chip_version) { + case CHELSIO_T4: + sge_idma_decode = (const char **)t4_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); + break; + + case CHELSIO_T5: + sge_idma_decode = (const char **)t5_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); + break; + + case CHELSIO_T6: + sge_idma_decode = (const char **)t6_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t6_decode); + break; + + default: + dev_err(adapter->pdev_dev, + "Unsupported chip version %d\n", chip_version); + return; + } if (is_t4(adapter->params.chip)) { sge_idma_decode = (const char **)t4_decode; @@ -6097,6 +6169,59 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, } /** + * t4_fl_pkt_align - return the fl packet alignment + * @adap: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. + * + */ +int t4_fl_pkt_align(struct adapter *adap) +{ + u32 sge_control, sge_control2; + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + sge_control = t4_read_reg(adap, SGE_CONTROL_A); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. 
T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Pading Boudary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + * Padding Boundary values in T6 starts from 8B, + * where as it is 32B for T4 and T5. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + ingpad_shift = INGPADBOUNDARY_SHIFT_X; + else + ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; + + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adap->params.chip)) { + /* T5 has a weird interpretation of one of the PCIe Packing + * Boundary values. No idea why ... + */ + sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +/** * t4_fixup_host_params - fix up host-dependent parameters * @adap: the adapter * @page_size: the host's Base Page Size @@ -6114,6 +6239,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, unsigned int stat_len = cache_line_size > 64 ? 128 : 64; unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; unsigned int fl_align_log = fls(fl_align) - 1; + unsigned int ingpad; t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, HOSTPAGESIZEPF0_V(sge_hps) | @@ -6161,10 +6287,16 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, fl_align = 64; fl_align_log = 6; } + + if (is_t5(adap->params.chip)) + ingpad = INGPCIEBOUNDARY_32B_X; + else + ingpad = T6_INGPADBOUNDARY_32B_X; + t4_set_reg_field(adap, SGE_CONTROL_A, INGPADBOUNDARY_V(INGPADBOUNDARY_M) | EGRSTATUSPAGESIZE_F, - INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) | + INGPADBOUNDARY_V(ingpad) | EGRSTATUSPAGESIZE_V(stat_len != 64)); t4_set_reg_field(adap, SGE_CONTROL2_A, INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), @@ -7060,7 +7192,12 @@ int t4_prep_adapter(struct adapter *adapter) NUM_MPS_CLS_SRAM_L_INSTANCES; adapter->params.arch.mps_rplc_size = 128; adapter->params.arch.nchan = NCHAN; + adapter->params.arch.pm_stats_cnt = PM_NSTATS; adapter->params.arch.vfcount = 128; + /* Congestion map is for 4 channels so that + * MPS can have 4 priority per port. 
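/* A self-contained model of the rule t4_fl_pkt_align() implements: the
 * free-list packet alignment is the larger of the ingress padding boundary
 * and, on T5 and later, the ingress packing boundary. Register fields are
 * passed in as plain numbers instead of being read from SGE_CONTROL /
 * SGE_CONTROL2, and chip_ver is simply 4, 5 or 6.
 */
static unsigned int model_fl_pkt_align(unsigned int chip_ver,
				       unsigned int ingpad_field,
				       unsigned int ingpack_field)
{
	unsigned int ingpad_shift = chip_ver <= 5 ? 5 : 3;	/* T6: 8B base */
	unsigned int pad = 1U << (ingpad_field + ingpad_shift);
	unsigned int pack;

	if (chip_ver == 4)		/* T4: padding boundary only */
		return pad;

	pack = ingpack_field == 0 ? 16 : 1U << (ingpack_field + 5);
	return pad > pack ? pad : pack;
}

/* Example: a T5 with both fields at 0 aligns packets to max(32, 16) = 32
 * bytes, while a T6 with the same fields aligns to max(8, 16) = 16 bytes.
 */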
+ */ + adapter->params.arch.cng_ch_bits_log = 2; break; case CHELSIO_T5: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); @@ -7069,7 +7206,9 @@ int t4_prep_adapter(struct adapter *adapter) NUM_MPS_T5_CLS_SRAM_L_INSTANCES; adapter->params.arch.mps_rplc_size = 128; adapter->params.arch.nchan = NCHAN; + adapter->params.arch.pm_stats_cnt = PM_NSTATS; adapter->params.arch.vfcount = 128; + adapter->params.arch.cng_ch_bits_log = 2; break; case CHELSIO_T6: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); @@ -7078,7 +7217,12 @@ int t4_prep_adapter(struct adapter *adapter) NUM_MPS_T5_CLS_SRAM_L_INSTANCES; adapter->params.arch.mps_rplc_size = 256; adapter->params.arch.nchan = 2; + adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS; adapter->params.arch.vfcount = 256; + /* Congestion map will be for 2 channels so that + * MPS can have 8 priority per port. + */ + adapter->params.arch.cng_ch_bits_log = 3; break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 13708fde1668..2fc60e83a7a1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -48,6 +48,7 @@ enum { NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ PM_NSTATS = 5, /* # of PM stats */ + T6_PM_NSTATS = 7, /* # of PM stats in T6 */ MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 03ed00c49823..a8dda635456d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -162,6 +162,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ + CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index fc3044c8ac1c..9fea255c7e87 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -492,6 +492,9 @@ #define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S) #define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M) +#define T6_STATMODE_S 0 +#define T6_STATMODE_V(x) ((x) << T6_STATMODE_S) + #define SGE_DBFIFO_STATUS2_A 0x1118 #define HP_INT_THRESH_T5_S 10 @@ -2395,6 +2398,30 @@ #define MPS_CLS_TCAM_DATA0_A 0xf000 #define MPS_CLS_TCAM_DATA1_A 0xf004 +#define VIDL_S 16 +#define VIDL_M 0xffffU +#define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M) + +#define DATALKPTYPE_S 10 +#define DATALKPTYPE_M 0x3U +#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M) + +#define DATAPORTNUM_S 12 +#define DATAPORTNUM_M 0xfU +#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M) + +#define DATADIPHIT_S 8 +#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S) +#define DATADIPHIT_F DATADIPHIT_V(1U) + +#define DATAVIDH2_S 7 +#define DATAVIDH2_V(x) ((x) << DATAVIDH2_S) +#define DATAVIDH2_F DATAVIDH2_V(1U) + +#define DATAVIDH1_S 0 +#define DATAVIDH1_M 0x7fU +#define DATAVIDH1_G(x) (((x) >> 
DATAVIDH1_S) & DATAVIDH1_M) + #define USED_S 16 #define USED_M 0x7ffU #define USED_G(x) (((x) >> USED_S) & USED_M) @@ -2802,6 +2829,10 @@ #define HASHEN_V(x) ((x) << HASHEN_S) #define HASHEN_F HASHEN_V(1U) +#define ASLIPCOMPEN_S 17 +#define ASLIPCOMPEN_V(x) ((x) << ASLIPCOMPEN_S) +#define ASLIPCOMPEN_F ASLIPCOMPEN_V(1U) + #define REQQPARERR_S 16 #define REQQPARERR_V(x) ((x) << REQQPARERR_S) #define REQQPARERR_F REQQPARERR_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index 7bdee3bf75ec..a5231fa771db 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -53,6 +53,9 @@ #define INGPADBOUNDARY_SHIFT_X 5 +#define T6_INGPADBOUNDARY_SHIFT_X 3 +#define T6_INGPADBOUNDARY_32B_X 2 + /* CONTROL2 register */ #define INGPACKBOUNDARY_SHIFT_X 5 #define INGPACKBOUNDARY_16B_X 0 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index fa3786a9d30e..6528231d8a59 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2607,7 +2607,7 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; - unsigned int ingpadboundary, ingpackboundary; + unsigned int ingpadboundary, ingpackboundary, ingpad_shift; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2642,9 +2642,16 @@ int t4vf_sge_init(struct adapter *adapter) * could set the chip up that way and, in fact, legacy T4 code would * end doing this because it would initialize the Padding Boundary and * leave the Packing Boundary initialized to 0 (16 bytes).) + * Padding Boundary values in T6 starts from 8B, + * where as it is 32B for T4 and T5. */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = INGPADBOUNDARY_SHIFT_X; + else + ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) + - INGPADBOUNDARY_SHIFT_X); + ingpad_shift); if (is_t4(adapter->params.chip)) { s->fl_align = ingpadboundary; } else { diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h index b516b12b1884..f859db3d254c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h @@ -54,6 +54,7 @@ #define T4VF_MPS_BASE_ADDR 0x0100 #define T4VF_PL_BASE_ADDR 0x0200 #define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T6VF_MBDATA_BASE_ADDR 0x0280 #define T4VF_CIM_BASE_ADDR 0x0300 #define T4VF_REGMAP_START 0x0000 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 63dd5fdac5b9..b6fa74aafe47 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -120,12 +120,19 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, 1, 1, 3, 5, 10, 10, 20, 50, 100 }; - u32 v; + u32 v, mbox_data; int i, ms, delay_idx; const __be64 *p; - u32 mbox_data = T4VF_MBDATA_BASE_ADDR; u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + /* In T6, mailbox size is changed to 128 bytes to avoid + * invalidating the entire prefetch buffer. 
+ */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + mbox_data = T4VF_MBDATA_BASE_ADDR; + else + mbox_data = T6VF_MBDATA_BASE_ADDR; + /* * Commands must be multiples of 16 bytes in length and may not be * larger than the size of the Mailbox Data register array. diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b36643ef0593..b2182d3ba3cc 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2458,13 +2458,11 @@ static int enic_dev_init(struct enic *enic) switch (vnic_dev_get_intr_mode(enic->vdev)) { default: netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); - napi_hash_add(&enic->napi[0]); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->rq_count; i++) { netif_napi_add(netdev, &enic->napi[i], enic_poll_msix_rq, NAPI_POLL_WEIGHT); - napi_hash_add(&enic->napi[i]); } for (i = 0; i < enic->wq_count; i++) netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 8966f3159bb2..3acde3b9b767 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1990,7 +1990,7 @@ SetMulticastFilter(struct net_device *dev) static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; -static int __init de4x5_eisa_probe (struct device *gendev) +static int de4x5_eisa_probe(struct device *gendev) { struct eisa_device *edev; u_long iobase; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index ccca4799c27b..f92b6d948398 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -70,7 +70,6 @@ static const int multicast_filter_limit = 0x40; static int rio_open (struct net_device *dev); static void rio_timer (unsigned long data); static void rio_tx_timeout (struct net_device *dev); -static void alloc_list (struct net_device *dev); static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); static irqreturn_t rio_interrupt (int irq, void *dev_instance); static void rio_free_tx (struct net_device *dev, int irq); @@ -253,19 +252,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_unmap_rx; - if (np->chip_id == CHIP_IP1000A && - (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { - /* PHY magic taken from ipg driver, undocumented registers */ - mii_write(dev, np->phy_addr, 31, 0x0001); - mii_write(dev, np->phy_addr, 27, 0x01e0); - mii_write(dev, np->phy_addr, 31, 0x0002); - mii_write(dev, np->phy_addr, 27, 0xeb8e); - mii_write(dev, np->phy_addr, 31, 0x0000); - mii_write(dev, np->phy_addr, 30, 0x005e); - /* advertise 1000BASE-T half & full duplex, prefer MASTER */ - mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); - } - /* Fiber device? */ np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 
1 : 0; np->link_status = 0; @@ -275,13 +261,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) if (np->an_enable == 2) { np->an_enable = 1; } - mii_set_media_pcs (dev); } else { /* Auto-Negotiation is mandatory for 1000BASE-T, IEEE 802.3ab Annex 28D page 14 */ if (np->speed == 1000) np->an_enable = 1; - mii_set_media (dev); } err = register_netdev (dev); @@ -446,19 +430,106 @@ static void rio_set_led_mode(struct net_device *dev) dw32(ASICCtrl, mode); } -static int -rio_open (struct net_device *dev) +static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) +{ + return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); +} + +static void free_list(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + struct sk_buff *skb; + int i; + + /* Free all the skbuffs in the queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + skb = np->rx_skbuff[i]; + if (skb) { + pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]), + skb->len, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + np->rx_skbuff[i] = NULL; + } + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; + } + for (i = 0; i < TX_RING_SIZE; i++) { + skb = np->tx_skbuff[i]; + if (skb) { + pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + np->tx_skbuff[i] = NULL; + } + } +} + +static void rio_reset_ring(struct netdev_private *np) +{ + int i; + + np->cur_rx = 0; + np->cur_tx = 0; + np->old_rx = 0; + np->old_tx = 0; + + for (i = 0; i < TX_RING_SIZE; i++) + np->tx_ring[i].status = cpu_to_le64(TFDDone); + + for (i = 0; i < RX_RING_SIZE; i++) + np->rx_ring[i].status = 0; +} + + /* allocate and initialize Tx and Rx descriptors */ +static int alloc_list(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + rio_reset_ring(np); + np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); + + /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */ + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma + + ((i + 1) % TX_RING_SIZE) * + sizeof(struct netdev_desc)); + } + + /* Initialize Rx descriptors & allocate buffers */ + for (i = 0; i < RX_RING_SIZE; i++) { + /* Allocated fixed size of skbuff */ + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); + np->rx_skbuff[i] = skb; + if (!skb) { + free_list(dev); + return -ENOMEM; + } + + np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma + + ((i + 1) % RX_RING_SIZE) * + sizeof(struct netdev_desc)); + /* Rubicon now supports 40 bits of addressing space. 
*/ + np->rx_ring[i].fraginfo = + cpu_to_le64(pci_map_single( + np->pdev, skb->data, np->rx_buf_sz, + PCI_DMA_FROMDEVICE)); + np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); + } + + return 0; +} + +static void rio_hw_init(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; - const int irq = np->pdev->irq; int i; u16 macctrl; - i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); - if (i) - return i; - /* Reset all logic functions */ dw16(ASICCtrl + 2, GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); @@ -469,11 +540,31 @@ rio_open (struct net_device *dev) /* DebugCtrl bit 4, 5, 9 must set */ dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); + if (np->chip_id == CHIP_IP1000A && + (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { + /* PHY magic taken from ipg driver, undocumented registers */ + mii_write(dev, np->phy_addr, 31, 0x0001); + mii_write(dev, np->phy_addr, 27, 0x01e0); + mii_write(dev, np->phy_addr, 31, 0x0002); + mii_write(dev, np->phy_addr, 27, 0xeb8e); + mii_write(dev, np->phy_addr, 31, 0x0000); + mii_write(dev, np->phy_addr, 30, 0x005e); + /* advertise 1000BASE-T half & full duplex, prefer MASTER */ + mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); + } + + if (np->phy_media) + mii_set_media_pcs(dev); + else + mii_set_media(dev); + /* Jumbo frame */ if (np->jumbo != 0) dw16(MaxFrameSize, MAX_JUMBO+14); - alloc_list (dev); + /* Set RFDListPtr */ + dw32(RFDListPtr0, np->rx_ring_dma); + dw32(RFDListPtr1, 0); /* Set station address */ /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works @@ -509,10 +600,6 @@ rio_open (struct net_device *dev) dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); } - setup_timer(&np->timer, rio_timer, (unsigned long)dev); - np->timer.expires = jiffies + 1*HZ; - add_timer (&np->timer); - /* Start Tx/Rx */ dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable); @@ -522,6 +609,42 @@ rio_open (struct net_device *dev) macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; dw16(MACCtrl, macctrl); +} + +static void rio_hw_stop(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + /* Disable interrupts */ + dw16(IntEnable, 0); + + /* Stop Tx and Rx logics */ + dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); +} + +static int rio_open(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + const int irq = np->pdev->irq; + int i; + + i = alloc_list(dev); + if (i) + return i; + + rio_hw_init(dev); + + i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); + if (i) { + rio_hw_stop(dev); + free_list(dev); + return i; + } + + setup_timer(&np->timer, rio_timer, (unsigned long)dev); + np->timer.expires = jiffies + 1 * HZ; + add_timer(&np->timer); netif_start_queue (dev); @@ -586,60 +709,6 @@ rio_tx_timeout (struct net_device *dev) dev->trans_start = jiffies; /* prevent tx timeout */ } - /* allocate and initialize Tx and Rx descriptors */ -static void -alloc_list (struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->ioaddr; - int i; - - np->cur_rx = np->cur_tx = 0; - np->old_rx = np->old_tx = 0; - np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); - - /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). 
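/* A minimal sketch of the circular linking alloc_list() performs for the
 * dl2k rings: each descriptor's next_desc holds the bus address of the next
 * slot and the last slot points back at the first, so the NIC can follow
 * the ring indefinitely. The descriptor layout is a simplified stand-in
 * (the real driver also packs the buffer length into fraginfo bits 48-63).
 */
#include <asm/byteorder.h>
#include <linux/types.h>

struct ring_desc {
	__le64 next_desc;
	__le64 status;
	__le64 fraginfo;
};

static void link_ring(struct ring_desc *ring, dma_addr_t ring_dma,
		      unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		ring[i].next_desc = cpu_to_le64(ring_dma +
				((i + 1) % n) * sizeof(struct ring_desc));
}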
*/ - for (i = 0; i < TX_RING_SIZE; i++) { - np->tx_skbuff[i] = NULL; - np->tx_ring[i].status = cpu_to_le64 (TFDDone); - np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma + - ((i+1)%TX_RING_SIZE) * - sizeof (struct netdev_desc)); - } - - /* Initialize Rx descriptors */ - for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma + - ((i + 1) % RX_RING_SIZE) * - sizeof (struct netdev_desc)); - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; - np->rx_skbuff[i] = NULL; - } - - /* Allocate the rx buffers */ - for (i = 0; i < RX_RING_SIZE; i++) { - /* Allocated fixed size of skbuff */ - struct sk_buff *skb; - - skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); - np->rx_skbuff[i] = skb; - if (skb == NULL) - break; - - /* Rubicon now supports 40 bits of addressing space. */ - np->rx_ring[i].fraginfo = - cpu_to_le64 ( pci_map_single ( - np->pdev, skb->data, np->rx_buf_sz, - PCI_DMA_FROMDEVICE)); - np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); - } - - /* Set RFDListPtr */ - dw32(RFDListPtr0, np->rx_ring_dma); - dw32(RFDListPtr1, 0); -} - static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev) { @@ -748,11 +817,6 @@ rio_interrupt (int irq, void *dev_instance) return IRQ_RETVAL(handled); } -static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) -{ - return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); -} - static void rio_free_tx (struct net_device *dev, int irq) { @@ -1730,44 +1794,16 @@ static int rio_close (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->ioaddr; - struct pci_dev *pdev = np->pdev; - struct sk_buff *skb; - int i; netif_stop_queue (dev); - /* Disable interrupts */ - dw16(IntEnable, 0); - - /* Stop Tx and Rx logics */ - dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); + rio_hw_stop(dev); free_irq(pdev->irq, dev); del_timer_sync (&np->timer); - /* Free all the skbuffs in the queue. 
*/ - for (i = 0; i < RX_RING_SIZE; i++) { - skb = np->rx_skbuff[i]; - if (skb) { - pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]), - skb->len, PCI_DMA_FROMDEVICE); - dev_kfree_skb (skb); - np->rx_skbuff[i] = NULL; - } - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; - } - for (i = 0; i < TX_RING_SIZE; i++) { - skb = np->tx_skbuff[i]; - if (skb) { - pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]), - skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb (skb); - np->tx_skbuff[i] = NULL; - } - } + free_list(dev); return 0; } @@ -1795,11 +1831,55 @@ rio_remove1 (struct pci_dev *pdev) } } +#ifdef CONFIG_PM_SLEEP +static int rio_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct netdev_private *np = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + del_timer_sync(&np->timer); + rio_hw_stop(dev); + + return 0; +} + +static int rio_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct netdev_private *np = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + rio_reset_ring(np); + rio_hw_init(dev); + np->timer.expires = jiffies + 1 * HZ; + add_timer(&np->timer); + netif_device_attach(dev); + dl2k_enable_int(np); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume); +#define RIO_PM_OPS (&rio_pm_ops) + +#else + +#define RIO_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + static struct pci_driver rio_driver = { .name = "dl2k", .id_table = rio_pci_tbl, .probe = rio_probe1, .remove = rio_remove1, + .driver.pm = RIO_PM_OPS, }; module_pci_driver(rio_driver); diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 13d00a38a5bd..b69a9eacc531 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -255,15 +255,9 @@ static int dnet_mii_probe(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = NULL; - int phy_addr; /* find the first phy */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (bp->mii_bus->phy_map[phy_addr]) { - phydev = bp->mii_bus->phy_map[phy_addr]; - break; - } - } + phydev = phy_find_first(bp->mii_bus); if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); @@ -274,11 +268,11 @@ static int dnet_mii_probe(struct net_device *dev) /* attach the mac to the phy */ if (bp->capabilities & DNET_HAS_RMII) { - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &dnet_handle_link_change, PHY_INTERFACE_MODE_RMII); } else { - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &dnet_handle_link_change, PHY_INTERFACE_MODE_MII); } @@ -308,7 +302,7 @@ static int dnet_mii_probe(struct net_device *dev) static int dnet_mii_init(struct dnet *bp) { - int err, i; + int err; bp->mii_bus = mdiobus_alloc(); if (bp->mii_bus == NULL) @@ -323,16 +317,6 @@ static int dnet_mii_init(struct dnet *bp) bp->mii_bus->priv = bp; - bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev, - sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!bp->mii_bus->irq) { - err = -ENOMEM; - goto err_out; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - if (mdiobus_register(bp->mii_bus)) { err = -ENXIO; goto err_out; @@ -892,9 +876,7 @@ static int dnet_probe(struct platform_device *pdev) (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", (bp->capabilities & DNET_HAS_DMA) ? 
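/* A bare-bones sketch of the dev_pm_ops wiring this hunk introduces for
 * dl2k: suspend/resume callbacks take a struct device, recover the
 * net_device from drvdata, and are compiled only under CONFIG_PM_SLEEP.
 * Everything below (names, the skeleton pci_driver) is hypothetical and the
 * hardware stop/reinit steps are reduced to comments.
 */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int sketch_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	if (netif_running(dev))
		netif_device_detach(dev);	/* then stop the hardware */
	return 0;
}

static int sketch_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	if (netif_running(dev))
		netif_device_attach(dev);	/* after reinitializing the hw */
	return 0;
}

static SIMPLE_DEV_PM_OPS(sketch_pm_ops, sketch_suspend, sketch_resume);
#define SKETCH_PM_OPS (&sketch_pm_ops)
#else
#define SKETCH_PM_OPS NULL
#endif

static struct pci_driver sketch_driver = {
	.name = "sketch",
	/* .id_table, .probe and .remove omitted; a real driver would also
	 * call module_pci_driver(sketch_driver).
	 */
	.driver.pm = SKETCH_PM_OPS,
};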
"" : "no "); phydev = bp->phy_dev; - dev_info(&pdev->dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index d463563e1f70..cf837831304b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.6.0.3" +#define DRV_VER "11.0.0.0" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -521,7 +521,7 @@ struct be_adapter { struct be_drv_stats drv_stats; struct be_aic_obj aic_obj[MAX_EVT_QS]; u8 vlan_prio_bmap; /* Available Priority BitMap */ - u16 recommended_prio; /* Recommended Priority */ + u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ struct be_dma_mem stats_cmd; @@ -547,10 +547,6 @@ struct be_adapter { u32 beacon_state; /* for set_phys_id */ - bool eeh_error; - bool fw_timeout; - bool hw_error; - u32 port_num; char port_name; u8 mc_type; @@ -574,6 +570,8 @@ struct be_adapter { struct be_resources pool_res; /* resources available for the port */ struct be_resources res; /* resources available for the func */ u16 num_vfs; /* Number of VFs provisioned by PF */ + u8 pf_num; /* Numbering used by FW, starts at 0 */ + u8 vf_num; /* Numbering used by FW, starts at 1 */ u8 virtfn; struct be_vf_cfg *vf_cfg; bool be3_native; @@ -591,11 +589,10 @@ struct be_adapter { u32 msg_enable; int be_get_temp_freq; struct be_hwmon hwmon_info; - u8 pf_number; - u8 pci_func_num; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; + u32 fat_dump_len; u16 serial_num[CNTL_SERIAL_NUM_WORDS]; }; @@ -848,8 +845,6 @@ void be_roce_dev_remove(struct be_adapter *); /* * internal function to open-close roce device during ifup-ifdown. 
*/ -void be_roce_dev_open(struct be_adapter *); -void be_roce_dev_close(struct be_adapter *); void be_roce_dev_shutdown(struct be_adapter *); #endif /* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1795c935ff02..b63d8ad2e115 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -308,8 +308,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, if (evt->valid) { adapter->vlan_prio_bmap = evt->available_priority_bmap; - adapter->recommended_prio &= ~VLAN_PRIO_MASK; - adapter->recommended_prio = + adapter->recommended_prio_bits = evt->reco_default_priority << VLAN_PRIO_SHIFT; } } @@ -1713,49 +1712,40 @@ err: } /* Uses synchronous mcc */ -int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) +int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_get_fat *req; int status; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, - NULL); + OPCODE_COMMON_MANAGE_FAT, sizeof(*req), + &wrb, NULL); req->fat_operation = cpu_to_le32(QUERY_FAT); - status = be_mcc_notify_wait(adapter); + status = be_cmd_notify_wait(adapter, &wrb); if (!status) { - struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); + struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb); - if (log_size && resp->log_size) - *log_size = le32_to_cpu(resp->log_size) - + if (dump_size && resp->log_size) + *dump_size = le32_to_cpu(resp->log_size) - sizeof(u32); } -err: - spin_unlock_bh(&adapter->mcc_lock); return status; } -int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) +int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) { struct be_dma_mem get_fat_cmd; struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32), payload_len; - int status = 0; + int status; if (buf_len == 0) - return -EIO; + return 0; total_size = buf_len; @@ -1763,11 +1753,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, get_fat_cmd.size, &get_fat_cmd.dma, GFP_ATOMIC); - if (!get_fat_cmd.va) { - dev_err(&adapter->pdev->dev, - "Memory allocation failure while reading FAT data\n"); + if (!get_fat_cmd.va) return -ENOMEM; - } spin_lock_bh(&adapter->mcc_lock); @@ -2291,10 +2278,11 @@ err: return status; } -int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, - const char *obj_name, u32 *data_written, - u8 *change_status, u8 *addn_status) +static int lancer_cmd_write_object(struct be_adapter *adapter, + struct be_dma_mem *cmd, u32 data_size, + u32 data_offset, const char *obj_name, + u32 *data_written, u8 *change_status, + u8 *addn_status) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_write_object *req; @@ -2410,7 +2398,8 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter) return status; } -int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) +static int lancer_cmd_delete_object(struct be_adapter *adapter, + const char *obj_name) { struct lancer_cmd_req_delete_object *req; struct be_mcc_wrb *wrb; @@ -2485,9 
+2474,9 @@ err_unlock: return status; } -int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 flash_type, u32 flash_opcode, u32 img_offset, - u32 buf_size) +static int be_cmd_write_flashrom(struct be_adapter *adapter, + struct be_dma_mem *cmd, u32 flash_type, + u32 flash_opcode, u32 img_offset, u32 buf_size) { struct be_mcc_wrb *wrb; struct be_cmd_write_flashrom *req; @@ -2533,8 +2522,8 @@ err_unlock: return status; } -int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - u16 img_optype, u32 img_offset, u32 crc_offset) +static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, + u16 img_optype, u32 img_offset, u32 crc_offset) { struct be_cmd_read_flash_crc *req; struct be_mcc_wrb *wrb; @@ -2571,6 +2560,579 @@ err: return status; } +static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; + +static bool phy_flashing_required(struct be_adapter *adapter) +{ + return (adapter->phy.phy_type == PHY_TYPE_TN_8022 && + adapter->phy.interface_type == PHY_TYPE_BASET_10GB); +} + +static bool is_comp_in_ufi(struct be_adapter *adapter, + struct flash_section_info *fsec, int type) +{ + int i = 0, img_type = 0; + struct flash_section_info_g2 *fsec_g2 = NULL; + + if (BE2_chip(adapter)) + fsec_g2 = (struct flash_section_info_g2 *)fsec; + + for (i = 0; i < MAX_FLASH_COMP; i++) { + if (fsec_g2) + img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); + else + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + + if (img_type == type) + return true; + } + return false; +} + +static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, + int header_size, + const struct firmware *fw) +{ + struct flash_section_info *fsec = NULL; + const u8 *p = fw->data; + + p += header_size; + while (p < (fw->data + fw->size)) { + fsec = (struct flash_section_info *)p; + if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) + return fsec; + p += 32; + } + return NULL; +} + +static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, + u32 img_offset, u32 img_size, int hdr_size, + u16 img_optype, bool *crc_match) +{ + u32 crc_offset; + int status; + u8 crc[4]; + + status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset, + img_size - 4); + if (status) + return status; + + crc_offset = hdr_size + img_offset + img_size - 4; + + /* Skip flashing, if crc of flashed region matches */ + if (!memcmp(crc, p + crc_offset, 4)) + *crc_match = true; + else + *crc_match = false; + + return status; +} + +static int be_flash(struct be_adapter *adapter, const u8 *img, + struct be_dma_mem *flash_cmd, int optype, int img_size, + u32 img_offset) +{ + u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0; + struct be_cmd_write_flashrom *req = flash_cmd->va; + int status; + + while (total_bytes) { + num_bytes = min_t(u32, 32 * 1024, total_bytes); + + total_bytes -= num_bytes; + + if (!total_bytes) { + if (optype == OPTYPE_PHY_FW) + flash_op = FLASHROM_OPER_PHY_FLASH; + else + flash_op = FLASHROM_OPER_FLASH; + } else { + if (optype == OPTYPE_PHY_FW) + flash_op = FLASHROM_OPER_PHY_SAVE; + else + flash_op = FLASHROM_OPER_SAVE; + } + + memcpy(req->data_buf, img, num_bytes); + img += num_bytes; + status = be_cmd_write_flashrom(adapter, flash_cmd, optype, + flash_op, img_offset + + bytes_sent, num_bytes); + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && + optype == OPTYPE_PHY_FW) + break; + else if (status) + return status; + + bytes_sent += num_bytes; + } + return 0; +} + +/* For BE2, BE3 and BE3-R */ +static int 
be_flash_BEx(struct be_adapter *adapter, + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) +{ + int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); + struct device *dev = &adapter->pdev->dev; + struct flash_section_info *fsec = NULL; + int status, i, filehdr_size, num_comp; + const struct flash_comp *pflashcomp; + bool crc_match; + const u8 *p; + + struct flash_comp gen3_flash_types[] = { + { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, + { BE3_REDBOOT_START, OPTYPE_REDBOOT, + BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE}, + { BE3_ISCSI_BIOS_START, OPTYPE_BIOS, + BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI}, + { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS, + BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE}, + { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS, + BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE}, + { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI}, + { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE}, + { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}, + { BE3_NCSI_START, OPTYPE_NCSI_FW, + BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI}, + { BE3_PHY_FW_START, OPTYPE_PHY_FW, + BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY} + }; + + struct flash_comp gen2_flash_types[] = { + { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, + { BE2_REDBOOT_START, OPTYPE_REDBOOT, + BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE}, + { BE2_ISCSI_BIOS_START, OPTYPE_BIOS, + BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI}, + { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS, + BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE}, + { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS, + BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE}, + { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI}, + { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE}, + { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE} + }; + + if (BE3_chip(adapter)) { + pflashcomp = gen3_flash_types; + filehdr_size = sizeof(struct flash_file_hdr_g3); + num_comp = ARRAY_SIZE(gen3_flash_types); + } else { + pflashcomp = gen2_flash_types; + filehdr_size = sizeof(struct flash_file_hdr_g2); + num_comp = ARRAY_SIZE(gen2_flash_types); + img_hdrs_size = 0; + } + + /* Get flash section info*/ + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(dev, "Invalid Cookie. 
FW image may be corrupted\n"); + return -1; + } + for (i = 0; i < num_comp; i++) { + if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) + continue; + + if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && + memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) + continue; + + if (pflashcomp[i].optype == OPTYPE_PHY_FW && + !phy_flashing_required(adapter)) + continue; + + if (pflashcomp[i].optype == OPTYPE_REDBOOT) { + status = be_check_flash_crc(adapter, fw->data, + pflashcomp[i].offset, + pflashcomp[i].size, + filehdr_size + + img_hdrs_size, + OPTYPE_REDBOOT, &crc_match); + if (status) { + dev_err(dev, + "Could not get CRC for 0x%x region\n", + pflashcomp[i].optype); + continue; + } + + if (crc_match) + continue; + } + + p = fw->data + filehdr_size + pflashcomp[i].offset + + img_hdrs_size; + if (p + pflashcomp[i].size > fw->data + fw->size) + return -1; + + status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, + pflashcomp[i].size, 0); + if (status) { + dev_err(dev, "Flashing section type 0x%x failed\n", + pflashcomp[i].img_type); + return status; + } + } + return 0; +} + +static u16 be_get_img_optype(struct flash_section_entry fsec_entry) +{ + u32 img_type = le32_to_cpu(fsec_entry.type); + u16 img_optype = le16_to_cpu(fsec_entry.optype); + + if (img_optype != 0xFFFF) + return img_optype; + + switch (img_type) { + case IMAGE_FIRMWARE_ISCSI: + img_optype = OPTYPE_ISCSI_ACTIVE; + break; + case IMAGE_BOOT_CODE: + img_optype = OPTYPE_REDBOOT; + break; + case IMAGE_OPTION_ROM_ISCSI: + img_optype = OPTYPE_BIOS; + break; + case IMAGE_OPTION_ROM_PXE: + img_optype = OPTYPE_PXE_BIOS; + break; + case IMAGE_OPTION_ROM_FCOE: + img_optype = OPTYPE_FCOE_BIOS; + break; + case IMAGE_FIRMWARE_BACKUP_ISCSI: + img_optype = OPTYPE_ISCSI_BACKUP; + break; + case IMAGE_NCSI: + img_optype = OPTYPE_NCSI_FW; + break; + case IMAGE_FLASHISM_JUMPVECTOR: + img_optype = OPTYPE_FLASHISM_JUMPVECTOR; + break; + case IMAGE_FIRMWARE_PHY: + img_optype = OPTYPE_SH_PHY_FW; + break; + case IMAGE_REDBOOT_DIR: + img_optype = OPTYPE_REDBOOT_DIR; + break; + case IMAGE_REDBOOT_CONFIG: + img_optype = OPTYPE_REDBOOT_CONFIG; + break; + case IMAGE_UFI_DIR: + img_optype = OPTYPE_UFI_DIR; + break; + default: + break; + } + + return img_optype; +} + +static int be_flash_skyhawk(struct be_adapter *adapter, + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) +{ + int img_hdrs_size = num_of_images * sizeof(struct image_hdr); + bool crc_match, old_fw_img, flash_offset_support = true; + struct device *dev = &adapter->pdev->dev; + struct flash_section_info *fsec = NULL; + u32 img_offset, img_size, img_type; + u16 img_optype, flash_optype; + int status, i, filehdr_size; + const u8 *p; + + filehdr_size = sizeof(struct flash_file_hdr_g3); + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(dev, "Invalid Cookie. 
FW image may be corrupted\n"); + return -EINVAL; + } + +retry_flash: + for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { + img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); + img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + img_optype = be_get_img_optype(fsec->fsec_entry[i]); + old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; + + if (img_optype == 0xFFFF) + continue; + + if (flash_offset_support) + flash_optype = OPTYPE_OFFSET_SPECIFIED; + else + flash_optype = img_optype; + + /* Don't bother verifying CRC if an old FW image is being + * flashed + */ + if (old_fw_img) + goto flash; + + status = be_check_flash_crc(adapter, fw->data, img_offset, + img_size, filehdr_size + + img_hdrs_size, flash_optype, + &crc_match); + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || + base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { + /* The current FW image on the card does not support + * OFFSET based flashing. Retry using older mechanism + * of OPTYPE based flashing + */ + if (flash_optype == OPTYPE_OFFSET_SPECIFIED) { + flash_offset_support = false; + goto retry_flash; + } + + /* The current FW image on the card does not recognize + * the new FLASH op_type. The FW download is partially + * complete. Reboot the server now to enable FW image + * to recognize the new FLASH op_type. To complete the + * remaining process, download the same FW again after + * the reboot. + */ + dev_err(dev, "Flash incomplete. Reset the server\n"); + dev_err(dev, "Download FW image again after reset\n"); + return -EAGAIN; + } else if (status) { + dev_err(dev, "Could not get CRC for 0x%x region\n", + img_optype); + return -EFAULT; + } + + if (crc_match) + continue; + +flash: + p = fw->data + filehdr_size + img_offset + img_hdrs_size; + if (p + img_size > fw->data + fw->size) + return -1; + + status = be_flash(adapter, p, flash_cmd, flash_optype, img_size, + img_offset); + + /* The current FW image on the card does not support OFFSET + * based flashing. 
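(A condensed view of the fallback implemented in this loop: each section is first flashed with the newer OPTYPE_OFFSET_SPECIFIED op type, and if the firmware running on the card rejects that op type the whole pass is restarted using the per-image op types. The sketch below is illustrative only; flash_one_section() is a placeholder name, and the rejection is shown as a plain error code where the driver really inspects the MCC base status.)

	static int flash_all_sections(struct be_adapter *adapter,
				      const u16 *img_optype, int num_images)
	{
		bool offset_flashing = true;	/* prefer the newer mechanism */
		int i, status;

	retry:
		for (i = 0; i < num_images; i++) {
			u16 optype = offset_flashing ? OPTYPE_OFFSET_SPECIFIED
						     : img_optype[i];

			status = flash_one_section(adapter, i, optype);
			if (status == -EOPNOTSUPP && offset_flashing) {
				/* old FW on the card: redo the whole pass
				 * using per-image op types
				 */
				offset_flashing = false;
				goto retry;
			}
			if (status)
				return status;
		}
		return 0;
	}

These download paths are reached from ethtool's .flash_device hook (be_do_flash() further down), so from userspace this corresponds roughly to "ethtool -f <iface> <fw-image>", with the image name resolved through the kernel firmware loader.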
Retry using older mechanism of OPTYPE based + * flashing + */ + if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD && + flash_optype == OPTYPE_OFFSET_SPECIFIED) { + flash_offset_support = false; + goto retry_flash; + } + + /* For old FW images ignore ILLEGAL_FIELD error or errors on + * UFI_DIR region + */ + if (old_fw_img && + (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || + (img_optype == OPTYPE_UFI_DIR && + base_status(status) == MCC_STATUS_FAILED))) { + continue; + } else if (status) { + dev_err(dev, "Flashing section type 0x%x failed\n", + img_type); + + switch (addl_status(status)) { + case MCC_ADDL_STATUS_MISSING_SIGNATURE: + dev_err(dev, + "Digital signature missing in FW\n"); + return -EINVAL; + case MCC_ADDL_STATUS_INVALID_SIGNATURE: + dev_err(dev, + "Invalid digital signature in FW\n"); + return -EINVAL; + default: + return -EFAULT; + } + } + } + return 0; +} + +int lancer_fw_download(struct be_adapter *adapter, + const struct firmware *fw) +{ + struct device *dev = &adapter->pdev->dev; + struct be_dma_mem flash_cmd; + const u8 *data_ptr = NULL; + u8 *dest_image_ptr = NULL; + size_t image_size = 0; + u32 chunk_size = 0; + u32 data_written = 0; + u32 offset = 0; + int status = 0; + u8 add_status = 0; + u8 change_status; + + if (!IS_ALIGNED(fw->size, sizeof(u32))) { + dev_err(dev, "FW image size should be multiple of 4\n"); + return -EINVAL; + } + + flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + + LANCER_FW_DOWNLOAD_CHUNK; + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); + if (!flash_cmd.va) + return -ENOMEM; + + dest_image_ptr = flash_cmd.va + + sizeof(struct lancer_cmd_req_write_object); + image_size = fw->size; + data_ptr = fw->data; + + while (image_size) { + chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); + + /* Copy the image chunk content. */ + memcpy(dest_image_ptr, data_ptr, chunk_size); + + status = lancer_cmd_write_object(adapter, &flash_cmd, + chunk_size, offset, + LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &change_status, + &add_status); + if (status) + break; + + offset += data_written; + data_ptr += data_written; + image_size -= data_written; + } + + if (!status) { + /* Commit the FW written */ + status = lancer_cmd_write_object(adapter, &flash_cmd, + 0, offset, + LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &change_status, + &add_status); + } + + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); + if (status) { + dev_err(dev, "Firmware load error\n"); + return be_cmd_status(status); + } + + dev_info(dev, "Firmware flashed successfully\n"); + + if (change_status == LANCER_FW_RESET_NEEDED) { + dev_info(dev, "Resetting adapter to activate new FW\n"); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); + if (status) { + dev_err(dev, "Adapter busy, could not reset FW\n"); + dev_err(dev, "Reboot server to activate new FW\n"); + } + } else if (change_status != LANCER_NO_RESET_NEEDED) { + dev_info(dev, "Reboot server to activate new FW\n"); + } + + return 0; +} + +/* Check if the flash image file is compatible with the adapter that + * is being flashed. + */ +static bool be_check_ufi_compatibility(struct be_adapter *adapter, + struct flash_file_hdr_g3 *fhdr) +{ + if (!fhdr) { + dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); + return false; + } + + /* First letter of the build version is used to identify + * which chip this image file is meant for. 
+ */ + switch (fhdr->build[0]) { + case BLD_STR_UFI_TYPE_SH: + if (!skyhawk_chip(adapter)) + return false; + break; + case BLD_STR_UFI_TYPE_BE3: + if (!BE3_chip(adapter)) + return false; + break; + case BLD_STR_UFI_TYPE_BE2: + if (!BE2_chip(adapter)) + return false; + break; + default: + return false; + } + + /* In BE3 FW images the "asic_type_rev" field doesn't track the + * asic_rev of the chips it is compatible with. + * When asic_type_rev is 0 the image is compatible only with + * pre-BE3-R chips (asic_rev < 0x10) + */ + if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) + return adapter->asic_rev < 0x10; + else + return (fhdr->asic_type_rev >= adapter->asic_rev); +} + +int be_fw_download(struct be_adapter *adapter, const struct firmware *fw) +{ + struct device *dev = &adapter->pdev->dev; + struct flash_file_hdr_g3 *fhdr3; + struct image_hdr *img_hdr_ptr; + int status = 0, i, num_imgs; + struct be_dma_mem flash_cmd; + + fhdr3 = (struct flash_file_hdr_g3 *)fw->data; + if (!be_check_ufi_compatibility(adapter, fhdr3)) { + dev_err(dev, "Flash image is not compatible with adapter\n"); + return -EINVAL; + } + + flash_cmd.size = sizeof(struct be_cmd_write_flashrom); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); + if (!flash_cmd.va) + return -ENOMEM; + + num_imgs = le32_to_cpu(fhdr3->num_imgs); + for (i = 0; i < num_imgs; i++) { + img_hdr_ptr = (struct image_hdr *)(fw->data + + (sizeof(struct flash_file_hdr_g3) + + i * sizeof(struct image_hdr))); + if (!BE2_chip(adapter) && + le32_to_cpu(img_hdr_ptr->imageid) != 1) + continue; + + if (skyhawk_chip(adapter)) + status = be_flash_skyhawk(adapter, fw, &flash_cmd, + num_imgs); + else + status = be_flash_BEx(adapter, fw, &flash_cmd, + num_imgs); + } + + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); + if (!status) + dev_info(dev, "Firmware flashed successfully\n"); + + return status; +} + int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd) { @@ -2892,7 +3454,6 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; - adapter->pci_func_num = attribs->pci_func_num; serial_num = attribs->hba_attribs.controller_serial_number; for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & @@ -3575,14 +4136,16 @@ int be_cmd_query_port_name(struct be_adapter *adapter) return status; } -/* Descriptor type */ -enum { - FUNC_DESC = 1, - VFT_DESC = 2 -}; - +/* When more than 1 NIC descriptor is present in the descriptor list, + * the caller must specify the pf_num to obtain the NIC descriptor + * corresponding to its pci function. + * get_vft must be true when the caller wants the VF-template desc of the + * PF-pool. + * The pf_num should be set to PF_NUM_IGNORE when the caller knows + * that only it's NIC descriptor is present in the descriptor list. 
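(For context on the pf_num parameter described above, the two usage patterns appear a little further down in this patch: a GET_FUNC_CONFIG response carries only the caller's own NIC descriptor, so matching is disabled with PF_NUM_IGNORE, while a GET_PROFILE_CONFIG response can carry descriptors for several functions, so the pf_num remembered from GET_FUNC_CONFIG selects the right ones. The lines below are condensed from those call sites, not new code.)

	/* GET_FUNC_CONFIG: only our own descriptor is present */
	desc = be_get_func_nic_desc(resp->func_param, desc_count, PF_NUM_IGNORE);

	/* GET_PROFILE_CONFIG: pick the descriptors for our PCI function */
	nic = be_get_func_nic_desc(resp->func_param, desc_count, adapter->pf_num);
	vf_res = be_get_vft_desc(resp->func_param, desc_count, adapter->pf_num);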
+ */ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, - int desc_type) + bool get_vft, u8 pf_num) { struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; struct be_nic_res_desc *nic; @@ -3592,40 +4155,42 @@ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { nic = (struct be_nic_res_desc *)hdr; - if (desc_type == FUNC_DESC || - (desc_type == VFT_DESC && - nic->flags & (1 << VFT_SHIFT))) + + if ((pf_num == PF_NUM_IGNORE || + nic->pf_num == pf_num) && + (!get_vft || nic->flags & BIT(VFT_SHIFT))) return nic; } - hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; hdr = (void *)hdr + hdr->desc_len; } return NULL; } -static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) +static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count, + u8 pf_num) { - return be_get_nic_desc(buf, desc_count, VFT_DESC); + return be_get_nic_desc(buf, desc_count, true, pf_num); } -static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) +static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count, + u8 pf_num) { - return be_get_nic_desc(buf, desc_count, FUNC_DESC); + return be_get_nic_desc(buf, desc_count, false, pf_num); } -static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, - u32 desc_count) +static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count, + u8 pf_num) { struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; struct be_pcie_res_desc *pcie; int i; for (i = 0; i < desc_count; i++) { - if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || - hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { - pcie = (struct be_pcie_res_desc *)hdr; - if (pcie->pf_num == devfn) + if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) { + pcie = (struct be_pcie_res_desc *)hdr; + if (pcie->pf_num == pf_num) return pcie; } @@ -3710,13 +4275,23 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) u32 desc_count = le32_to_cpu(resp->desc_count); struct be_nic_res_desc *desc; - desc = be_get_func_nic_desc(resp->func_param, desc_count); + /* GET_FUNC_CONFIG returns resource descriptors of the + * current function only. So, pf_num should be set to + * PF_NUM_IGNORE. 
+ */ + desc = be_get_func_nic_desc(resp->func_param, desc_count, + PF_NUM_IGNORE); if (!desc) { status = -EINVAL; goto err; } - adapter->pf_number = desc->pf_num; - be_copy_nic_desc(res, desc); + + /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */ + adapter->pf_num = desc->pf_num; + adapter->vf_num = desc->vf_num; + + if (res) + be_copy_nic_desc(res, desc); } err: mutex_unlock(&adapter->mbox_lock); @@ -3726,10 +4301,7 @@ err: return status; } -/* Will use MBOX only if MCCQ has not been created - * non-zero domain => a PF is querying this on behalf of a VF - * zero domain => a PF or a VF is querying this for itself - */ +/* Will use MBOX only if MCCQ has not been created */ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 query, u8 domain) { @@ -3759,12 +4331,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (!lancer_chip(adapter)) req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; - /* When a function is querying profile information relating to - * itself hdr.pf_number must be set to it's pci_func_num + 1 - */ req->hdr.domain = domain; - if (domain == 0) - req->hdr.pf_num = adapter->pci_func_num + 1; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the * descriptors with all bits set to "1" for the fields which can be @@ -3780,8 +4347,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, resp = cmd.va; desc_count = le16_to_cpu(resp->desc_count); - pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, - desc_count); + pcie = be_get_pcie_desc(resp->func_param, desc_count, + adapter->pf_num); if (pcie) res->max_vfs = le16_to_cpu(pcie->num_vfs); @@ -3789,11 +4356,13 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (port) adapter->mc_type = port->mc_type; - nic = be_get_func_nic_desc(resp->func_param, desc_count); + nic = be_get_func_nic_desc(resp->func_param, desc_count, + adapter->pf_num); if (nic) be_copy_nic_desc(res, nic); - vf_res = be_get_vft_desc(resp->func_param, desc_count); + vf_res = be_get_vft_desc(resp->func_param, desc_count, + adapter->pf_num); if (vf_res) res->vf_if_cap_flags = vf_res->cap_flags; err: @@ -3883,7 +4452,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, return be_cmd_set_qos(adapter, max_rate / 10, domain); be_reset_nic_desc(&nic_desc); - nic_desc.pf_num = adapter->pf_number; + nic_desc.pf_num = adapter->pf_num; nic_desc.vf_num = domain; nic_desc.bw_min = 0; if (lancer_chip(adapter)) { @@ -4260,16 +4829,13 @@ err: return status; } -int be_cmd_set_logical_link_config(struct be_adapter *adapter, - int link_state, u8 domain) +int __be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, int version, u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_ll_link *req; int status; - if (BEx_chip(adapter) || lancer_chip(adapter)) - return -EOPNOTSUPP; - spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -4284,14 +4850,15 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter, OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, sizeof(*req), wrb, NULL); - req->hdr.version = 1; + req->hdr.version = version; req->hdr.domain = domain; - if (link_state == IFLA_VF_LINK_STATE_ENABLE) - req->link_config |= 1; + if (link_state == IFLA_VF_LINK_STATE_ENABLE || + link_state == IFLA_VF_LINK_STATE_AUTO) + req->link_config |= PLINK_ENABLE; if (link_state == IFLA_VF_LINK_STATE_AUTO) - req->link_config |= 1 << PLINK_TRACK_SHIFT; + req->link_config |= PLINK_TRACK; status = 
be_mcc_notify_wait(adapter); err: @@ -4299,6 +4866,25 @@ err: return status; } +int be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, u8 domain) +{ + int status; + + if (BEx_chip(adapter)) + return -EOPNOTSUPP; + + status = __be_cmd_set_logical_link_config(adapter, link_state, + 2, domain); + + /* Version 2 of the command will not be recognized by older FW. + * On such a failure issue version 1 of the command. + */ + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST) + status = __be_cmd_set_logical_link_config(adapter, link_state, + 1, domain); + return status; +} int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, int wrb_payload_size, u16 *cmd_status, u16 *ext_status) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 91155ea74f34..241819b36ca7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -66,7 +66,9 @@ enum mcc_addl_status { MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16, MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d, MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a, - MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab + MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab, + MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56, + MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57 }; #define CQE_BASE_STATUS_MASK 0xFFFF @@ -289,9 +291,7 @@ struct be_cmd_req_hdr { u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ u8 version; /* dword 3 */ - u8 rsvd1; /* dword 3 */ - u8 pf_num; /* dword 3 */ - u8 rsvd2; /* dword 3 */ + u8 rsvd[3]; /* dword 3 */ }; #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ @@ -1207,68 +1207,85 @@ struct be_cmd_resp_get_beacon_state { /* Flashrom related descriptors */ #define MAX_FLASH_COMP 32 -#define OPTYPE_ISCSI_ACTIVE 0 -#define OPTYPE_REDBOOT 1 -#define OPTYPE_BIOS 2 -#define OPTYPE_PXE_BIOS 3 -#define OPTYPE_OFFSET_SPECIFIED 7 -#define OPTYPE_FCOE_BIOS 8 -#define OPTYPE_ISCSI_BACKUP 9 -#define OPTYPE_FCOE_FW_ACTIVE 10 -#define OPTYPE_FCOE_FW_BACKUP 11 -#define OPTYPE_NCSI_FW 13 -#define OPTYPE_REDBOOT_DIR 18 -#define OPTYPE_REDBOOT_CONFIG 19 -#define OPTYPE_SH_PHY_FW 21 -#define OPTYPE_FLASHISM_JUMPVECTOR 22 -#define OPTYPE_UFI_DIR 23 -#define OPTYPE_PHY_FW 99 - -#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 262144 /* Max OPTION ROM image sz */ -#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 262144 /* Max Redboot image sz */ -#define FLASH_IMAGE_MAX_SIZE_g2 1310720 /* Max firmware image size */ - -#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 262144 -#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144 -#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 524288 /* Max OPTION ROM image sz */ -#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 1048576 /* Max Redboot image sz */ -#define FLASH_IMAGE_MAX_SIZE_g3 2097152 /* Max firmware image size */ - -/* Offsets for components on Flash. 
*/ -#define FLASH_REDBOOT_START_g2 0 -#define FLASH_FCoE_BIOS_START_g2 524288 -#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 1048576 -#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 2359296 -#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 3670016 -#define FLASH_FCoE_BACKUP_IMAGE_START_g2 4980736 -#define FLASH_iSCSI_BIOS_START_g2 7340032 -#define FLASH_PXE_BIOS_START_g2 7864320 - -#define FLASH_REDBOOT_START_g3 262144 -#define FLASH_PHY_FW_START_g3 1310720 -#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 2097152 -#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 4194304 -#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 6291456 -#define FLASH_FCoE_BACKUP_IMAGE_START_g3 8388608 -#define FLASH_iSCSI_BIOS_START_g3 12582912 -#define FLASH_PXE_BIOS_START_g3 13107200 -#define FLASH_FCoE_BIOS_START_g3 13631488 -#define FLASH_NCSI_START_g3 15990784 - -#define IMAGE_NCSI 16 -#define IMAGE_OPTION_ROM_PXE 32 -#define IMAGE_OPTION_ROM_FCoE 33 -#define IMAGE_OPTION_ROM_ISCSI 34 -#define IMAGE_FLASHISM_JUMPVECTOR 48 -#define IMAGE_FIRMWARE_iSCSI 160 -#define IMAGE_FIRMWARE_FCoE 162 -#define IMAGE_FIRMWARE_BACKUP_iSCSI 176 -#define IMAGE_FIRMWARE_BACKUP_FCoE 178 -#define IMAGE_FIRMWARE_PHY 192 -#define IMAGE_REDBOOT_DIR 208 -#define IMAGE_REDBOOT_CONFIG 209 -#define IMAGE_UFI_DIR 210 -#define IMAGE_BOOT_CODE 224 +/* Optypes of each component in the UFI */ +enum { + OPTYPE_ISCSI_ACTIVE = 0, + OPTYPE_REDBOOT = 1, + OPTYPE_BIOS = 2, + OPTYPE_PXE_BIOS = 3, + OPTYPE_OFFSET_SPECIFIED = 7, + OPTYPE_FCOE_BIOS = 8, + OPTYPE_ISCSI_BACKUP = 9, + OPTYPE_FCOE_FW_ACTIVE = 10, + OPTYPE_FCOE_FW_BACKUP = 11, + OPTYPE_NCSI_FW = 13, + OPTYPE_REDBOOT_DIR = 18, + OPTYPE_REDBOOT_CONFIG = 19, + OPTYPE_SH_PHY_FW = 21, + OPTYPE_FLASHISM_JUMPVECTOR = 22, + OPTYPE_UFI_DIR = 23, + OPTYPE_PHY_FW = 99 +}; + +/* Maximum sizes of components in BE2 FW UFI */ +enum { + BE2_BIOS_COMP_MAX_SIZE = 0x40000, + BE2_REDBOOT_COMP_MAX_SIZE = 0x40000, + BE2_COMP_MAX_SIZE = 0x140000 +}; + +/* Maximum sizes of components in BE3 FW UFI */ +enum { + BE3_NCSI_COMP_MAX_SIZE = 0x40000, + BE3_PHY_FW_COMP_MAX_SIZE = 0x40000, + BE3_BIOS_COMP_MAX_SIZE = 0x80000, + BE3_REDBOOT_COMP_MAX_SIZE = 0x100000, + BE3_COMP_MAX_SIZE = 0x200000 +}; + +/* Offsets for components in BE2 FW UFI */ +enum { + BE2_REDBOOT_START = 0x8000, + BE2_FCOE_BIOS_START = 0x80000, + BE2_ISCSI_PRIMARY_IMAGE_START = 0x100000, + BE2_ISCSI_BACKUP_IMAGE_START = 0x240000, + BE2_FCOE_PRIMARY_IMAGE_START = 0x380000, + BE2_FCOE_BACKUP_IMAGE_START = 0x4c0000, + BE2_ISCSI_BIOS_START = 0x700000, + BE2_PXE_BIOS_START = 0x780000 +}; + +/* Offsets for components in BE3 FW UFI */ +enum { + BE3_REDBOOT_START = 0x40000, + BE3_PHY_FW_START = 0x140000, + BE3_ISCSI_PRIMARY_IMAGE_START = 0x200000, + BE3_ISCSI_BACKUP_IMAGE_START = 0x400000, + BE3_FCOE_PRIMARY_IMAGE_START = 0x600000, + BE3_FCOE_BACKUP_IMAGE_START = 0x800000, + BE3_ISCSI_BIOS_START = 0xc00000, + BE3_PXE_BIOS_START = 0xc80000, + BE3_FCOE_BIOS_START = 0xd00000, + BE3_NCSI_START = 0xf40000 +}; + +/* Component entry types */ +enum { + IMAGE_NCSI = 0x10, + IMAGE_OPTION_ROM_PXE = 0x20, + IMAGE_OPTION_ROM_FCOE = 0x21, + IMAGE_OPTION_ROM_ISCSI = 0x22, + IMAGE_FLASHISM_JUMPVECTOR = 0x30, + IMAGE_FIRMWARE_ISCSI = 0xa0, + IMAGE_FIRMWARE_FCOE = 0xa2, + IMAGE_FIRMWARE_BACKUP_ISCSI = 0xb0, + IMAGE_FIRMWARE_BACKUP_FCOE = 0xb2, + IMAGE_FIRMWARE_PHY = 0xc0, + IMAGE_REDBOOT_DIR = 0xd0, + IMAGE_REDBOOT_CONFIG = 0xd1, + IMAGE_UFI_DIR = 0xd2, + IMAGE_BOOT_CODE = 0xe2 +}; struct controller_id { u32 vendor; @@ -1394,6 +1411,9 @@ struct be_cmd_read_flash_crc { } __packed; /**************** Lancer Firmware 
Flash ************/ +#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) +#define LANCER_FW_DOWNLOAD_LOCATION "/prg" + struct amap_lancer_write_obj_context { u8 write_length[24]; u8 reserved1[7]; @@ -1654,11 +1674,7 @@ struct mgmt_hba_attribs { struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u32 rsvd0[2]; - u16 rsvd1; - u8 pci_func_num; - u8 rsvd2; - u32 rsvd3[7]; + u32 rsvd0[10]; } __packed; struct be_cmd_req_cntl_attribs { @@ -2083,6 +2099,7 @@ struct be_port_res_desc { #define NV_TYPE_VXLAN 3 #define SOCVID_SHIFT 2 /* Strip outer vlan */ #define RCVID_SHIFT 4 /* Report vlan */ +#define PF_NUM_IGNORE 255 u8 nv_flags; u8 rsvd2; __le16 nv_port; /* vxlan/gre port */ @@ -2246,7 +2263,8 @@ struct be_cmd_resp_get_iface_list { }; /*************** Set logical link ********************/ -#define PLINK_TRACK_SHIFT 8 +#define PLINK_ENABLE BIT(0) +#define PLINK_TRACK BIT(8) struct be_cmd_req_set_ll_link { struct be_cmd_req_hdr hdr; u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */ @@ -2321,19 +2339,11 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, u8 page_num, u8 *data); int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_query_sfp_info(struct be_adapter *adapter); -int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 flash_oper, u32 flash_opcode, u32 img_offset, - u32 buf_size); -int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, - const char *obj_name, u32 *data_written, - u8 *change_status, u8 *addn_status); int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_read, u32 *eof, u8 *addn_status); -int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name); -int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - u16 img_optype, u32 img_offset, u32 crc_offset); +int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw); +int be_fw_download(struct be_adapter *adapter, const struct firmware *fw); int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd); int be_cmd_fw_init(struct be_adapter *adapter); @@ -2355,9 +2365,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, void be_detect_error(struct be_adapter *adapter); int be_cmd_get_die_temperature(struct be_adapter *adapter); int be_cmd_get_cntl_attributes(struct be_adapter *adapter); +int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size); +int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf); int be_cmd_req_native_mode(struct be_adapter *adapter); -int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); -int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, u32 domain); int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 734f655c99c1..a19ac441336f 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -241,17 +241,28 @@ static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) u32 data_read = 0, eof; u8 addn_status; struct be_dma_mem data_len_cmd; - int status; memset(&data_len_cmd, 0, sizeof(data_len_cmd)); /* data_offset and data_size should be 0 to 
get reg len */ - status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, - file_name, &data_read, &eof, - &addn_status); + lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name, + &data_read, &eof, &addn_status); return data_read; } +static int be_get_dump_len(struct be_adapter *adapter) +{ + u32 dump_size = 0; + + if (lancer_chip(adapter)) + dump_size = lancer_cmd_get_file_len(adapter, + LANCER_FW_DUMP_FILE); + else + dump_size = adapter->fat_dump_len; + + return dump_size; +} + static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, u32 buf_len, void *buf) { @@ -293,37 +304,18 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, return status; } -static int be_get_reg_len(struct net_device *netdev) +static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len, + void *buf) { - struct be_adapter *adapter = netdev_priv(netdev); - u32 log_size = 0; - - if (!check_privilege(adapter, MAX_PRIVILEGES)) - return 0; - - if (be_physfn(adapter)) { - if (lancer_chip(adapter)) - log_size = lancer_cmd_get_file_len(adapter, - LANCER_FW_DUMP_FILE); - else - be_cmd_get_reg_len(adapter, &log_size); - } - return log_size; -} + int status = 0; -static void -be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) -{ - struct be_adapter *adapter = netdev_priv(netdev); + if (lancer_chip(adapter)) + status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, + dump_len, buf); + else + status = be_cmd_get_fat_dump(adapter, dump_len, buf); - if (be_physfn(adapter)) { - memset(buf, 0, regs->len); - if (lancer_chip(adapter)) - lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, - regs->len, buf); - else - be_cmd_get_regs(adapter, regs->len, buf); - } + return status; } static int be_get_coalesce(struct net_device *netdev, @@ -916,6 +908,34 @@ static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) return be_load_fw(adapter, efl->data); } +static int +be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + dump->len = be_get_dump_len(adapter); + dump->version = 1; + dump->flag = 0x1; /* FW dump is enabled */ + return 0; +} + +static int +be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buf) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = be_read_dump_data(adapter, dump->len, buf); + return be_cmd_status(status); +} + static int be_get_eeprom_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -1313,8 +1333,6 @@ const struct ethtool_ops be_ethtool_ops = { .set_msglevel = be_set_msg_level, .get_sset_count = be_get_sset_count, .get_ethtool_stats = be_get_ethtool_stats, - .get_regs_len = be_get_reg_len, - .get_regs = be_get_regs, .flash_device = be_do_flash, .self_test = be_self_test, .get_rxnfc = be_get_rxnfc, @@ -1323,6 +1341,8 @@ const struct ethtool_ops be_ethtool_ops = { .get_rxfh_key_size = be_get_rxfh_key_size, .get_rxfh = be_get_rxfh, .set_rxfh = be_set_rxfh, + .get_dump_flag = be_get_dump_flag, + .get_dump_data = be_get_dump_data, .get_channels = be_get_channels, .set_channels = be_set_channels, .get_module_info = be_get_module_info, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b6ad02909d6b..f99de3657ce3 100644 --- 
a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -729,7 +729,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, /* If vlan priority provided by OS is NOT in available bmap */ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | - adapter->recommended_prio; + adapter->recommended_prio_bits; return vlan_tag; } @@ -2184,7 +2184,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); skb->csum_level = rxcp->tunneled; - skb_mark_napi_id(skb, napi); if (rxcp->vlanf) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); @@ -2631,7 +2630,6 @@ static int be_evt_queues_create(struct be_adapter *adapter) eqo->affinity_mask); netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); - napi_hash_add(&eqo->napi); } return 0; } @@ -3299,8 +3297,10 @@ static int be_msix_register(struct be_adapter *adapter) return 0; err_msix: - for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) + for (i--; i >= 0; i--) { + eqo = &adapter->eq_obj[i]; free_irq(be_msix_vec_get(adapter, eqo), eqo); + } dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", status); be_msix_disable(adapter); @@ -3432,8 +3432,6 @@ static int be_close(struct net_device *netdev) be_disable_if_filters(adapter); - be_roce_dev_close(adapter); - if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { for_all_evt_queues(adapter, eqo, i) { napi_disable(&eqo->napi); @@ -3601,8 +3599,6 @@ static int be_open(struct net_device *netdev) be_link_status_update(adapter, link_status); netif_tx_start_all_queues(netdev); - be_roce_dev_open(adapter); - #ifdef CONFIG_BE2NET_VXLAN if (skyhawk_chip(adapter)) vxlan_get_rx_port(netdev); @@ -4206,10 +4202,17 @@ static int be_get_config(struct be_adapter *adapter) int status, level; u16 profile_id; + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + status = be_cmd_query_fw_cfg(adapter); if (status) return status; + if (!lancer_chip(adapter) && be_physfn(adapter)) + be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len); + if (BEx_chip(adapter)) { level = be_cmd_get_fw_log_level(adapter); adapter->msg_enable = @@ -4404,10 +4407,14 @@ static int be_setup(struct be_adapter *adapter) if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); - /* Need to invoke this cmd first to get the PCI Function Number */ - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; + /* invoke this cmd first to get pf_num and vf_num which are needed + * for issuing profile related cmds + */ + if (!BEx_chip(adapter)) { + status = be_cmd_get_func_config(adapter, NULL); + if (status) + return status; + } if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); @@ -4492,570 +4499,6 @@ static void be_netpoll(struct net_device *netdev) } #endif -static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; - -static bool phy_flashing_required(struct be_adapter *adapter) -{ - return (adapter->phy.phy_type == PHY_TYPE_TN_8022 && - adapter->phy.interface_type == PHY_TYPE_BASET_10GB); -} - -static bool is_comp_in_ufi(struct be_adapter *adapter, - struct flash_section_info *fsec, int type) -{ - int i = 0, img_type = 0; - struct flash_section_info_g2 *fsec_g2 = NULL; - - if (BE2_chip(adapter)) - fsec_g2 = (struct flash_section_info_g2 *)fsec; - - for (i = 0; i < MAX_FLASH_COMP; i++) { - if (fsec_g2) - img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); - else - img_type = 
le32_to_cpu(fsec->fsec_entry[i].type); - - if (img_type == type) - return true; - } - return false; - -} - -static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, - int header_size, - const struct firmware *fw) -{ - struct flash_section_info *fsec = NULL; - const u8 *p = fw->data; - - p += header_size; - while (p < (fw->data + fw->size)) { - fsec = (struct flash_section_info *)p; - if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) - return fsec; - p += 32; - } - return NULL; -} - -static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, - u32 img_offset, u32 img_size, int hdr_size, - u16 img_optype, bool *crc_match) -{ - u32 crc_offset; - int status; - u8 crc[4]; - - status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset, - img_size - 4); - if (status) - return status; - - crc_offset = hdr_size + img_offset + img_size - 4; - - /* Skip flashing, if crc of flashed region matches */ - if (!memcmp(crc, p + crc_offset, 4)) - *crc_match = true; - else - *crc_match = false; - - return status; -} - -static int be_flash(struct be_adapter *adapter, const u8 *img, - struct be_dma_mem *flash_cmd, int optype, int img_size, - u32 img_offset) -{ - u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0; - struct be_cmd_write_flashrom *req = flash_cmd->va; - int status; - - while (total_bytes) { - num_bytes = min_t(u32, 32*1024, total_bytes); - - total_bytes -= num_bytes; - - if (!total_bytes) { - if (optype == OPTYPE_PHY_FW) - flash_op = FLASHROM_OPER_PHY_FLASH; - else - flash_op = FLASHROM_OPER_FLASH; - } else { - if (optype == OPTYPE_PHY_FW) - flash_op = FLASHROM_OPER_PHY_SAVE; - else - flash_op = FLASHROM_OPER_SAVE; - } - - memcpy(req->data_buf, img, num_bytes); - img += num_bytes; - status = be_cmd_write_flashrom(adapter, flash_cmd, optype, - flash_op, img_offset + - bytes_sent, num_bytes); - if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && - optype == OPTYPE_PHY_FW) - break; - else if (status) - return status; - - bytes_sent += num_bytes; - } - return 0; -} - -/* For BE2, BE3 and BE3-R */ -static int be_flash_BEx(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, int num_of_images) -{ - int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); - struct device *dev = &adapter->pdev->dev; - struct flash_section_info *fsec = NULL; - int status, i, filehdr_size, num_comp; - const struct flash_comp *pflashcomp; - bool crc_match; - const u8 *p; - - struct flash_comp gen3_flash_types[] = { - { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI}, - { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT, - FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE}, - { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI}, - { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE}, - { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE}, - { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI}, - { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE}, - { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE}, - { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW, - FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI}, - { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW, - 
FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY} - }; - - struct flash_comp gen2_flash_types[] = { - { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI}, - { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT, - FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE}, - { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI}, - { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE}, - { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE}, - { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI}, - { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE}, - { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE} - }; - - if (BE3_chip(adapter)) { - pflashcomp = gen3_flash_types; - filehdr_size = sizeof(struct flash_file_hdr_g3); - num_comp = ARRAY_SIZE(gen3_flash_types); - } else { - pflashcomp = gen2_flash_types; - filehdr_size = sizeof(struct flash_file_hdr_g2); - num_comp = ARRAY_SIZE(gen2_flash_types); - img_hdrs_size = 0; - } - - /* Get flash section info*/ - fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); - if (!fsec) { - dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); - return -1; - } - for (i = 0; i < num_comp; i++) { - if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) - continue; - - if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && - memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) - continue; - - if (pflashcomp[i].optype == OPTYPE_PHY_FW && - !phy_flashing_required(adapter)) - continue; - - if (pflashcomp[i].optype == OPTYPE_REDBOOT) { - status = be_check_flash_crc(adapter, fw->data, - pflashcomp[i].offset, - pflashcomp[i].size, - filehdr_size + - img_hdrs_size, - OPTYPE_REDBOOT, &crc_match); - if (status) { - dev_err(dev, - "Could not get CRC for 0x%x region\n", - pflashcomp[i].optype); - continue; - } - - if (crc_match) - continue; - } - - p = fw->data + filehdr_size + pflashcomp[i].offset + - img_hdrs_size; - if (p + pflashcomp[i].size > fw->data + fw->size) - return -1; - - status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, - pflashcomp[i].size, 0); - if (status) { - dev_err(dev, "Flashing section type 0x%x failed\n", - pflashcomp[i].img_type); - return status; - } - } - return 0; -} - -static u16 be_get_img_optype(struct flash_section_entry fsec_entry) -{ - u32 img_type = le32_to_cpu(fsec_entry.type); - u16 img_optype = le16_to_cpu(fsec_entry.optype); - - if (img_optype != 0xFFFF) - return img_optype; - - switch (img_type) { - case IMAGE_FIRMWARE_iSCSI: - img_optype = OPTYPE_ISCSI_ACTIVE; - break; - case IMAGE_BOOT_CODE: - img_optype = OPTYPE_REDBOOT; - break; - case IMAGE_OPTION_ROM_ISCSI: - img_optype = OPTYPE_BIOS; - break; - case IMAGE_OPTION_ROM_PXE: - img_optype = OPTYPE_PXE_BIOS; - break; - case IMAGE_OPTION_ROM_FCoE: - img_optype = OPTYPE_FCOE_BIOS; - break; - case IMAGE_FIRMWARE_BACKUP_iSCSI: - img_optype = OPTYPE_ISCSI_BACKUP; - break; - case IMAGE_NCSI: - img_optype = OPTYPE_NCSI_FW; - break; - case IMAGE_FLASHISM_JUMPVECTOR: - img_optype = OPTYPE_FLASHISM_JUMPVECTOR; - break; - case IMAGE_FIRMWARE_PHY: - img_optype = OPTYPE_SH_PHY_FW; - break; - case IMAGE_REDBOOT_DIR: - img_optype = OPTYPE_REDBOOT_DIR; - break; - case IMAGE_REDBOOT_CONFIG: - img_optype = OPTYPE_REDBOOT_CONFIG; - 
break; - case IMAGE_UFI_DIR: - img_optype = OPTYPE_UFI_DIR; - break; - default: - break; - } - - return img_optype; -} - -static int be_flash_skyhawk(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, int num_of_images) -{ - int img_hdrs_size = num_of_images * sizeof(struct image_hdr); - bool crc_match, old_fw_img, flash_offset_support = true; - struct device *dev = &adapter->pdev->dev; - struct flash_section_info *fsec = NULL; - u32 img_offset, img_size, img_type; - u16 img_optype, flash_optype; - int status, i, filehdr_size; - const u8 *p; - - filehdr_size = sizeof(struct flash_file_hdr_g3); - fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); - if (!fsec) { - dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); - return -EINVAL; - } - -retry_flash: - for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { - img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); - img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); - img_type = le32_to_cpu(fsec->fsec_entry[i].type); - img_optype = be_get_img_optype(fsec->fsec_entry[i]); - old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; - - if (img_optype == 0xFFFF) - continue; - - if (flash_offset_support) - flash_optype = OPTYPE_OFFSET_SPECIFIED; - else - flash_optype = img_optype; - - /* Don't bother verifying CRC if an old FW image is being - * flashed - */ - if (old_fw_img) - goto flash; - - status = be_check_flash_crc(adapter, fw->data, img_offset, - img_size, filehdr_size + - img_hdrs_size, flash_optype, - &crc_match); - if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || - base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { - /* The current FW image on the card does not support - * OFFSET based flashing. Retry using older mechanism - * of OPTYPE based flashing - */ - if (flash_optype == OPTYPE_OFFSET_SPECIFIED) { - flash_offset_support = false; - goto retry_flash; - } - - /* The current FW image on the card does not recognize - * the new FLASH op_type. The FW download is partially - * complete. Reboot the server now to enable FW image - * to recognize the new FLASH op_type. To complete the - * remaining process, download the same FW again after - * the reboot. - */ - dev_err(dev, "Flash incomplete. Reset the server\n"); - dev_err(dev, "Download FW image again after reset\n"); - return -EAGAIN; - } else if (status) { - dev_err(dev, "Could not get CRC for 0x%x region\n", - img_optype); - return -EFAULT; - } - - if (crc_match) - continue; - -flash: - p = fw->data + filehdr_size + img_offset + img_hdrs_size; - if (p + img_size > fw->data + fw->size) - return -1; - - status = be_flash(adapter, p, flash_cmd, flash_optype, img_size, - img_offset); - - /* The current FW image on the card does not support OFFSET - * based flashing. 
Retry using older mechanism of OPTYPE based - * flashing - */ - if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD && - flash_optype == OPTYPE_OFFSET_SPECIFIED) { - flash_offset_support = false; - goto retry_flash; - } - - /* For old FW images ignore ILLEGAL_FIELD error or errors on - * UFI_DIR region - */ - if (old_fw_img && - (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || - (img_optype == OPTYPE_UFI_DIR && - base_status(status) == MCC_STATUS_FAILED))) { - continue; - } else if (status) { - dev_err(dev, "Flashing section type 0x%x failed\n", - img_type); - return -EFAULT; - } - } - return 0; -} - -static int lancer_fw_download(struct be_adapter *adapter, - const struct firmware *fw) -{ -#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) -#define LANCER_FW_DOWNLOAD_LOCATION "/prg" - struct device *dev = &adapter->pdev->dev; - struct be_dma_mem flash_cmd; - const u8 *data_ptr = NULL; - u8 *dest_image_ptr = NULL; - size_t image_size = 0; - u32 chunk_size = 0; - u32 data_written = 0; - u32 offset = 0; - int status = 0; - u8 add_status = 0; - u8 change_status; - - if (!IS_ALIGNED(fw->size, sizeof(u32))) { - dev_err(dev, "FW image size should be multiple of 4\n"); - return -EINVAL; - } - - flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) - + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); - if (!flash_cmd.va) - return -ENOMEM; - - dest_image_ptr = flash_cmd.va + - sizeof(struct lancer_cmd_req_write_object); - image_size = fw->size; - data_ptr = fw->data; - - while (image_size) { - chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); - - /* Copy the image chunk content. */ - memcpy(dest_image_ptr, data_ptr, chunk_size); - - status = lancer_cmd_write_object(adapter, &flash_cmd, - chunk_size, offset, - LANCER_FW_DOWNLOAD_LOCATION, - &data_written, &change_status, - &add_status); - if (status) - break; - - offset += data_written; - data_ptr += data_written; - image_size -= data_written; - } - - if (!status) { - /* Commit the FW written */ - status = lancer_cmd_write_object(adapter, &flash_cmd, - 0, offset, - LANCER_FW_DOWNLOAD_LOCATION, - &data_written, &change_status, - &add_status); - } - - dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); - if (status) { - dev_err(dev, "Firmware load error\n"); - return be_cmd_status(status); - } - - dev_info(dev, "Firmware flashed successfully\n"); - - if (change_status == LANCER_FW_RESET_NEEDED) { - dev_info(dev, "Resetting adapter to activate new FW\n"); - status = lancer_physdev_ctrl(adapter, - PHYSDEV_CONTROL_FW_RESET_MASK); - if (status) { - dev_err(dev, "Adapter busy, could not reset FW\n"); - dev_err(dev, "Reboot server to activate new FW\n"); - } - } else if (change_status != LANCER_NO_RESET_NEEDED) { - dev_info(dev, "Reboot server to activate new FW\n"); - } - - return 0; -} - -/* Check if the flash image file is compatible with the adapter that - * is being flashed. - */ -static bool be_check_ufi_compatibility(struct be_adapter *adapter, - struct flash_file_hdr_g3 *fhdr) -{ - if (!fhdr) { - dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); - return false; - } - - /* First letter of the build version is used to identify - * which chip this image file is meant for. 
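(The functions being deleted from be_main.c in this stretch, phy_flashing_required(), be_flash_BEx(), be_flash_skyhawk(), lancer_fw_download(), be_check_ufi_compatibility(), be_fw_download() and their helpers, are the same routines added to be_cmds.c earlier in this patch: the flashing code is being relocated, not dropped. Among the visible differences in the new copies are the digital-signature error reporting added to be_flash_skyhawk() and the move of the LANCER_FW_DOWNLOAD_* defines into be_cmds.h.)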
- */ - switch (fhdr->build[0]) { - case BLD_STR_UFI_TYPE_SH: - if (!skyhawk_chip(adapter)) - return false; - break; - case BLD_STR_UFI_TYPE_BE3: - if (!BE3_chip(adapter)) - return false; - break; - case BLD_STR_UFI_TYPE_BE2: - if (!BE2_chip(adapter)) - return false; - break; - default: - return false; - } - - /* In BE3 FW images the "asic_type_rev" field doesn't track the - * asic_rev of the chips it is compatible with. - * When asic_type_rev is 0 the image is compatible only with - * pre-BE3-R chips (asic_rev < 0x10) - */ - if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) - return adapter->asic_rev < 0x10; - else - return (fhdr->asic_type_rev >= adapter->asic_rev); -} - -static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) -{ - struct device *dev = &adapter->pdev->dev; - struct flash_file_hdr_g3 *fhdr3; - struct image_hdr *img_hdr_ptr; - int status = 0, i, num_imgs; - struct be_dma_mem flash_cmd; - - fhdr3 = (struct flash_file_hdr_g3 *)fw->data; - if (!be_check_ufi_compatibility(adapter, fhdr3)) { - dev_err(dev, "Flash image is not compatible with adapter\n"); - return -EINVAL; - } - - flash_cmd.size = sizeof(struct be_cmd_write_flashrom); - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, - GFP_KERNEL); - if (!flash_cmd.va) - return -ENOMEM; - - num_imgs = le32_to_cpu(fhdr3->num_imgs); - for (i = 0; i < num_imgs; i++) { - img_hdr_ptr = (struct image_hdr *)(fw->data + - (sizeof(struct flash_file_hdr_g3) + - i * sizeof(struct image_hdr))); - if (!BE2_chip(adapter) && - le32_to_cpu(img_hdr_ptr->imageid) != 1) - continue; - - if (skyhawk_chip(adapter)) - status = be_flash_skyhawk(adapter, fw, &flash_cmd, - num_imgs); - else - status = be_flash_BEx(adapter, fw, &flash_cmd, - num_imgs); - } - - dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); - if (!status) - dev_info(dev, "Firmware flashed successfully\n"); - - return status; -} - int be_load_fw(struct be_adapter *adapter, u8 *fw_file) { const struct firmware *fw; @@ -5110,6 +4553,9 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, return -EINVAL; mode = nla_get_u16(attr); + if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA) + return -EOPNOTSUPP; + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; @@ -5291,7 +4737,7 @@ static netdev_features_t be_features_check(struct sk_buff *skb, skb->inner_protocol != htons(ETH_P_TEB) || skb_inner_mac_header(skb) - skb_transport_header(skb) != sizeof(struct udphdr) + sizeof(struct vxlanhdr)) - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 60368207bf58..4089156a7f5e 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter) } } -static void _be_roce_dev_open(struct be_adapter *adapter) -{ - if (ocrdma_drv && adapter->ocrdma_dev && - ocrdma_drv->state_change_handler) - ocrdma_drv->state_change_handler(adapter->ocrdma_dev, - BE_DEV_UP); -} - -void be_roce_dev_open(struct be_adapter *adapter) -{ - if (be_roce_supported(adapter)) { - mutex_lock(&be_adapter_list_lock); - _be_roce_dev_open(adapter); - mutex_unlock(&be_adapter_list_lock); - } -} - -static void _be_roce_dev_close(struct be_adapter *adapter) -{ - if (ocrdma_drv && adapter->ocrdma_dev && - 
ocrdma_drv->state_change_handler) - ocrdma_drv->state_change_handler(adapter->ocrdma_dev, - BE_DEV_DOWN); -} - -void be_roce_dev_close(struct be_adapter *adapter) -{ - if (be_roce_supported(adapter)) { - mutex_lock(&be_adapter_list_lock); - _be_roce_dev_close(adapter); - mutex_unlock(&be_adapter_list_lock); - } -} - void be_roce_dev_shutdown(struct be_adapter *adapter) { if (be_roce_supported(adapter)) { @@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv) _be_roce_dev_add(dev); netdev = dev->netdev; - if (netif_running(netdev) && netif_oper_up(netdev)) - _be_roce_dev_open(dev); } mutex_unlock(&be_adapter_list_lock); return 0; diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index cde6ef905ec4..fde609789483 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -60,9 +60,7 @@ struct ocrdma_driver { void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); }; -enum { - BE_DEV_UP = 0, - BE_DEV_DOWN = 1, +enum be_roce_event { BE_DEV_SHUTDOWN = 2 }; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ff665493ca97..62fa136554ac 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -678,7 +678,7 @@ static int ethoc_mdio_probe(struct net_device *dev) int err; if (priv->phy_id != -1) - phy = priv->mdio->phy_map[priv->phy_id]; + phy = mdiobus_get_phy(priv->mdio, priv->phy_id); else phy = phy_find_first(priv->mdio); @@ -766,7 +766,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; - phy = priv->mdio->phy_map[mdio->phy_id]; + phy = mdiobus_get_phy(priv->mdio, mdio->phy_id); if (!phy) return -ENODEV; } else { @@ -1015,7 +1015,6 @@ static int ethoc_probe(struct platform_device *pdev) struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; - unsigned int phy; int num_bd; int ret = 0; bool random_mac = false; @@ -1206,19 +1205,10 @@ static int ethoc_probe(struct platform_device *pdev) priv->mdio->write = ethoc_mdio_write; priv->mdio->priv = priv; - priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mdio->irq) { - ret = -ENOMEM; - goto free_mdio; - } - - for (phy = 0; phy < PHY_MAX_ADDR; phy++) - priv->mdio->irq[phy] = PHY_POLL; - ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); - goto free_mdio; + goto free; } ret = ethoc_mdio_probe(netdev); @@ -1250,8 +1240,6 @@ error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); -free_mdio: - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: if (priv->clk) diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 63c2bcf8031a..b1026689b78f 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { - u32 buf = - nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - - /* to accommodate word-unaligned address of "reg" - * we have to do memcpy_toio() instead of simple "=". 
- */ - memcpy_toio((void __iomem *)reg, &buf, sizeof(buf)); + u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + put_unaligned(buf, reg); } } /* copy last bytes (if any) */ if (last) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - - memcpy_toio((void __iomem *)reg, &buf, last); + memcpy((u8*)reg, &buf, last); } } @@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev, struct nps_enet_tx_ctl tx_ctrl; short length = skb->len; u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); - u32 *src = (u32 *)virt_to_phys(skb->data); + u32 *src = (void *)skb->data; bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); tx_ctrl.value = 0; @@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev, if (src_is_aligned) for (i = 0; i < len; i++, src++) nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); - else { /* !src_is_aligned */ - for (i = 0; i < len; i++, src++) { - u32 buf; - - /* to accommodate word-unaligned address of "src" - * we have to do memcpy_fromio() instead of simple "=" - */ - memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf)); - nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf); - } - } + else /* !src_is_aligned */ + for (i = 0; i < len; i++, src++) + nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, + get_unaligned(src)); + /* Write the length of the Frame */ tx_ctrl.nt = length; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 6d0c5d5eea6d..84384e1585a5 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -71,7 +71,6 @@ struct ftgmac100 { struct napi_struct napi; struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; struct phy_device *phydev; int old_speed; }; @@ -835,26 +834,15 @@ static void ftgmac100_adjust_link(struct net_device *netdev) static int ftgmac100_mii_probe(struct ftgmac100 *priv) { struct net_device *netdev = priv->netdev; - struct phy_device *phydev = NULL; - int i; - - /* search for connect PHY device */ - for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *tmp = priv->mii_bus->phy_map[i]; - - if (tmp) { - phydev = tmp; - break; - } - } + struct phy_device *phydev; - /* now we are supposed to have a proper phydev, to attach to... */ + phydev = phy_find_first(priv->mii_bus); if (!phydev) { netdev_info(netdev, "%s: no PHY found\n", netdev->name); return -ENODEV; } - phydev = phy_connect(netdev, dev_name(&phydev->dev), + phydev = phy_connect(netdev, phydev_name(phydev), &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phydev)) { @@ -1188,7 +1176,6 @@ static int ftgmac100_probe(struct platform_device *pdev) struct net_device *netdev; struct ftgmac100 *priv; int err; - int i; if (!pdev) return -ENODEV; @@ -1257,10 +1244,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->mii_bus->priv = netdev; priv->mii_bus->read = ftgmac100_mdiobus_read; priv->mii_bus->write = ftgmac100_mdiobus_write; - priv->mii_bus->irq = priv->phy_irq; - - for (i = 0; i < PHY_MAX_ADDR; i++) - priv->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(priv->mii_bus); if (err) { diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index bee32a9d9876..d1ca45fbb164 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -54,6 +54,7 @@ config FEC_MPC52xx_MDIO If compiled as module, it will be called fec_mpc52xx_phy. 
source "drivers/net/ethernet/freescale/fs_enet/Kconfig" +source "drivers/net/ethernet/freescale/fman/Kconfig" config FSL_PQ_MDIO tristate "Freescale PQ MDIO" diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 71debd1c18c9..4097c58d17a7 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -17,3 +17,5 @@ gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o + +obj-$(CONFIG_FSL_FMAN) += fman/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2a32209ffbf..502da6f48f95 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -48,6 +48,7 @@ #include <linux/irq.h> #include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/mdio.h> #include <linux/phy.h> #include <linux/fec.h> #include <linux/of.h> @@ -1926,11 +1927,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if ((fep->mii_bus->phy_mask & (1 << phy_id))) - continue; - if (fep->mii_bus->phy_map[phy_id] == NULL) - continue; - if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) continue; if (dev_id--) continue; @@ -1972,9 +1969,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); + phy_attached_info(phy_dev); return 0; } @@ -1985,7 +1980,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct device_node *node; - int err = -ENXIO, i; + int err = -ENXIO; u32 mii_speed, holdtime; /* @@ -2067,15 +2062,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev; - fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!fep->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - fep->mii_bus->irq[i] = PHY_POLL; - node = of_get_child_by_name(pdev->dev.of_node, "mdio"); if (node) { err = of_mdiobus_register(fep->mii_bus, node); @@ -2085,7 +2071,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) } if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; mii_cnt++; @@ -2095,8 +2081,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) return 0; -err_out_free_mdio_irq: - kfree(fep->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(fep->mii_bus); err_out: @@ -2107,7 +2091,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) { if (--mii_cnt == 0) { mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); mdiobus_free(fep->mii_bus); } } @@ -3277,7 +3260,6 @@ static void fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) { struct device_node *np = pdev->dev.of_node; - int err; *num_tx = *num_rx = 1; @@ -3285,13 +3267,9 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) return; /* parse the num of tx and rx queues */ - err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); - if (err) - *num_tx = 1; + 
of_property_read_u32(np, "fsl,num-tx-queues", num_tx); - err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); - if (err) - *num_rx = 1; + of_property_read_u32(np, "fsl,num-rx-queues", num_rx); if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index afe7f39cdd7c..25553ee857b4 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1084,27 +1084,23 @@ static struct platform_driver mpc52xx_fec_driver = { /* Module */ /* ======================================================================== */ +static struct platform_driver * const drivers[] = { +#ifdef CONFIG_FEC_MPC52xx_MDIO + &mpc52xx_fec_mdio_driver, +#endif + &mpc52xx_fec_driver, +}; + static int __init mpc52xx_fec_init(void) { -#ifdef CONFIG_FEC_MPC52xx_MDIO - int ret; - ret = platform_driver_register(&mpc52xx_fec_mdio_driver); - if (ret) { - pr_err("failed to register mdio driver\n"); - return ret; - } -#endif - return platform_driver_register(&mpc52xx_fec_driver); + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit mpc52xx_fec_exit(void) { - platform_driver_unregister(&mpc52xx_fec_driver); -#ifdef CONFIG_FEC_MPC52xx_MDIO - platform_driver_unregister(&mpc52xx_fec_mdio_driver); -#endif + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 1e647beaf989..b5497e308302 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -22,7 +22,6 @@ struct mpc52xx_fec_mdio_priv { struct mpc52xx_fec __iomem *regs; - int mdio_irqs[PHY_MAX_ADDR]; }; static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, @@ -83,9 +82,6 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) bus->read = mpc52xx_fec_mdio_read; bus->write = mpc52xx_fec_mdio_write; - /* setup irqs */ - bus->irq = priv->mdio_irqs; - /* setup registers */ err = of_address_to_resource(np, 0, &res); if (err) diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig new file mode 100644 index 000000000000..79b7c84b7869 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -0,0 +1,9 @@ +config FSL_FMAN + tristate "FMan support" + depends on FSL_SOC || COMPILE_TEST + select GENERIC_ALLOCATOR + select PHYLIB + default n + help + Freescale Data-Path Acceleration Architecture Frame Manager + (FMan) support diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile new file mode 100644 index 000000000000..51fd2e6c1b84 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Makefile @@ -0,0 +1,7 @@ +subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman + +obj-y += fsl_fman.o fsl_fman_mac.o fsl_mac.o + +fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_port.o +fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o +fsl_mac-objs += mac.o diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c new file mode 100644 index 000000000000..623aa1c8ebc6 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -0,0 +1,2871 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman.h" +#include "fman_muram.h" + +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/clk.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/interrupt.h> +#include <linux/libfdt_env.h> + +/* General defines */ +#define FMAN_LIODN_TBL 64 /* size of LIODN table */ +#define MAX_NUM_OF_MACS 10 +#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4 +#define BASE_RX_PORTID 0x08 +#define BASE_TX_PORTID 0x28 + +/* Modules registers offsets */ +#define BMI_OFFSET 0x00080000 +#define QMI_OFFSET 0x00080400 +#define DMA_OFFSET 0x000C2000 +#define FPM_OFFSET 0x000C3000 +#define IMEM_OFFSET 0x000C4000 +#define CGP_OFFSET 0x000DB000 + +/* Exceptions bit map */ +#define EX_DMA_BUS_ERROR 0x80000000 +#define EX_DMA_READ_ECC 0x40000000 +#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000 +#define EX_DMA_FM_WRITE_ECC 0x10000000 +#define EX_FPM_STALL_ON_TASKS 0x08000000 +#define EX_FPM_SINGLE_ECC 0x04000000 +#define EX_FPM_DOUBLE_ECC 0x02000000 +#define EX_QMI_SINGLE_ECC 0x01000000 +#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000 +#define EX_QMI_DOUBLE_ECC 0x00400000 +#define EX_BMI_LIST_RAM_ECC 0x00200000 +#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000 +#define EX_BMI_STATISTICS_RAM_ECC 0x00080000 +#define EX_IRAM_ECC 0x00040000 +#define EX_MURAM_ECC 0x00020000 +#define EX_BMI_DISPATCH_RAM_ECC 0x00010000 +#define EX_DMA_SINGLE_PORT_ECC 0x00008000 + +/* DMA defines */ +/* masks */ +#define DMA_MODE_BER 0x00200000 +#define DMA_MODE_ECC 0x00000020 +#define DMA_MODE_SECURE_PROT 0x00000800 +#define DMA_MODE_AXI_DBG_MASK 0x0F000000 + +#define DMA_TRANSFER_PORTID_MASK 0xFF000000 +#define DMA_TRANSFER_TNUM_MASK 0x00FF0000 +#define DMA_TRANSFER_LIODN_MASK 0x00000FFF + 
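/*
 * Editor's sketch (not part of the patch): the DMA_TRANSFER_* masks above,
 * together with the matching shifts defined below, describe the layout of
 * fmdmtcid, the DMA transfer bus communication ID register.  dma_err_event()
 * later in this file decodes a bus error from it roughly as:
 *
 *	port_id = (com_id & DMA_TRANSFER_PORTID_MASK) >> DMA_TRANSFER_PORTID_SHIFT;
 *	tnum    = (com_id & DMA_TRANSFER_TNUM_MASK) >> DMA_TRANSFER_TNUM_SHIFT;
 *	liodn   =  com_id & DMA_TRANSFER_LIODN_MASK;
 */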
+#define DMA_STATUS_BUS_ERR 0x08000000 +#define DMA_STATUS_READ_ECC 0x04000000 +#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000 +#define DMA_STATUS_FM_WRITE_ECC 0x01000000 +#define DMA_STATUS_FM_SPDAT_ECC 0x00080000 + +#define DMA_MODE_CACHE_OR_SHIFT 30 +#define DMA_MODE_AXI_DBG_SHIFT 24 +#define DMA_MODE_CEN_SHIFT 13 +#define DMA_MODE_CEN_MASK 0x00000007 +#define DMA_MODE_DBG_SHIFT 7 +#define DMA_MODE_AID_MODE_SHIFT 4 + +#define DMA_THRESH_COMMQ_SHIFT 24 +#define DMA_THRESH_READ_INT_BUF_SHIFT 16 +#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f +#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f + +#define DMA_TRANSFER_PORTID_SHIFT 24 +#define DMA_TRANSFER_TNUM_SHIFT 16 + +#define DMA_CAM_SIZEOF_ENTRY 0x40 +#define DMA_CAM_UNITS 8 + +#define DMA_LIODN_SHIFT 16 +#define DMA_LIODN_BASE_MASK 0x00000FFF + +/* FPM defines */ +#define FPM_EV_MASK_DOUBLE_ECC 0x80000000 +#define FPM_EV_MASK_STALL 0x40000000 +#define FPM_EV_MASK_SINGLE_ECC 0x20000000 +#define FPM_EV_MASK_RELEASE_FM 0x00010000 +#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000 +#define FPM_EV_MASK_STALL_EN 0x00004000 +#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000 +#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008 +#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004 + +#define FPM_RAM_MURAM_ECC 0x00008000 +#define FPM_RAM_IRAM_ECC 0x00004000 +#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000 +#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000 +#define FPM_RAM_IRAM_ECC_EN 0x40000000 +#define FPM_RAM_RAMS_ECC_EN 0x80000000 +#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000 + +#define FPM_REV1_MAJOR_MASK 0x0000FF00 +#define FPM_REV1_MINOR_MASK 0x000000FF + +#define FPM_DISP_LIMIT_SHIFT 24 + +#define FPM_PRT_FM_CTL1 0x00000001 +#define FPM_PRT_FM_CTL2 0x00000002 +#define FPM_PORT_FM_CTL_PORTID_SHIFT 24 +#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16 + +#define FPM_THR1_PRS_SHIFT 24 +#define FPM_THR1_KG_SHIFT 16 +#define FPM_THR1_PLCR_SHIFT 8 +#define FPM_THR1_BMI_SHIFT 0 + +#define FPM_THR2_QMI_ENQ_SHIFT 24 +#define FPM_THR2_QMI_DEQ_SHIFT 0 +#define FPM_THR2_FM_CTL1_SHIFT 16 +#define FPM_THR2_FM_CTL2_SHIFT 8 + +#define FPM_EV_MASK_CAT_ERR_SHIFT 1 +#define FPM_EV_MASK_DMA_ERR_SHIFT 0 + +#define FPM_REV1_MAJOR_SHIFT 8 + +#define FPM_RSTC_FM_RESET 0x80000000 +#define FPM_RSTC_MAC0_RESET 0x40000000 +#define FPM_RSTC_MAC1_RESET 0x20000000 +#define FPM_RSTC_MAC2_RESET 0x10000000 +#define FPM_RSTC_MAC3_RESET 0x08000000 +#define FPM_RSTC_MAC8_RESET 0x04000000 +#define FPM_RSTC_MAC4_RESET 0x02000000 +#define FPM_RSTC_MAC5_RESET 0x01000000 +#define FPM_RSTC_MAC6_RESET 0x00800000 +#define FPM_RSTC_MAC7_RESET 0x00400000 +#define FPM_RSTC_MAC9_RESET 0x00200000 + +#define FPM_TS_INT_SHIFT 16 +#define FPM_TS_CTL_EN 0x80000000 + +/* BMI defines */ +#define BMI_INIT_START 0x80000000 +#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000 +#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000 +#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000 +#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000 +#define BMI_NUM_OF_TASKS_MASK 0x3F000000 +#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000 +#define BMI_NUM_OF_DMAS_MASK 0x00000F00 +#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F +#define BMI_FIFO_SIZE_MASK 0x000003FF +#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000 +#define BMI_CFG2_DMAS_MASK 0x0000003F +#define BMI_CFG2_TASKS_MASK 0x0000003F + +#define BMI_CFG2_TASKS_SHIFT 16 +#define BMI_CFG2_DMAS_SHIFT 0 +#define BMI_CFG1_FIFO_SIZE_SHIFT 16 +#define BMI_NUM_OF_TASKS_SHIFT 24 +#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16 +#define BMI_NUM_OF_DMAS_SHIFT 8 +#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0 + +#define BMI_FIFO_ALIGN 
0x100 + +#define BMI_EXTRA_FIFO_SIZE_SHIFT 16 + +/* QMI defines */ +#define QMI_CFG_ENQ_EN 0x80000000 +#define QMI_CFG_DEQ_EN 0x40000000 +#define QMI_CFG_EN_COUNTERS 0x10000000 +#define QMI_CFG_DEQ_MASK 0x0000003F +#define QMI_CFG_ENQ_MASK 0x00003F00 +#define QMI_CFG_ENQ_SHIFT 8 + +#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000 +#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000 +#define QMI_INTR_EN_SINGLE_ECC 0x80000000 + +#define QMI_GS_HALT_NOT_BUSY 0x00000002 + +/* IRAM defines */ +#define IRAM_IADD_AIE 0x80000000 +#define IRAM_READY 0x80000000 + +/* Default values */ +#define DEFAULT_CATASTROPHIC_ERR 0 +#define DEFAULT_DMA_ERR 0 +#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM +#define DEFAULT_DMA_COMM_Q_LOW 0x2A +#define DEFAULT_DMA_COMM_Q_HIGH 0x3F +#define DEFAULT_CACHE_OVERRIDE 0 +#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64 +#define DEFAULT_DMA_DBG_CNT_MODE 0 +#define DEFAULT_DMA_SOS_EMERGENCY 0 +#define DEFAULT_DMA_WATCHDOG 0 +#define DEFAULT_DISP_LIMIT 0 +#define DEFAULT_PRS_DISP_TH 16 +#define DEFAULT_PLCR_DISP_TH 16 +#define DEFAULT_KG_DISP_TH 16 +#define DEFAULT_BMI_DISP_TH 16 +#define DEFAULT_QMI_ENQ_DISP_TH 16 +#define DEFAULT_QMI_DEQ_DISP_TH 16 +#define DEFAULT_FM_CTL1_DISP_TH 16 +#define DEFAULT_FM_CTL2_DISP_TH 16 + +#define DFLT_AXI_DBG_NUM_OF_BEATS 1 + +#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \ + ((dma_thresh_max_buf + 1) / 2) +#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \ + ((dma_thresh_max_buf + 1) * 3 / 4) +#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \ + ((dma_thresh_max_buf + 1) / 2) +#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\ + ((dma_thresh_max_buf + 1) * 3 / 4) + +#define DMA_COMM_Q_LOW_FMAN_V3 0x2A +#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \ + ((dma_thresh_max_commq + 1) / 2) +#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \ + ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \ + DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq)) + +#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f +#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \ + ((dma_thresh_max_commq + 1) * 3 / 4) +#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \ + ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \ + DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq)) + +#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59 +#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124 +#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \ + ((major == 6) ? ((minor == 1 || minor == 4) ? \ + TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \ + bmi_max_num_of_tasks) + +#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64 +#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32 +#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \ + (major == 6 ? 
DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \ + DMA_CAM_NUM_OF_ENTRIES_FMAN_V2) + +#define FM_TIMESTAMP_1_USEC_BIT 8 + +/* Defines used for enabling/disabling FMan interrupts */ +#define ERR_INTR_EN_DMA 0x00010000 +#define ERR_INTR_EN_FPM 0x80000000 +#define ERR_INTR_EN_BMI 0x00800000 +#define ERR_INTR_EN_QMI 0x00400000 +#define ERR_INTR_EN_MURAM 0x00040000 +#define ERR_INTR_EN_MAC0 0x00004000 +#define ERR_INTR_EN_MAC1 0x00002000 +#define ERR_INTR_EN_MAC2 0x00001000 +#define ERR_INTR_EN_MAC3 0x00000800 +#define ERR_INTR_EN_MAC4 0x00000400 +#define ERR_INTR_EN_MAC5 0x00000200 +#define ERR_INTR_EN_MAC6 0x00000100 +#define ERR_INTR_EN_MAC7 0x00000080 +#define ERR_INTR_EN_MAC8 0x00008000 +#define ERR_INTR_EN_MAC9 0x00000040 + +#define INTR_EN_QMI 0x40000000 +#define INTR_EN_MAC0 0x00080000 +#define INTR_EN_MAC1 0x00040000 +#define INTR_EN_MAC2 0x00020000 +#define INTR_EN_MAC3 0x00010000 +#define INTR_EN_MAC4 0x00000040 +#define INTR_EN_MAC5 0x00000020 +#define INTR_EN_MAC6 0x00000008 +#define INTR_EN_MAC7 0x00000002 +#define INTR_EN_MAC8 0x00200000 +#define INTR_EN_MAC9 0x00100000 +#define INTR_EN_REV0 0x00008000 +#define INTR_EN_REV1 0x00004000 +#define INTR_EN_REV2 0x00002000 +#define INTR_EN_REV3 0x00001000 +#define INTR_EN_TMR 0x01000000 + +enum fman_dma_aid_mode { + FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */ + FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */ +}; + +struct fman_iram_regs { + u32 iadd; /* FM IRAM instruction address register */ + u32 idata; /* FM IRAM instruction data register */ + u32 itcfg; /* FM IRAM timing config register */ + u32 iready; /* FM IRAM ready register */ +}; + +struct fman_fpm_regs { + u32 fmfp_tnc; /* FPM TNUM Control 0x00 */ + u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */ + u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */ + u32 fmfp_mxd; /* FPM Flush Control 0x0c */ + u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */ + u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */ + u32 fm_epi; /* FM Error Pending Interrupts 0x18 */ + u32 fm_rie; /* FM Error Interrupt Enable 0x1c */ + u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */ + u32 res0030[4]; /* res 0x30 - 0x3f */ + u32 fmfp_cee[4]; /* PM FMan-Controller Event 1-4 0x40-0x4f */ + u32 res0050[4]; /* res 0x50-0x5f */ + u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */ + u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */ + u32 fmfp_tsp; /* FPM Time Stamp 0x68 */ + u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */ + u32 fm_rcr; /* FM Rams Control 0x70 */ + u32 fmfp_extc; /* FPM External Requests Control 0x74 */ + u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */ + u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */ + u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */ + u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */ + u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */ + u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */ + u32 fm_rstc; /* FM Reset Command 0xcc */ + u32 fm_cld; /* FM Classifier Debug 0xd0 */ + u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */ + u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */ + u32 fmfp_ee; /* FPM Event&Mask 0xdc */ + u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */ + u32 res00f0[4]; /* res 0xf0-0xff */ + u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */ + u32 res01c8[14]; /* res 0x1c8-0x1ff */ + u32 fmfp_clfabc; /* FPM CLFABC 0x200 */ + u32 fmfp_clfcc; /* FPM CLFCC 0x204 */ + u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */ + u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */ + u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */ + u32 fmfp_clfamsk; /* FPM CLFAMSK 
0x214 */ + u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */ + u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */ + u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */ + u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */ + u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */ + u32 fmfp_decceh; /* FPM DECCEH 0x22c */ + u32 res0230[116]; /* res 0x230 - 0x3ff */ + u32 fmfp_ts[128]; /* 0x400: FPM Task Status 0x400 - 0x5ff */ + u32 res0600[0x400 - 384]; +}; + +struct fman_bmi_regs { + u32 fmbm_init; /* BMI Initialization 0x00 */ + u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */ + u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */ + u32 res000c[5]; /* 0x0c - 0x1f */ + u32 fmbm_ievr; /* Interrupt Event Register 0x20 */ + u32 fmbm_ier; /* Interrupt Enable Register 0x24 */ + u32 fmbm_ifr; /* Interrupt Force Register 0x28 */ + u32 res002c[5]; /* 0x2c - 0x3f */ + u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */ + u32 res0060[12]; /* 0x60 - 0x8f */ + u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */ + u32 res009c; /* 0x9c */ + u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */ + u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */ + u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */ + u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */ + u32 res0200; /* 0x200 */ + u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */ + u32 res0300; /* 0x300 */ + u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */ +}; + +struct fman_qmi_regs { + u32 fmqm_gc; /* General Configuration Register 0x00 */ + u32 res0004; /* 0x04 */ + u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */ + u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */ + u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */ + u32 fmqm_ie; /* Interrupt Event Register 0x14 */ + u32 fmqm_ien; /* Interrupt Enable Register 0x18 */ + u32 fmqm_if; /* Interrupt Force Register 0x1c */ + u32 fmqm_gs; /* Global Status Register 0x20 */ + u32 fmqm_ts; /* Task Status Register 0x24 */ + u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */ + u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */ + u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */ + u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */ + u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */ + u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */ + u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */ + u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */ + u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */ + u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */ + u32 res0050[7]; /* 0x50 - 0x6b */ + u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */ + u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */ + u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */ + u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */ + u32 res007c; /* 0x7c */ + u32 fmqm_dtc; /* 0x80 Debug Trap Counter 0x80 */ + u32 fmqm_efddd; /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */ + u32 res0088[2]; /* 0x88 - 0x8f */ + struct { + u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */ + u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */ + u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */ + u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */ + u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */ + u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */ + u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */ + u32 res001c; /* 0x1c */ + } dbg_traps[3]; /* 0x90 - 0xef */ + u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */ +}; + +struct fman_dma_regs { + u32 fmdmsr; /* FM DMA status register 0x00 */ + u32 fmdmmr; /* FM DMA mode register 0x04 */ + u32 fmdmtr; /* FM DMA 
bus threshold register 0x08 */ + u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */ + u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */ + u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */ + u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */ + u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */ + u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */ + u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */ + u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */ + u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */ + u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */ + u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */ + u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */ + u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */ + u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */ + u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */ + u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */ + u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */ + u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */ + u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */ + u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */ + u32 res005c; /* 0x5c */ + u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */ + u32 res00e0[0x400 - 56]; +}; + +/* Structure that holds current FMan state. + * Used for saving run time information. + */ +struct fman_state_struct { + u8 fm_id; + u16 fm_clk_freq; + struct fman_rev_info rev_info; + bool enabled_time_stamp; + u8 count1_micro_bit; + u8 total_num_of_tasks; + u8 accumulated_num_of_tasks; + u32 accumulated_fifo_size; + u8 accumulated_num_of_open_dmas; + u8 accumulated_num_of_deq_tnums; + u32 exceptions; + u32 extra_fifo_pool_size; + u8 extra_tasks_pool_size; + u8 extra_open_dmas_pool_size; + u16 port_mfl[MAX_NUM_OF_MACS]; + u16 mac_mfl[MAX_NUM_OF_MACS]; + + /* SOC specific */ + u32 fm_iram_size; + /* DMA */ + u32 dma_thresh_max_commq; + u32 dma_thresh_max_buf; + u32 max_num_of_open_dmas; + /* QMI */ + u32 qmi_max_num_of_tnums; + u32 qmi_def_tnums_thresh; + /* BMI */ + u32 bmi_max_num_of_tasks; + u32 bmi_max_fifo_size; + /* General */ + u32 fm_port_num_of_cg; + u32 num_of_rx_ports; + u32 total_fifo_size; + + u32 qman_channel_base; + u32 num_of_qman_channels; + + struct resource *res; +}; + +/* Structure that holds FMan initial configuration */ +struct fman_cfg { + u8 disp_limit_tsh; + u8 prs_disp_tsh; + u8 plcr_disp_tsh; + u8 kg_disp_tsh; + u8 bmi_disp_tsh; + u8 qmi_enq_disp_tsh; + u8 qmi_deq_disp_tsh; + u8 fm_ctl1_disp_tsh; + u8 fm_ctl2_disp_tsh; + int dma_cache_override; + enum fman_dma_aid_mode dma_aid_mode; + u32 dma_axi_dbg_num_of_beats; + u32 dma_cam_num_of_entries; + u32 dma_watchdog; + u8 dma_comm_qtsh_asrt_emer; + u32 dma_write_buf_tsh_asrt_emer; + u32 dma_read_buf_tsh_asrt_emer; + u8 dma_comm_qtsh_clr_emer; + u32 dma_write_buf_tsh_clr_emer; + u32 dma_read_buf_tsh_clr_emer; + u32 dma_sos_emergency; + int dma_dbg_cnt_mode; + int catastrophic_err; + int dma_err; + u32 exceptions; + u16 clk_freq; + u32 cam_base_addr; + u32 fifo_base_addr; + u32 total_fifo_size; + u32 total_num_of_tasks; + u32 qmi_def_tnums_thresh; +}; + +/* Structure that holds information received from device tree */ +struct fman_dts_params { + void __iomem *base_addr; /* FMan virtual address */ + struct resource *res; /* FMan memory resource */ + u8 id; /* FMan ID */ + + int err_irq; /* FMan Error IRQ */ + + u16 clk_freq; /* 
FMan clock freq (In Mhz) */ + + u32 qman_channel_base; /* QMan channels base */ + u32 num_of_qman_channels; /* Number of QMan channels */ + + struct resource muram_res; /* MURAM resource */ +}; + +/** fman_exceptions_cb + * fman - Pointer to FMan + * exception - The exception. + * + * Exceptions user callback routine, will be called upon an exception + * passing the exception identification. + * + * Return: irq status + */ +typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman, + enum fman_exceptions exception); + +/** fman_bus_error_cb + * fman - Pointer to FMan + * port_id - Port id + * addr - Address that caused the error + * tnum - Owner of error + * liodn - Logical IO device number + * + * Bus error user callback routine, will be called upon bus error, + * passing parameters describing the errors and the owner. + * + * Return: IRQ status + */ +typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id, + u64 addr, u8 tnum, u16 liodn); + +struct fman { + struct device *dev; + void __iomem *base_addr; + struct fman_intr_src intr_mng[FMAN_EV_CNT]; + + struct fman_fpm_regs __iomem *fpm_regs; + struct fman_bmi_regs __iomem *bmi_regs; + struct fman_qmi_regs __iomem *qmi_regs; + struct fman_dma_regs __iomem *dma_regs; + fman_exceptions_cb *exception_cb; + fman_bus_error_cb *bus_error_cb; + /* Spinlock for FMan use */ + spinlock_t spinlock; + struct fman_state_struct *state; + + struct fman_cfg *cfg; + struct muram_info *muram; + /* cam section in muram */ + int cam_offset; + size_t cam_size; + /* Fifo in MURAM */ + int fifo_offset; + size_t fifo_size; + + u32 liodn_base[64]; + u32 liodn_offset[64]; + + struct fman_dts_params dts_params; +}; + +static irqreturn_t fman_exceptions(struct fman *fman, + enum fman_exceptions exception) +{ + dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n", + __func__, fman->state->fm_id, exception); + + return IRQ_HANDLED; +} + +static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id, + u64 __maybe_unused addr, + u8 __maybe_unused tnum, + u16 __maybe_unused liodn) +{ + dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n", + __func__, fman->state->fm_id, port_id); + + return IRQ_HANDLED; +} + +static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id) +{ + if (fman->intr_mng[id].isr_cb) { + fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id) +{ + u8 sw_port_id = 0; + + if (hw_port_id >= BASE_TX_PORTID) + sw_port_id = hw_port_id - BASE_TX_PORTID; + else if (hw_port_id >= BASE_RX_PORTID) + sw_port_id = hw_port_id - BASE_RX_PORTID; + else + sw_port_id = 0; + + return sw_port_id; +} + +static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg, + u8 port_id) +{ + u32 tmp = 0; + + tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT; + + tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1; + + /* order restoration */ + if (port_id % 2) + tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT; + else + tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT; + + iowrite32be(tmp, &fpm_rg->fmfp_prc); +} + +static void set_port_liodn(struct fman *fman, u8 port_id, + u32 liodn_base, u32 liodn_ofst) +{ + u32 tmp; + + /* set LIODN base for this port */ + tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]); + if (port_id % 2) { + tmp &= ~DMA_LIODN_BASE_MASK; + tmp |= liodn_base; + } else { + tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT); + tmp |= liodn_base << DMA_LIODN_SHIFT; + } + 
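	/*
	 * Editor's note (not part of the patch): each fmdmplr register holds
	 * the LIODN base for a pair of ports -- odd port IDs occupy the low
	 * 12 bits (DMA_LIODN_BASE_MASK), even port IDs the same field shifted
	 * up by DMA_LIODN_SHIFT -- so the read-modify-write above only touches
	 * one half of the register before the write-back below.
	 */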
iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]); + iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]); +} + +static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg) +{ + u32 tmp; + + tmp = ioread32be(&fpm_rg->fm_rcr); + if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL) + iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); + else + iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN | + FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); +} + +static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg) +{ + u32 tmp; + + tmp = ioread32be(&fpm_rg->fm_rcr); + if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL) + iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); + else + iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN), + &fpm_rg->fm_rcr); +} + +static void fman_defconfig(struct fman_cfg *cfg) +{ + memset(cfg, 0, sizeof(struct fman_cfg)); + + cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR; + cfg->dma_err = DEFAULT_DMA_ERR; + cfg->dma_aid_mode = DEFAULT_AID_MODE; + cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW; + cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH; + cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE; + cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES; + cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE; + cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY; + cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG; + cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT; + cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH; + cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH; + cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH; + cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH; + cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH; + cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH; + cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH; + cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH; +} + +static int dma_init(struct fman *fman) +{ + struct fman_dma_regs __iomem *dma_rg = fman->dma_regs; + struct fman_cfg *cfg = fman->cfg; + u32 tmp_reg; + + /* Init DMA Registers */ + + /* clear status reg events */ + tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC | + DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC); + iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr); + + /* configure mode register */ + tmp_reg = 0; + tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT; + if (cfg->exceptions & EX_DMA_BUS_ERROR) + tmp_reg |= DMA_MODE_BER; + if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) | + (cfg->exceptions & EX_DMA_READ_ECC) | + (cfg->exceptions & EX_DMA_FM_WRITE_ECC)) + tmp_reg |= DMA_MODE_ECC; + if (cfg->dma_axi_dbg_num_of_beats) + tmp_reg |= (DMA_MODE_AXI_DBG_MASK & + ((cfg->dma_axi_dbg_num_of_beats - 1) + << DMA_MODE_AXI_DBG_SHIFT)); + + tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) & + DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT; + tmp_reg |= DMA_MODE_SECURE_PROT; + tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT; + tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT; + + iowrite32be(tmp_reg, &dma_rg->fmdmmr); + + /* configure thresholds register */ + tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer << + DMA_THRESH_COMMQ_SHIFT); + tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer & + DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT; + tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer & + DMA_THRESH_WRITE_INT_BUF_MASK; + + iowrite32be(tmp_reg, &dma_rg->fmdmtr); + + /* configure hysteresis register */ + tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer << + DMA_THRESH_COMMQ_SHIFT); + tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer & + DMA_THRESH_READ_INT_BUF_MASK) << 
DMA_THRESH_READ_INT_BUF_SHIFT; + tmp_reg |= cfg->dma_write_buf_tsh_clr_emer & + DMA_THRESH_WRITE_INT_BUF_MASK; + + iowrite32be(tmp_reg, &dma_rg->fmdmhy); + + /* configure emergency threshold */ + iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr); + + /* configure Watchdog */ + iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr); + + iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr); + + /* Allocate MURAM for CAM */ + fman->cam_size = + (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY); + fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size); + if (IS_ERR_VALUE(fman->cam_offset)) { + dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n", + __func__); + return -ENOMEM; + } + + if (fman->state->rev_info.major == 2) { + u32 __iomem *cam_base_addr; + + fman_muram_free_mem(fman->muram, fman->cam_offset, + fman->cam_size); + + fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128; + fman->cam_offset = fman_muram_alloc(fman->muram, + fman->cam_size); + if (IS_ERR_VALUE(fman->cam_offset)) { + dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n", + __func__); + return -ENOMEM; + } + + if (fman->cfg->dma_cam_num_of_entries % 8 || + fman->cfg->dma_cam_num_of_entries > 32) { + dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n", + __func__); + return -EINVAL; + } + + cam_base_addr = (u32 __iomem *) + fman_muram_offset_to_vbase(fman->muram, + fman->cam_offset); + iowrite32be(~((1 << + (32 - fman->cfg->dma_cam_num_of_entries)) - 1), + cam_base_addr); + } + + fman->cfg->cam_base_addr = fman->cam_offset; + + return 0; +} + +static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg) +{ + u32 tmp_reg; + int i; + + /* Init FPM Registers */ + + tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT); + iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd); + + tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) | + ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) | + ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) | + ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT)); + iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1); + + tmp_reg = + (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) | + ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) | + ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) | + ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT)); + iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2); + + /* define exceptions and error behavior */ + tmp_reg = 0; + /* Clear events */ + tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC | + FPM_EV_MASK_SINGLE_ECC); + /* enable interrupts */ + if (cfg->exceptions & EX_FPM_STALL_ON_TASKS) + tmp_reg |= FPM_EV_MASK_STALL_EN; + if (cfg->exceptions & EX_FPM_SINGLE_ECC) + tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN; + if (cfg->exceptions & EX_FPM_DOUBLE_ECC) + tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN; + tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT); + tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT); + /* FMan is not halted upon external halt activation */ + tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT; + /* Man is not halted upon Unrecoverable ECC error behavior */ + tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT; + iowrite32be(tmp_reg, &fpm_rg->fmfp_ee); + + /* clear all fmCtls event registers */ + for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++) + iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]); + + /* RAM ECC - enable and clear events */ + /* first we need to clear all parser memory, + * as it is uninitialized and may cause ECC errors + */ + /* event bits */ + tmp_reg = 
(FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC); + + iowrite32be(tmp_reg, &fpm_rg->fm_rcr); + + tmp_reg = 0; + if (cfg->exceptions & EX_IRAM_ECC) { + tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN; + enable_rams_ecc(fpm_rg); + } + if (cfg->exceptions & EX_MURAM_ECC) { + tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN; + enable_rams_ecc(fpm_rg); + } + iowrite32be(tmp_reg, &fpm_rg->fm_rie); +} + +static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg, + struct fman_cfg *cfg) +{ + u32 tmp_reg; + + /* Init BMI Registers */ + + /* define common resources */ + tmp_reg = cfg->fifo_base_addr; + tmp_reg = tmp_reg / BMI_FIFO_ALIGN; + + tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) << + BMI_CFG1_FIFO_SIZE_SHIFT); + iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1); + + tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) << + BMI_CFG2_TASKS_SHIFT; + /* num of DMA's will be dynamically updated when each port is set */ + iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2); + + /* define unmaskable exceptions, enable and clear events */ + tmp_reg = 0; + iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC | + BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC | + BMI_ERR_INTR_EN_STATISTICS_RAM_ECC | + BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr); + + if (cfg->exceptions & EX_BMI_LIST_RAM_ECC) + tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC; + if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC) + tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; + if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC) + tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC) + tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + iowrite32be(tmp_reg, &bmi_rg->fmbm_ier); +} + +static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg, + struct fman_cfg *cfg) +{ + u32 tmp_reg; + + /* Init QMI Registers */ + + /* Clear error interrupt events */ + + iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF, + &qmi_rg->fmqm_eie); + tmp_reg = 0; + if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID) + tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; + if (cfg->exceptions & EX_QMI_DOUBLE_ECC) + tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC; + /* enable events */ + iowrite32be(tmp_reg, &qmi_rg->fmqm_eien); + + tmp_reg = 0; + /* Clear interrupt events */ + iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie); + if (cfg->exceptions & EX_QMI_SINGLE_ECC) + tmp_reg |= QMI_INTR_EN_SINGLE_ECC; + /* enable events */ + iowrite32be(tmp_reg, &qmi_rg->fmqm_ien); +} + +static int enable(struct fman *fman, struct fman_cfg *cfg) +{ + u32 cfg_reg = 0; + + /* Enable all modules */ + + /* clear&enable global counters - calculate reg and save for later, + * because it's the same reg for QMI enable + */ + cfg_reg = QMI_CFG_EN_COUNTERS; + + /* Set enqueue and dequeue thresholds */ + cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh; + + iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init); + iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN, + &fman->qmi_regs->fmqm_gc); + + return 0; +} + +static int set_exception(struct fman *fman, + enum fman_exceptions exception, bool enable) +{ + u32 tmp; + + switch (exception) { + case FMAN_EX_DMA_BUS_ERROR: + tmp = ioread32be(&fman->dma_regs->fmdmmr); + if (enable) + tmp |= DMA_MODE_BER; + else + tmp &= ~DMA_MODE_BER; + /* disable bus error */ + iowrite32be(tmp, &fman->dma_regs->fmdmmr); + break; + case FMAN_EX_DMA_READ_ECC: + case FMAN_EX_DMA_SYSTEM_WRITE_ECC: + case FMAN_EX_DMA_FM_WRITE_ECC: + tmp = ioread32be(&fman->dma_regs->fmdmmr); + if (enable) + tmp |= DMA_MODE_ECC; + else + tmp &= ~DMA_MODE_ECC; + 
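	/*
	 * Editor's note (not part of the patch): the READ/SYSTEM_WRITE/FM_WRITE
	 * ECC cases above fall through to this point because the mode register
	 * exposes only the single DMA_MODE_ECC enable bit for all three, so
	 * these exceptions can only be enabled or disabled together; the
	 * write-back below commits the change.
	 */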
iowrite32be(tmp, &fman->dma_regs->fmdmmr); + break; + case FMAN_EX_FPM_STALL_ON_TASKS: + tmp = ioread32be(&fman->fpm_regs->fmfp_ee); + if (enable) + tmp |= FPM_EV_MASK_STALL_EN; + else + tmp &= ~FPM_EV_MASK_STALL_EN; + iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); + break; + case FMAN_EX_FPM_SINGLE_ECC: + tmp = ioread32be(&fman->fpm_regs->fmfp_ee); + if (enable) + tmp |= FPM_EV_MASK_SINGLE_ECC_EN; + else + tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN; + iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); + break; + case FMAN_EX_FPM_DOUBLE_ECC: + tmp = ioread32be(&fman->fpm_regs->fmfp_ee); + if (enable) + tmp |= FPM_EV_MASK_DOUBLE_ECC_EN; + else + tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN; + iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); + break; + case FMAN_EX_QMI_SINGLE_ECC: + tmp = ioread32be(&fman->qmi_regs->fmqm_ien); + if (enable) + tmp |= QMI_INTR_EN_SINGLE_ECC; + else + tmp &= ~QMI_INTR_EN_SINGLE_ECC; + iowrite32be(tmp, &fman->qmi_regs->fmqm_ien); + break; + case FMAN_EX_QMI_DOUBLE_ECC: + tmp = ioread32be(&fman->qmi_regs->fmqm_eien); + if (enable) + tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC; + else + tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC; + iowrite32be(tmp, &fman->qmi_regs->fmqm_eien); + break; + case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: + tmp = ioread32be(&fman->qmi_regs->fmqm_eien); + if (enable) + tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; + else + tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF; + iowrite32be(tmp, &fman->qmi_regs->fmqm_eien); + break; + case FMAN_EX_BMI_LIST_RAM_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_BMI_STORAGE_PROFILE_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_BMI_STATISTICS_RAM_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_BMI_DISPATCH_RAM_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_IRAM_ECC: + tmp = ioread32be(&fman->fpm_regs->fm_rie); + if (enable) { + /* enable ECC if not enabled */ + enable_rams_ecc(fman->fpm_regs); + /* enable ECC interrupts */ + tmp |= FPM_IRAM_ECC_ERR_EX_EN; + } else { + /* ECC mechanism may be disabled, + * depending on driver status + */ + disable_rams_ecc(fman->fpm_regs); + tmp &= ~FPM_IRAM_ECC_ERR_EX_EN; + } + iowrite32be(tmp, &fman->fpm_regs->fm_rie); + break; + case FMAN_EX_MURAM_ECC: + tmp = ioread32be(&fman->fpm_regs->fm_rie); + if (enable) { + /* enable ECC if not enabled */ + enable_rams_ecc(fman->fpm_regs); + /* enable ECC interrupts */ + tmp |= FPM_MURAM_ECC_ERR_EX_EN; + } else { + /* ECC mechanism may be disabled, + * depending on driver status + */ + disable_rams_ecc(fman->fpm_regs); + tmp &= ~FPM_MURAM_ECC_ERR_EX_EN; + } + iowrite32be(tmp, &fman->fpm_regs->fm_rie); + break; + default: + return -EINVAL; + } + return 0; +} + +static void resume(struct fman_fpm_regs __iomem *fpm_rg) +{ + u32 tmp; + + tmp = ioread32be(&fpm_rg->fmfp_ee); + /* clear tmp_reg event bits in order not to clear standing events */ + tmp &= ~(FPM_EV_MASK_DOUBLE_ECC | + 
FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC); + tmp |= FPM_EV_MASK_RELEASE_FM; + + iowrite32be(tmp, &fpm_rg->fmfp_ee); +} + +static int fill_soc_specific_params(struct fman_state_struct *state) +{ + u8 minor = state->rev_info.minor; + /* P4080 - Major 2 + * P2041/P3041/P5020/P5040 - Major 3 + * Tx/Bx - Major 6 + */ + switch (state->rev_info.major) { + case 3: + state->bmi_max_fifo_size = 160 * 1024; + state->fm_iram_size = 64 * 1024; + state->dma_thresh_max_commq = 31; + state->dma_thresh_max_buf = 127; + state->qmi_max_num_of_tnums = 64; + state->qmi_def_tnums_thresh = 48; + state->bmi_max_num_of_tasks = 128; + state->max_num_of_open_dmas = 32; + state->fm_port_num_of_cg = 256; + state->num_of_rx_ports = 6; + state->total_fifo_size = 122 * 1024; + break; + + case 2: + state->bmi_max_fifo_size = 160 * 1024; + state->fm_iram_size = 64 * 1024; + state->dma_thresh_max_commq = 31; + state->dma_thresh_max_buf = 127; + state->qmi_max_num_of_tnums = 64; + state->qmi_def_tnums_thresh = 48; + state->bmi_max_num_of_tasks = 128; + state->max_num_of_open_dmas = 32; + state->fm_port_num_of_cg = 256; + state->num_of_rx_ports = 5; + state->total_fifo_size = 100 * 1024; + break; + + case 6: + state->dma_thresh_max_commq = 83; + state->dma_thresh_max_buf = 127; + state->qmi_max_num_of_tnums = 64; + state->qmi_def_tnums_thresh = 32; + state->fm_port_num_of_cg = 256; + + /* FManV3L */ + if (minor == 1 || minor == 4) { + state->bmi_max_fifo_size = 192 * 1024; + state->bmi_max_num_of_tasks = 64; + state->max_num_of_open_dmas = 32; + state->num_of_rx_ports = 5; + if (minor == 1) + state->fm_iram_size = 32 * 1024; + else + state->fm_iram_size = 64 * 1024; + state->total_fifo_size = 156 * 1024; + } + /* FManV3H */ + else if (minor == 0 || minor == 2 || minor == 3) { + state->bmi_max_fifo_size = 384 * 1024; + state->fm_iram_size = 64 * 1024; + state->bmi_max_num_of_tasks = 128; + state->max_num_of_open_dmas = 84; + state->num_of_rx_ports = 8; + state->total_fifo_size = 295 * 1024; + } else { + pr_err("Unsupported FManv3 version\n"); + return -EINVAL; + } + + break; + default: + pr_err("Unsupported FMan version\n"); + return -EINVAL; + } + + return 0; +} + +static bool is_init_done(struct fman_cfg *cfg) +{ + /* Checks if FMan driver parameters were initialized */ + if (!cfg) + return true; + + return false; +} + +static void free_init_resources(struct fman *fman) +{ + if (fman->cam_offset) + fman_muram_free_mem(fman->muram, fman->cam_offset, + fman->cam_size); + if (fman->fifo_offset) + fman_muram_free_mem(fman->muram, fman->fifo_offset, + fman->fifo_size); +} + +static irqreturn_t bmi_err_event(struct fman *fman) +{ + u32 event, mask, force; + struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&bmi_rg->fmbm_ievr); + mask = ioread32be(&bmi_rg->fmbm_ier); + event &= mask; + /* clear the forced events */ + force = ioread32be(&bmi_rg->fmbm_ifr); + if (force & event) + iowrite32be(force & ~event, &bmi_rg->fmbm_ifr); + /* clear the acknowledged events */ + iowrite32be(event, &bmi_rg->fmbm_ievr); + + if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC); + if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC); + if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC); + if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC); + + return ret; +} + 
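/*
 * Editor's note (not part of the patch): qmi_err_event() below mirrors
 * bmi_err_event() above -- read the error-event register, mask it with the
 * enabled interrupts, clear any forced events, acknowledge what remains and
 * dispatch the matching fman->exception_cb() callbacks.
 */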
+static irqreturn_t qmi_err_event(struct fman *fman) +{ + u32 event, mask, force; + struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&qmi_rg->fmqm_eie); + mask = ioread32be(&qmi_rg->fmqm_eien); + event &= mask; + + /* clear the forced events */ + force = ioread32be(&qmi_rg->fmqm_eif); + if (force & event) + iowrite32be(force & ~event, &qmi_rg->fmqm_eif); + /* clear the acknowledged events */ + iowrite32be(event, &qmi_rg->fmqm_eie); + + if (event & QMI_ERR_INTR_EN_DOUBLE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC); + if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF) + ret = fman->exception_cb(fman, + FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID); + + return ret; +} + +static irqreturn_t dma_err_event(struct fman *fman) +{ + u32 status, mask, com_id; + u8 tnum, port_id, relative_port_id; + u16 liodn; + struct fman_dma_regs __iomem *dma_rg = fman->dma_regs; + irqreturn_t ret = IRQ_NONE; + + status = ioread32be(&dma_rg->fmdmsr); + mask = ioread32be(&dma_rg->fmdmmr); + + /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */ + if ((mask & DMA_MODE_BER) != DMA_MODE_BER) + status &= ~DMA_STATUS_BUS_ERR; + + /* clear relevant bits if mask has no DMA_MODE_ECC */ + if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC) + status &= ~(DMA_STATUS_FM_SPDAT_ECC | + DMA_STATUS_READ_ECC | + DMA_STATUS_SYSTEM_WRITE_ECC | + DMA_STATUS_FM_WRITE_ECC); + + /* clear set events */ + iowrite32be(status, &dma_rg->fmdmsr); + + if (status & DMA_STATUS_BUS_ERR) { + u64 addr; + + addr = (u64)ioread32be(&dma_rg->fmdmtal); + addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32); + + com_id = ioread32be(&dma_rg->fmdmtcid); + port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >> + DMA_TRANSFER_PORTID_SHIFT)); + relative_port_id = + hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id); + tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >> + DMA_TRANSFER_TNUM_SHIFT); + liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK); + ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum, + liodn); + } + if (status & DMA_STATUS_FM_SPDAT_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC); + if (status & DMA_STATUS_READ_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC); + if (status & DMA_STATUS_SYSTEM_WRITE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC); + if (status & DMA_STATUS_FM_WRITE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC); + + return ret; +} + +static irqreturn_t fpm_err_event(struct fman *fman) +{ + u32 event; + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&fpm_rg->fmfp_ee); + /* clear the all occurred events */ + iowrite32be(event, &fpm_rg->fmfp_ee); + + if ((event & FPM_EV_MASK_DOUBLE_ECC) && + (event & FPM_EV_MASK_DOUBLE_ECC_EN)) + ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC); + if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN)) + ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS); + if ((event & FPM_EV_MASK_SINGLE_ECC) && + (event & FPM_EV_MASK_SINGLE_ECC_EN)) + ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC); + + return ret; +} + +static irqreturn_t muram_err_intr(struct fman *fman) +{ + u32 event, mask; + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&fpm_rg->fm_rcr); + mask = ioread32be(&fpm_rg->fm_rie); + + /* clear MURAM event bit (do not clear IRAM event) */ + iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr); + + if ((mask 
& FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC)) + ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC); + + return ret; +} + +static irqreturn_t qmi_event(struct fman *fman) +{ + u32 event, mask, force; + struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&qmi_rg->fmqm_ie); + mask = ioread32be(&qmi_rg->fmqm_ien); + event &= mask; + /* clear the forced events */ + force = ioread32be(&qmi_rg->fmqm_if); + if (force & event) + iowrite32be(force & ~event, &qmi_rg->fmqm_if); + /* clear the acknowledged events */ + iowrite32be(event, &qmi_rg->fmqm_ie); + + if (event & QMI_INTR_EN_SINGLE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC); + + return ret; +} + +static void enable_time_stamp(struct fman *fman) +{ + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + u16 fm_clk_freq = fman->state->fm_clk_freq; + u32 tmp, intgr, ts_freq; + u64 frac; + + ts_freq = (u32)(1 << fman->state->count1_micro_bit); + /* configure timestamp so that bit 8 will count 1 microsecond + * Find effective count rate at TIMESTAMP least significant bits: + * Effective_Count_Rate = 1MHz x 2^8 = 256MHz + * Find frequency ratio between effective count rate and the clock: + * Effective_Count_Rate / CLK e.g. for 600 MHz clock: + * 256/600 = 0.4266666... + */ + + intgr = ts_freq / fm_clk_freq; + /* we multiply by 2^16 to keep the fraction of the division + * we do not div back, since we write this value as a fraction + * see spec + */ + + frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq; + /* we check remainder of the division in order to round up if not int */ + if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq) + frac++; + + tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac; + iowrite32be(tmp, &fpm_rg->fmfp_tsc2); + + /* enable timestamp with original clock */ + iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1); + fman->state->enabled_time_stamp = true; +} + +static int clear_iram(struct fman *fman) +{ + struct fman_iram_regs __iomem *iram; + int i, count; + + iram = fman->base_addr + IMEM_OFFSET; + + /* Enable the auto-increment */ + iowrite32be(IRAM_IADD_AIE, &iram->iadd); + count = 100; + do { + udelay(1); + } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count); + if (count == 0) + return -EBUSY; + + for (i = 0; i < (fman->state->fm_iram_size / 4); i++) + iowrite32be(0xffffffff, &iram->idata); + + iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd); + count = 100; + do { + udelay(1); + } while ((ioread32be(&iram->idata) != 0xffffffff) && --count); + if (count == 0) + return -EBUSY; + + return 0; +} + +static u32 get_exception_flag(enum fman_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FMAN_EX_DMA_BUS_ERROR: + bit_mask = EX_DMA_BUS_ERROR; + break; + case FMAN_EX_DMA_SINGLE_PORT_ECC: + bit_mask = EX_DMA_SINGLE_PORT_ECC; + break; + case FMAN_EX_DMA_READ_ECC: + bit_mask = EX_DMA_READ_ECC; + break; + case FMAN_EX_DMA_SYSTEM_WRITE_ECC: + bit_mask = EX_DMA_SYSTEM_WRITE_ECC; + break; + case FMAN_EX_DMA_FM_WRITE_ECC: + bit_mask = EX_DMA_FM_WRITE_ECC; + break; + case FMAN_EX_FPM_STALL_ON_TASKS: + bit_mask = EX_FPM_STALL_ON_TASKS; + break; + case FMAN_EX_FPM_SINGLE_ECC: + bit_mask = EX_FPM_SINGLE_ECC; + break; + case FMAN_EX_FPM_DOUBLE_ECC: + bit_mask = EX_FPM_DOUBLE_ECC; + break; + case FMAN_EX_QMI_SINGLE_ECC: + bit_mask = EX_QMI_SINGLE_ECC; + break; + case FMAN_EX_QMI_DOUBLE_ECC: + bit_mask = EX_QMI_DOUBLE_ECC; + break; + case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: + bit_mask = 
EX_QMI_DEQ_FROM_UNKNOWN_PORTID; + break; + case FMAN_EX_BMI_LIST_RAM_ECC: + bit_mask = EX_BMI_LIST_RAM_ECC; + break; + case FMAN_EX_BMI_STORAGE_PROFILE_ECC: + bit_mask = EX_BMI_STORAGE_PROFILE_ECC; + break; + case FMAN_EX_BMI_STATISTICS_RAM_ECC: + bit_mask = EX_BMI_STATISTICS_RAM_ECC; + break; + case FMAN_EX_BMI_DISPATCH_RAM_ECC: + bit_mask = EX_BMI_DISPATCH_RAM_ECC; + break; + case FMAN_EX_MURAM_ECC: + bit_mask = EX_MURAM_ECC; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static int get_module_event(enum fman_event_modules module, u8 mod_id, + enum fman_intr_type intr_type) +{ + int event; + + switch (module) { + case FMAN_MOD_MAC: + if (intr_type == FMAN_INTR_TYPE_ERR) + event = FMAN_EV_ERR_MAC0 + mod_id; + else + event = FMAN_EV_MAC0 + mod_id; + break; + case FMAN_MOD_FMAN_CTRL: + if (intr_type == FMAN_INTR_TYPE_ERR) + event = FMAN_EV_CNT; + else + event = (FMAN_EV_FMAN_CTRL_0 + mod_id); + break; + case FMAN_MOD_DUMMY_LAST: + event = FMAN_EV_CNT; + break; + default: + event = FMAN_EV_CNT; + break; + } + + return event; +} + +static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo, + u32 *extra_size_of_fifo) +{ + struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + u32 fifo = *size_of_fifo; + u32 extra_fifo = *extra_size_of_fifo; + u32 tmp; + + /* if this is the first time a port requires extra_fifo_pool_size, + * the total extra_fifo_pool_size must be initialized to 1 buffer per + * port + */ + if (extra_fifo && !fman->state->extra_fifo_pool_size) + fman->state->extra_fifo_pool_size = + fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS; + + fman->state->extra_fifo_pool_size = + max(fman->state->extra_fifo_pool_size, extra_fifo); + + /* check that there are enough uncommitted fifo size */ + if ((fman->state->accumulated_fifo_size + fifo) > + (fman->state->total_fifo_size - + fman->state->extra_fifo_pool_size)) { + dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n", + __func__); + return -EAGAIN; + } + + /* Read, modify and write to HW */ + tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) | + ((extra_fifo / FMAN_BMI_FIFO_UNITS) << + BMI_EXTRA_FIFO_SIZE_SHIFT); + iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]); + + /* update accumulated */ + fman->state->accumulated_fifo_size += fifo; + + return 0; +} + +static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks, + u8 *num_of_extra_tasks) +{ + struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + u8 tasks = *num_of_tasks; + u8 extra_tasks = *num_of_extra_tasks; + u32 tmp; + + if (extra_tasks) + fman->state->extra_tasks_pool_size = + max(fman->state->extra_tasks_pool_size, extra_tasks); + + /* check that there are enough uncommitted tasks */ + if ((fman->state->accumulated_num_of_tasks + tasks) > + (fman->state->total_num_of_tasks - + fman->state->extra_tasks_pool_size)) { + dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n", + __func__, fman->state->fm_id); + return -EAGAIN; + } + /* update accumulated */ + fman->state->accumulated_num_of_tasks += tasks; + + /* Write to HW */ + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) & + ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK); + tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) | + (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT)); + iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]); + + return 0; +} + +static int set_num_of_open_dmas(struct fman *fman, u8 port_id, + u8 *num_of_open_dmas, + u8 *num_of_extra_open_dmas) +{ + 
struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + u8 open_dmas = *num_of_open_dmas; + u8 extra_open_dmas = *num_of_extra_open_dmas; + u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0; + u32 tmp; + + if (!open_dmas) { + /* Configuration according to values in the HW. + * read the current number of open Dma's + */ + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]); + current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >> + BMI_EXTRA_NUM_OF_DMAS_SHIFT); + + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]); + current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >> + BMI_NUM_OF_DMAS_SHIFT) + 1); + + /* This is the first configuration and user did not + * specify value (!open_dmas), reset values will be used + * and we just save these values for resource management + */ + fman->state->extra_open_dmas_pool_size = + (u8)max(fman->state->extra_open_dmas_pool_size, + current_extra_val); + fman->state->accumulated_num_of_open_dmas += current_val; + *num_of_open_dmas = current_val; + *num_of_extra_open_dmas = current_extra_val; + return 0; + } + + if (extra_open_dmas > current_extra_val) + fman->state->extra_open_dmas_pool_size = + (u8)max(fman->state->extra_open_dmas_pool_size, + extra_open_dmas); + + if ((fman->state->rev_info.major < 6) && + (fman->state->accumulated_num_of_open_dmas - current_val + + open_dmas > fman->state->max_num_of_open_dmas)) { + dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n", + __func__, fman->state->fm_id); + return -EAGAIN; + } else if ((fman->state->rev_info.major >= 6) && + !((fman->state->rev_info.major == 6) && + (fman->state->rev_info.minor == 0)) && + (fman->state->accumulated_num_of_open_dmas - + current_val + open_dmas > + fman->state->dma_thresh_max_commq + 1)) { + dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n", + __func__, fman->state->fm_id, + fman->state->dma_thresh_max_commq + 1); + return -EAGAIN; + } + + WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val); + /* update acummulated */ + fman->state->accumulated_num_of_open_dmas -= current_val; + fman->state->accumulated_num_of_open_dmas += open_dmas; + + if (fman->state->rev_info.major < 6) + total_num_dmas = + (u8)(fman->state->accumulated_num_of_open_dmas + + fman->state->extra_open_dmas_pool_size); + + /* calculate reg */ + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) & + ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK); + tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) | + (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT)); + iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]); + + /* update total num of DMA's with committed number of open DMAS, + * and max uncommitted pool. 
+ */ + if (total_num_dmas) { + tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK; + tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT; + iowrite32be(tmp, &bmi_rg->fmbm_cfg2); + } + + return 0; +} + +static int fman_config(struct fman *fman) +{ + void __iomem *base_addr; + int err; + + base_addr = fman->dts_params.base_addr; + + fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL); + if (!fman->state) + goto err_fm_state; + + /* Allocate the FM driver's parameters structure */ + fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL); + if (!fman->cfg) + goto err_fm_drv; + + /* Initialize MURAM block */ + fman->muram = + fman_muram_init(fman->dts_params.muram_res.start, + resource_size(&fman->dts_params.muram_res)); + if (!fman->muram) + goto err_fm_soc_specific; + + /* Initialize FM parameters which will be kept by the driver */ + fman->state->fm_id = fman->dts_params.id; + fman->state->fm_clk_freq = fman->dts_params.clk_freq; + fman->state->qman_channel_base = fman->dts_params.qman_channel_base; + fman->state->num_of_qman_channels = + fman->dts_params.num_of_qman_channels; + fman->state->res = fman->dts_params.res; + fman->exception_cb = fman_exceptions; + fman->bus_error_cb = fman_bus_error; + fman->fpm_regs = base_addr + FPM_OFFSET; + fman->bmi_regs = base_addr + BMI_OFFSET; + fman->qmi_regs = base_addr + QMI_OFFSET; + fman->dma_regs = base_addr + DMA_OFFSET; + fman->base_addr = base_addr; + + spin_lock_init(&fman->spinlock); + fman_defconfig(fman->cfg); + + fman->state->extra_fifo_pool_size = 0; + fman->state->exceptions = (EX_DMA_BUS_ERROR | + EX_DMA_READ_ECC | + EX_DMA_SYSTEM_WRITE_ECC | + EX_DMA_FM_WRITE_ECC | + EX_FPM_STALL_ON_TASKS | + EX_FPM_SINGLE_ECC | + EX_FPM_DOUBLE_ECC | + EX_QMI_DEQ_FROM_UNKNOWN_PORTID | + EX_BMI_LIST_RAM_ECC | + EX_BMI_STORAGE_PROFILE_ECC | + EX_BMI_STATISTICS_RAM_ECC | + EX_MURAM_ECC | + EX_BMI_DISPATCH_RAM_ECC | + EX_QMI_DOUBLE_ECC | + EX_QMI_SINGLE_ECC); + + /* Read FMan revision for future use*/ + fman_get_revision(fman, &fman->state->rev_info); + + err = fill_soc_specific_params(fman->state); + if (err) + goto err_fm_soc_specific; + + /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */ + if (fman->state->rev_info.major >= 6) + fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID; + + fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh; + + fman->state->total_num_of_tasks = + (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major, + fman->state->rev_info.minor, + fman->state->bmi_max_num_of_tasks); + + if (fman->state->rev_info.major < 6) { + fman->cfg->dma_comm_qtsh_clr_emer = + (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major, + fman->state->dma_thresh_max_commq); + + fman->cfg->dma_comm_qtsh_asrt_emer = + (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major, + fman->state->dma_thresh_max_commq); + + fman->cfg->dma_cam_num_of_entries = + DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major); + + fman->cfg->dma_read_buf_tsh_clr_emer = + DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_read_buf_tsh_asrt_emer = + DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_write_buf_tsh_clr_emer = + DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_write_buf_tsh_asrt_emer = + DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_axi_dbg_num_of_beats = + DFLT_AXI_DBG_NUM_OF_BEATS; + } + + return 0; + +err_fm_soc_specific: + kfree(fman->cfg); +err_fm_drv: + kfree(fman->state); +err_fm_state: + kfree(fman); + return 
-EINVAL; +} + +static int fman_init(struct fman *fman) +{ + struct fman_cfg *cfg = NULL; + int err = 0, i, count; + + if (is_init_done(fman->cfg)) + return -EINVAL; + + fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT; + + cfg = fman->cfg; + + /* clear revision-dependent non existing exception */ + if (fman->state->rev_info.major < 6) + fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC; + + if (fman->state->rev_info.major >= 6) + fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC; + + /* clear CPG */ + memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0, + fman->state->fm_port_num_of_cg); + + /* Save LIODN info before FMan reset + * Skipping non-existent port 0 (i = 1) + */ + for (i = 1; i < FMAN_LIODN_TBL; i++) { + u32 liodn_base; + + fman->liodn_offset[i] = + ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]); + liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]); + if (i % 2) { + /* FMDM_PLR LSB holds LIODN base for odd ports */ + liodn_base &= DMA_LIODN_BASE_MASK; + } else { + /* FMDM_PLR MSB holds LIODN base for even ports */ + liodn_base >>= DMA_LIODN_SHIFT; + liodn_base &= DMA_LIODN_BASE_MASK; + } + fman->liodn_base[i] = liodn_base; + } + + /* FMan Reset (supported only for FMan V2) */ + if (fman->state->rev_info.major >= 6) { + /* Errata A007273 */ + dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n", + __func__); + } else { + iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc); + /* Wait for reset completion */ + count = 100; + do { + udelay(1); + } while (((ioread32be(&fman->fpm_regs->fm_rstc)) & + FPM_RSTC_FM_RESET) && --count); + if (count == 0) + return -EBUSY; + } + + if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) { + resume(fman->fpm_regs); + /* Wait until QMI is not in halt not busy state */ + count = 100; + do { + udelay(1); + } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) & + QMI_GS_HALT_NOT_BUSY) && --count); + if (count == 0) + dev_warn(fman->dev, "%s: QMI is in halt not busy state\n", + __func__); + } + + if (clear_iram(fman) != 0) + return -EINVAL; + + cfg->exceptions = fman->state->exceptions; + + /* Init DMA Registers */ + + err = dma_init(fman); + if (err != 0) { + free_init_resources(fman); + return err; + } + + /* Init FPM Registers */ + fpm_init(fman->fpm_regs, fman->cfg); + + /* define common resources */ + /* allocate MURAM for FIFO according to total size */ + fman->fifo_offset = fman_muram_alloc(fman->muram, + fman->state->total_fifo_size); + if (IS_ERR_VALUE(fman->cam_offset)) { + free_init_resources(fman); + dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n", + __func__); + return -ENOMEM; + } + + cfg->fifo_base_addr = fman->fifo_offset; + cfg->total_fifo_size = fman->state->total_fifo_size; + cfg->total_num_of_tasks = fman->state->total_num_of_tasks; + cfg->clk_freq = fman->state->fm_clk_freq; + + /* Init BMI Registers */ + bmi_init(fman->bmi_regs, fman->cfg); + + /* Init QMI Registers */ + qmi_init(fman->qmi_regs, fman->cfg); + + err = enable(fman, cfg); + if (err != 0) + return err; + + enable_time_stamp(fman); + + kfree(fman->cfg); + fman->cfg = NULL; + + return 0; +} + +static int fman_set_exception(struct fman *fman, + enum fman_exceptions exception, bool enable) +{ + u32 bit_mask = 0; + + if (!is_init_done(fman->cfg)) + return -EINVAL; + + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + fman->state->exceptions |= bit_mask; + else + fman->state->exceptions &= ~bit_mask; + } else { + dev_err(fman->dev, "%s: Undefined exception (%d)\n", + __func__, exception); 
+ return -EINVAL; + } + + return set_exception(fman, exception, enable); +} + +/** + * fman_register_intr + * @fman: A Pointer to FMan device + * @mod: Calling module + * @mod_id: Module id (if more than 1 exists, '0' if not) + * @intr_type: Interrupt type (error/normal) selection. + * @f_isr: The interrupt service routine. + * @h_src_arg: Argument to be passed to f_isr. + * + * Used to register an event handler to be processed by FMan + * + * Return: 0 on success; Error code otherwise. + */ +void fman_register_intr(struct fman *fman, enum fman_event_modules module, + u8 mod_id, enum fman_intr_type intr_type, + void (*isr_cb)(void *src_arg), void *src_arg) +{ + int event = 0; + + event = get_module_event(module, mod_id, intr_type); + WARN_ON(event >= FMAN_EV_CNT); + + /* register in local FM structure */ + fman->intr_mng[event].isr_cb = isr_cb; + fman->intr_mng[event].src_handle = src_arg; +} + +/** + * fman_unregister_intr + * @fman: A Pointer to FMan device + * @mod: Calling module + * @mod_id: Module id (if more than 1 exists, '0' if not) + * @intr_type: Interrupt type (error/normal) selection. + * + * Used to unregister an event handler to be processed by FMan + * + * Return: 0 on success; Error code otherwise. + */ +void fman_unregister_intr(struct fman *fman, enum fman_event_modules module, + u8 mod_id, enum fman_intr_type intr_type) +{ + int event = 0; + + event = get_module_event(module, mod_id, intr_type); + WARN_ON(event >= FMAN_EV_CNT); + + fman->intr_mng[event].isr_cb = NULL; + fman->intr_mng[event].src_handle = NULL; +} + +/** + * fman_set_port_params + * @fman: A Pointer to FMan device + * @port_params: Port parameters + * + * Used by FMan Port to pass parameters to the FMan + * + * Return: 0 on success; Error code otherwise. + */ +int fman_set_port_params(struct fman *fman, + struct fman_port_init_params *port_params) +{ + int err; + unsigned long flags; + u8 port_id = port_params->port_id, mac_id; + + spin_lock_irqsave(&fman->spinlock, flags); + + err = set_num_of_tasks(fman, port_params->port_id, + &port_params->num_of_tasks, + &port_params->num_of_extra_tasks); + if (err) + goto return_err; + + /* TX Ports */ + if (port_params->port_type != FMAN_PORT_TYPE_RX) { + u32 enq_th, deq_th, reg; + + /* update qmi ENQ/DEQ threshold */ + fman->state->accumulated_num_of_deq_tnums += + port_params->deq_pipeline_depth; + enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) & + QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT; + /* if enq_th is too big, we reduce it to the max value + * that is still 0 + */ + if (enq_th >= (fman->state->qmi_max_num_of_tnums - + fman->state->accumulated_num_of_deq_tnums)) { + enq_th = + fman->state->qmi_max_num_of_tnums - + fman->state->accumulated_num_of_deq_tnums - 1; + + reg = ioread32be(&fman->qmi_regs->fmqm_gc); + reg &= ~QMI_CFG_ENQ_MASK; + reg |= (enq_th << QMI_CFG_ENQ_SHIFT); + iowrite32be(reg, &fman->qmi_regs->fmqm_gc); + } + + deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) & + QMI_CFG_DEQ_MASK; + /* if deq_th is too small, we enlarge it to the min + * value that is still 0. + * depTh may not be larger than 63 + * (fman->state->qmi_max_num_of_tnums-1). 
+ */ + if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) && + (deq_th < fman->state->qmi_max_num_of_tnums - 1)) { + deq_th = fman->state->accumulated_num_of_deq_tnums + 1; + reg = ioread32be(&fman->qmi_regs->fmqm_gc); + reg &= ~QMI_CFG_DEQ_MASK; + reg |= deq_th; + iowrite32be(reg, &fman->qmi_regs->fmqm_gc); + } + } + + err = set_size_of_fifo(fman, port_params->port_id, + &port_params->size_of_fifo, + &port_params->extra_size_of_fifo); + if (err) + goto return_err; + + err = set_num_of_open_dmas(fman, port_params->port_id, + &port_params->num_of_open_dmas, + &port_params->num_of_extra_open_dmas); + if (err) + goto return_err; + + set_port_liodn(fman, port_id, fman->liodn_base[port_id], + fman->liodn_offset[port_id]); + + if (fman->state->rev_info.major < 6) + set_port_order_restoration(fman->fpm_regs, port_id); + + mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id); + + if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) { + fman->state->port_mfl[mac_id] = port_params->max_frame_length; + } else { + dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n", + __func__, port_id, mac_id); + err = -EINVAL; + goto return_err; + } + + spin_unlock_irqrestore(&fman->spinlock, flags); + + return 0; + +return_err: + spin_unlock_irqrestore(&fman->spinlock, flags); + return err; +} + +/** + * fman_reset_mac + * @fman: A Pointer to FMan device + * @mac_id: MAC id to be reset + * + * Reset a specific MAC + * + * Return: 0 on success; Error code otherwise. + */ +int fman_reset_mac(struct fman *fman, u8 mac_id) +{ + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + u32 msk, timeout = 100; + + if (fman->state->rev_info.major >= 6) { + dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n", + __func__); + return -EINVAL; + } + + /* Get the relevant bit mask */ + switch (mac_id) { + case 0: + msk = FPM_RSTC_MAC0_RESET; + break; + case 1: + msk = FPM_RSTC_MAC1_RESET; + break; + case 2: + msk = FPM_RSTC_MAC2_RESET; + break; + case 3: + msk = FPM_RSTC_MAC3_RESET; + break; + case 4: + msk = FPM_RSTC_MAC4_RESET; + break; + case 5: + msk = FPM_RSTC_MAC5_RESET; + break; + case 6: + msk = FPM_RSTC_MAC6_RESET; + break; + case 7: + msk = FPM_RSTC_MAC7_RESET; + break; + case 8: + msk = FPM_RSTC_MAC8_RESET; + break; + case 9: + msk = FPM_RSTC_MAC9_RESET; + break; + default: + dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n", + __func__, mac_id); + return -EINVAL; + } + + /* reset */ + iowrite32be(msk, &fpm_rg->fm_rstc); + while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout) + udelay(10); + + if (!timeout) + return -EIO; + + return 0; +} + +/** + * fman_set_mac_max_frame + * @fman: A Pointer to FMan device + * @mac_id: MAC id + * @mfl: Maximum frame length + * + * Set maximum frame length of specific MAC in FMan driver + * + * Return: 0 on success; Error code otherwise. 
+ */ +int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl) +{ + /* if port is already initialized, check that MaxFrameLength is smaller + * or equal to the port's max + */ + if ((!fman->state->port_mfl[mac_id]) || + (fman->state->port_mfl[mac_id] && + (mfl <= fman->state->port_mfl[mac_id]))) { + fman->state->mac_mfl[mac_id] = mfl; + } else { + dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n", + __func__); + return -EINVAL; + } + return 0; +} + +/** + * fman_get_clock_freq + * @fman: A Pointer to FMan device + * + * Get FMan clock frequency + * + * Return: FMan clock frequency + */ +u16 fman_get_clock_freq(struct fman *fman) +{ + return fman->state->fm_clk_freq; +} + +/** + * fman_get_bmi_max_fifo_size + * @fman: A Pointer to FMan device + * + * Get FMan maximum FIFO size + * + * Return: FMan Maximum FIFO size + */ +u32 fman_get_bmi_max_fifo_size(struct fman *fman) +{ + return fman->state->bmi_max_fifo_size; +} + +/** + * fman_get_revision + * @fman - Pointer to the FMan module + * @rev_info - A structure of revision information parameters. + * + * Returns the FM revision + * + * Allowed only following fman_init(). + * + * Return: 0 on success; Error code otherwise. + */ +void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info) +{ + u32 tmp; + + tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1); + rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >> + FPM_REV1_MAJOR_SHIFT); + rev_info->minor = tmp & FPM_REV1_MINOR_MASK; +} + +/** + * fman_get_qman_channel_id + * @fman: A Pointer to FMan device + * @port_id: Port id + * + * Get QMan channel ID associated to the Port id + * + * Return: QMan channel ID + */ +u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id) +{ + int i; + + if (fman->state->rev_info.major >= 6) { + u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b, + 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7}; + for (i = 0; i < fman->state->num_of_qman_channels; i++) { + if (port_ids[i] == port_id) + break; + } + } else { + u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1, + 0x2, 0x3, 0x4, 0x5, 0x7, 0x7}; + for (i = 0; i < fman->state->num_of_qman_channels; i++) { + if (port_ids[i] == port_id) + break; + } + } + + if (i == fman->state->num_of_qman_channels) + return 0; + + return fman->state->qman_channel_base + i; +} + +/** + * fman_get_mem_region + * @fman: A Pointer to FMan device + * + * Get FMan memory region + * + * Return: A structure with FMan memory region information + */ +struct resource *fman_get_mem_region(struct fman *fman) +{ + return fman->state->res; +} + +/* Bootargs defines */ +/* Extra headroom for RX buffers - Default, min and max */ +#define FSL_FM_RX_EXTRA_HEADROOM 64 +#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16 +#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384 + +/* Maximum frame length */ +#define FSL_FM_MAX_FRAME_SIZE 1522 +#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600 +#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64 + +/* Extra headroom for Rx buffers. + * FMan is instructed to allocate, on the Rx path, this amount of + * space at the beginning of a data buffer, beside the DPA private + * data area and the IC fields. + * Does not impact Tx buffer layout. + * Configurable from bootargs. 64 by default, it's needed on + * particular forwarding scenarios that add extra headers to the + * forwarded frame. 
+ */ +int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM; +module_param(fsl_fm_rx_extra_headroom, int, 0); +MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers"); + +/* Max frame size, across all interfaces. + * Configurable from bootargs, to avoid allocating oversized (socket) + * buffers when not using jumbo frames. + * Must be large enough to accommodate the network MTU, but small enough + * to avoid wasting skb memory. + * + * Could be overridden once, at boot-time, via the + * fm_set_max_frm() callback. + */ +int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; +module_param(fsl_fm_max_frm, int, 0); +MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces"); + +/** + * fman_get_max_frm + * + * Return: Max frame length configured in the FM driver + */ +u16 fman_get_max_frm(void) +{ + static bool fm_check_mfl; + + if (!fm_check_mfl) { + if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE || + fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) { + pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n", + fsl_fm_max_frm, + FSL_FM_MIN_POSSIBLE_FRAME_SIZE, + FSL_FM_MAX_POSSIBLE_FRAME_SIZE, + FSL_FM_MAX_FRAME_SIZE); + fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; + } + fm_check_mfl = true; + } + + return fsl_fm_max_frm; +} +EXPORT_SYMBOL(fman_get_max_frm); + +/** + * fman_get_rx_extra_headroom + * + * Return: Extra headroom size configured in the FM driver + */ +int fman_get_rx_extra_headroom(void) +{ + static bool fm_check_rx_extra_headroom; + + if (!fm_check_rx_extra_headroom) { + if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX || + fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) { + pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n", + fsl_fm_rx_extra_headroom, + FSL_FM_RX_EXTRA_HEADROOM_MIN, + FSL_FM_RX_EXTRA_HEADROOM_MAX, + FSL_FM_RX_EXTRA_HEADROOM); + fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM; + } + + fm_check_rx_extra_headroom = true; + fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16); + } + + return fsl_fm_rx_extra_headroom; +} +EXPORT_SYMBOL(fman_get_rx_extra_headroom); + +/** + * fman_bind + * @dev: FMan OF device pointer + * + * Bind to a specific FMan device. + * + * Allowed only after the port was created. 
+ * + * Return: A pointer to the FMan device + */ +struct fman *fman_bind(struct device *fm_dev) +{ + return (struct fman *)(dev_get_drvdata(get_device(fm_dev))); +} + +static irqreturn_t fman_err_irq(int irq, void *handle) +{ + struct fman *fman = (struct fman *)handle; + u32 pending; + struct fman_fpm_regs __iomem *fpm_rg; + irqreturn_t single_ret, ret = IRQ_NONE; + + if (!is_init_done(fman->cfg)) + return IRQ_NONE; + + fpm_rg = fman->fpm_regs; + + /* error interrupts */ + pending = ioread32be(&fpm_rg->fm_epi); + if (!pending) + return IRQ_NONE; + + if (pending & ERR_INTR_EN_BMI) { + single_ret = bmi_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_QMI) { + single_ret = qmi_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_FPM) { + single_ret = fpm_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_DMA) { + single_ret = dma_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MURAM) { + single_ret = muram_err_intr(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + /* MAC error interrupts */ + if (pending & ERR_INTR_EN_MAC0) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC1) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC2) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC3) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC4) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC5) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC6) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC7) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC8) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC9) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + return ret; +} + +static irqreturn_t fman_irq(int irq, void *handle) +{ + struct fman *fman = (struct fman *)handle; + u32 pending; + struct fman_fpm_regs __iomem *fpm_rg; + irqreturn_t single_ret, ret = IRQ_NONE; + + if (!is_init_done(fman->cfg)) + return IRQ_NONE; + + fpm_rg = fman->fpm_regs; + + /* normal interrupts */ + pending = ioread32be(&fpm_rg->fm_npi); + if (!pending) + return IRQ_NONE; + + if (pending & INTR_EN_QMI) { + single_ret = qmi_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + /* MAC interrupts */ + if (pending & INTR_EN_MAC0) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC1) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1); + if (single_ret == 
IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC2) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC3) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC4) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC5) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC6) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC7) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC8) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC9) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + return ret; +} + +static const struct of_device_id fman_muram_match[] = { + { + .compatible = "fsl,fman-muram"}, + {} +}; +MODULE_DEVICE_TABLE(of, fman_muram_match); + +static struct fman *read_dts_node(struct platform_device *of_dev) +{ + struct fman *fman; + struct device_node *fm_node, *muram_node; + struct resource *res; + const u32 *u32_prop; + int lenp, err, irq; + struct clk *clk; + u32 clk_rate; + phys_addr_t phys_base_addr; + resource_size_t mem_size; + + fman = kzalloc(sizeof(*fman), GFP_KERNEL); + if (!fman) + return NULL; + + fm_node = of_node_get(of_dev->dev.of_node); + + u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n", + __func__, fm_node->full_name); + goto fman_node_put; + } + if (WARN_ON(lenp != sizeof(u32))) + goto fman_node_put; + + fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]); + + /* Get the FM interrupt */ + res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n", + __func__); + goto fman_node_put; + } + irq = res->start; + + /* Get the FM error interrupt */ + res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1); + if (!res) { + dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n", + __func__); + goto fman_node_put; + } + fman->dts_params.err_irq = res->start; + + /* Get the FM address */ + res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n", + __func__); + goto fman_node_put; + } + + phys_base_addr = res->start; + mem_size = resource_size(res); + + clk = of_clk_get(fm_node, 0); + if (IS_ERR(clk)) { + dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n", + __func__, fman->dts_params.id); + goto fman_node_put; + } + + clk_rate = clk_get_rate(clk); + if (!clk_rate) { + dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n", + __func__, fman->dts_params.id); + goto fman_node_put; + } + /* Rounding to MHz */ + fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000); + + u32_prop = (const u32 *)of_get_property(fm_node, + "fsl,qman-channel-range", + &lenp); + if (!u32_prop) { + dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n", + __func__, fm_node->full_name); + goto 
fman_node_put; + } + if (WARN_ON(lenp != sizeof(u32) * 2)) + goto fman_node_put; + fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]); + fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]); + + /* Get the MURAM base address and size */ + muram_node = of_find_matching_node(fm_node, fman_muram_match); + if (!muram_node) { + dev_err(&of_dev->dev, "%s: could not find MURAM node\n", + __func__); + goto fman_node_put; + } + + err = of_address_to_resource(muram_node, 0, + &fman->dts_params.muram_res); + if (err) { + of_node_put(muram_node); + dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", + __func__, err); + goto fman_node_put; + } + + of_node_put(muram_node); + of_node_put(fm_node); + + err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman); + if (err < 0) { + dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", + __func__, irq, err); + goto fman_free; + } + + if (fman->dts_params.err_irq != 0) { + err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq, + fman_err_irq, IRQF_SHARED, + "fman-err", fman); + if (err < 0) { + dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", + __func__, fman->dts_params.err_irq, err); + goto fman_free; + } + } + + fman->dts_params.res = + devm_request_mem_region(&of_dev->dev, phys_base_addr, + mem_size, "fman"); + if (!fman->dts_params.res) { + dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", + __func__); + goto fman_free; + } + + fman->dts_params.base_addr = + devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); + if (fman->dts_params.base_addr == 0) { + dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); + goto fman_free; + } + + return fman; + +fman_node_put: + of_node_put(fm_node); +fman_free: + kfree(fman); + return NULL; +} + +static int fman_probe(struct platform_device *of_dev) +{ + struct fman *fman; + struct device *dev; + int err; + + dev = &of_dev->dev; + + fman = read_dts_node(of_dev); + if (!fman) + return -EIO; + + err = fman_config(fman); + if (err) { + dev_err(dev, "%s: FMan config failed\n", __func__); + return -EINVAL; + } + + if (fman_init(fman) != 0) { + dev_err(dev, "%s: FMan init failed\n", __func__); + return -EINVAL; + } + + if (fman->dts_params.err_irq == 0) { + fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false); + fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false); + fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false); + fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false); + fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false); + fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false); + fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false); + fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false); + fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false); + fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false); + fman_set_exception(fman, + FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false); + fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false); + fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC, + false); + fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false); + fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false); + } + + dev_set_drvdata(dev, fman); + + fman->dev = dev; + + dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); + + return 0; +} + +static const struct of_device_id fman_match[] = { + { + .compatible = "fsl,fman"}, + {} +}; + +MODULE_DEVICE_TABLE(of, fm_match); + +static struct platform_driver fman_driver = { + .driver = { + .name = 
"fsl-fman", + .of_match_table = fman_match, + }, + .probe = fman_probe, +}; + +builtin_platform_driver(fman_driver); diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h new file mode 100644 index 000000000000..57aae8d17d77 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -0,0 +1,325 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FM_H +#define __FM_H + +#include <linux/io.h> + +/* FM Frame descriptor macros */ +/* Frame queue Context Override */ +#define FM_FD_CMD_FCO 0x80000000 +#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */ +#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */ + +/* TX-Port: Unsupported Format */ +#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000 +/* TX Port: Length Error */ +#define FM_FD_ERR_LENGTH 0x02000000 +#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */ + +/* IPR frame (not error) */ +#define FM_FD_IPR 0x00000001 +/* IPR non-consistent-sp */ +#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR) +/* IPR error */ +#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR) +/* IPR timeout */ +#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR) +/* TX Port: Length Error */ +#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR) + +/* Rx FIFO overflow, FCS error, code error, running disparity error + * (SGMII and TBI modes), FIFO parity error. PHY Sequence error, + * PHY error control character detected. 
+ */ +#define FM_FD_ERR_PHYSICAL 0x00080000 +/* Frame too long OR Frame size exceeds max_length_frame */ +#define FM_FD_ERR_SIZE 0x00040000 +/* classification discard */ +#define FM_FD_ERR_CLS_DISCARD 0x00020000 +/* Extract Out of Frame */ +#define FM_FD_ERR_EXTRACTION 0x00008000 +/* No Scheme Selected */ +#define FM_FD_ERR_NO_SCHEME 0x00004000 +/* Keysize Overflow */ +#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000 +/* Frame color is red */ +#define FM_FD_ERR_COLOR_RED 0x00000800 +/* Frame color is yellow */ +#define FM_FD_ERR_COLOR_YELLOW 0x00000400 +/* Parser Time out Exceed */ +#define FM_FD_ERR_PRS_TIMEOUT 0x00000080 +/* Invalid Soft Parser instruction */ +#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040 +/* Header error was identified during parsing */ +#define FM_FD_ERR_PRS_HDR_ERR 0x00000020 +/* Frame parsed beyind 256 first bytes */ +#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008 + +/* non Frame-Manager error */ +#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000 + +/* FMan driver defines */ +#define FMAN_BMI_FIFO_UNITS 0x100 +#define OFFSET_UNITS 16 + +/* BMan defines */ +#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */ +#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */ + +struct fman; /* FMan data */ + +/* Enum for defining port types */ +enum fman_port_type { + FMAN_PORT_TYPE_TX = 0, /* TX Port */ + FMAN_PORT_TYPE_RX, /* RX Port */ +}; + +struct fman_rev_info { + u8 major; /* Major revision */ + u8 minor; /* Minor revision */ +}; + +enum fman_exceptions { + FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */ + FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */ + FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */ + FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */ + FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */ + FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */ + FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */ + FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */ + FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. 
*/ + FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */ + FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */ + FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */ + FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */ + FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */ + FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */ + FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */ + FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */ +}; + +/* Parse results memory layout */ +struct fman_prs_result { + u8 lpid; /* Logical port id */ + u8 shimr; /* Shim header result */ + u16 l2r; /* Layer 2 result */ + u16 l3r; /* Layer 3 result */ + u8 l4r; /* Layer 4 result */ + u8 cplan; /* Classification plan id */ + u16 nxthdr; /* Next Header */ + u16 cksum; /* Running-sum */ + /* Flags&fragment-offset field of the last IP-header */ + u16 flags_frag_off; + /* Routing type field of a IPV6 routing extension header */ + u8 route_type; + /* Routing Extension Header Present; last bit is IP valid */ + u8 rhp_ip_valid; + u8 shim_off[2]; /* Shim offset */ + u8 ip_pid_off; /* IP PID (last IP-proto) offset */ + u8 eth_off; /* ETH offset */ + u8 llc_snap_off; /* LLC_SNAP offset */ + u8 vlan_off[2]; /* VLAN offset */ + u8 etype_off; /* ETYPE offset */ + u8 pppoe_off; /* PPP offset */ + u8 mpls_off[2]; /* MPLS offset */ + u8 ip_off[2]; /* IP offset */ + u8 gre_off; /* GRE offset */ + u8 l4_off; /* Layer 4 offset */ + u8 nxthdr_off; /* Parser end point */ +}; + +/* A structure for defining buffer prefix area content. */ +struct fman_buffer_prefix_content { + /* Number of bytes to be left at the beginning of the external + * buffer; Note that the private-area will start from the base + * of the buffer address. + */ + u16 priv_data_size; + /* true to pass the parse result to/from the FM; + * User may use FM_PORT_GetBufferPrsResult() in + * order to get the parser-result from a buffer. + */ + bool pass_prs_result; + /* true to pass the timeStamp to/from the FM User */ + bool pass_time_stamp; + /* true to pass the KG hash result to/from the FM User may + * use FM_PORT_GetBufferHashResult() in order to get the + * parser-result from a buffer. + */ + bool pass_hash_result; + /* Add all other Internal-Context information: AD, + * hash-result, key, etc. + */ + u16 data_align; +}; + +/* A structure of information about each of the external + * buffer pools used by a port or storage-profile. + */ +struct fman_ext_pool_params { + u8 id; /* External buffer pool id */ + u16 size; /* External buffer pool buffer size */ +}; + +/* A structure for informing the driver about the external + * buffer pools allocated in the BM and used by a port or a + * storage-profile. + */ +struct fman_ext_pools { + u8 num_of_pools_used; /* Number of pools use by this port */ + struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM]; + /* Parameters for each port */ +}; + +/* A structure for defining BM pool depletion criteria */ +struct fman_buf_pool_depletion { + /* select mode in which pause frames will be sent after a + * number of pools (all together!) are depleted + */ + bool pools_grp_mode_enable; + /* the number of depleted pools that will invoke pause + * frames transmission. + */ + u8 num_of_pools; + /* For each pool, true if it should be considered for + * depletion (Note - this pool must be used by this port!). 
+ */ + bool pools_to_consider[BM_MAX_NUM_OF_POOLS]; + /* select mode in which pause frames will be sent + * after a single-pool is depleted; + */ + bool single_pool_mode_enable; + /* For each pool, true if it should be considered + * for depletion (Note - this pool must be used by this port!) + */ + bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS]; +}; + +/* Enum for inter-module interrupts registration */ +enum fman_event_modules { + FMAN_MOD_MAC = 0, /* MAC event */ + FMAN_MOD_FMAN_CTRL, /* FMAN Controller */ + FMAN_MOD_DUMMY_LAST +}; + +/* Enum for interrupts types */ +enum fman_intr_type { + FMAN_INTR_TYPE_ERR, + FMAN_INTR_TYPE_NORMAL +}; + +/* Enum for inter-module interrupts registration */ +enum fman_inter_module_event { + FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */ + FMAN_EV_ERR_MAC1, /* MAC 1 error event */ + FMAN_EV_ERR_MAC2, /* MAC 2 error event */ + FMAN_EV_ERR_MAC3, /* MAC 3 error event */ + FMAN_EV_ERR_MAC4, /* MAC 4 error event */ + FMAN_EV_ERR_MAC5, /* MAC 5 error event */ + FMAN_EV_ERR_MAC6, /* MAC 6 error event */ + FMAN_EV_ERR_MAC7, /* MAC 7 error event */ + FMAN_EV_ERR_MAC8, /* MAC 8 error event */ + FMAN_EV_ERR_MAC9, /* MAC 9 error event */ + FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */ + FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */ + FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */ + FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */ + FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */ + FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */ + FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */ + FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */ + FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */ + FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */ + FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */ + FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */ + FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */ + FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */ + FMAN_EV_CNT +}; + +struct fman_intr_src { + void (*isr_cb)(void *src_arg); + void *src_handle; +}; + +/* Structure for port-FM communication during fman_port_init. */ +struct fman_port_init_params { + u8 port_id; /* port Id */ + enum fman_port_type port_type; /* Port type */ + u16 port_speed; /* Port speed */ + u16 liodn_offset; /* Port's requested resource */ + u8 num_of_tasks; /* Port's requested resource */ + u8 num_of_extra_tasks; /* Port's requested resource */ + u8 num_of_open_dmas; /* Port's requested resource */ + u8 num_of_extra_open_dmas; /* Port's requested resource */ + u32 size_of_fifo; /* Port's requested resource */ + u32 extra_size_of_fifo; /* Port's requested resource */ + u8 deq_pipeline_depth; /* Port's requested resource */ + u16 max_frame_length; /* Port's max frame length. */ + u16 liodn_base; + /* LIODN base for this port, to be used together with LIODN offset. 
*/ +}; + +void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info); + +void fman_register_intr(struct fman *fman, enum fman_event_modules mod, + u8 mod_id, enum fman_intr_type intr_type, + void (*f_isr)(void *h_src_arg), void *h_src_arg); + +void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod, + u8 mod_id, enum fman_intr_type intr_type); + +int fman_set_port_params(struct fman *fman, + struct fman_port_init_params *port_params); + +int fman_reset_mac(struct fman *fman, u8 mac_id); + +u16 fman_get_clock_freq(struct fman *fman); + +u32 fman_get_bmi_max_fifo_size(struct fman *fman); + +int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl); + +u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id); + +struct resource *fman_get_mem_region(struct fman *fman); + +u16 fman_get_max_frm(void); + +int fman_get_rx_extra_headroom(void); + +struct fman *fman_bind(struct device *dev); + +#endif /* __FM_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c new file mode 100644 index 000000000000..6b1261c0b1c2 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -0,0 +1,1453 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_dtsec.h" +#include "fman.h" + +#include <linux/slab.h> +#include <linux/bitrev.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/phy.h> +#include <linux/crc32.h> +#include <linux/of_mdio.h> +#include <linux/mii.h> + +/* TBI register addresses */ +#define MII_TBICON 0x11 + +/* TBICON register bit fields */ +#define TBICON_SOFT_RESET 0x8000 /* Soft reset */ +#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */ +#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */ +#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */ +#define TBICON_CLK_SELECT 0x0020 /* Clock select */ +#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */ + +#define TBIANA_SGMII 0x4001 +#define TBIANA_1000X 0x01a0 + +/* Interrupt Mask Register (IMASK) */ +#define DTSEC_IMASK_BREN 0x80000000 +#define DTSEC_IMASK_RXCEN 0x40000000 +#define DTSEC_IMASK_MSROEN 0x04000000 +#define DTSEC_IMASK_GTSCEN 0x02000000 +#define DTSEC_IMASK_BTEN 0x01000000 +#define DTSEC_IMASK_TXCEN 0x00800000 +#define DTSEC_IMASK_TXEEN 0x00400000 +#define DTSEC_IMASK_LCEN 0x00040000 +#define DTSEC_IMASK_CRLEN 0x00020000 +#define DTSEC_IMASK_XFUNEN 0x00010000 +#define DTSEC_IMASK_ABRTEN 0x00008000 +#define DTSEC_IMASK_IFERREN 0x00004000 +#define DTSEC_IMASK_MAGEN 0x00000800 +#define DTSEC_IMASK_MMRDEN 0x00000400 +#define DTSEC_IMASK_MMWREN 0x00000200 +#define DTSEC_IMASK_GRSCEN 0x00000100 +#define DTSEC_IMASK_TDPEEN 0x00000002 +#define DTSEC_IMASK_RDPEEN 0x00000001 + +#define DTSEC_EVENTS_MASK \ + ((u32)(DTSEC_IMASK_BREN | \ + DTSEC_IMASK_RXCEN | \ + DTSEC_IMASK_BTEN | \ + DTSEC_IMASK_TXCEN | \ + DTSEC_IMASK_TXEEN | \ + DTSEC_IMASK_ABRTEN | \ + DTSEC_IMASK_LCEN | \ + DTSEC_IMASK_CRLEN | \ + DTSEC_IMASK_XFUNEN | \ + DTSEC_IMASK_IFERREN | \ + DTSEC_IMASK_MAGEN | \ + DTSEC_IMASK_TDPEEN | \ + DTSEC_IMASK_RDPEEN)) + +/* dtsec timestamp event bits */ +#define TMR_PEMASK_TSREEN 0x00010000 +#define TMR_PEVENT_TSRE 0x00010000 + +/* Group address bit indication */ +#define MAC_GROUP_ADDRESS 0x0000010000000000ULL + +/* Defaults */ +#define DEFAULT_HALFDUP_RETRANSMIT 0xf +#define DEFAULT_HALFDUP_COLL_WINDOW 0x37 +#define DEFAULT_TX_PAUSE_TIME 0xf000 +#define DEFAULT_RX_PREPEND 0 +#define DEFAULT_PREAMBLE_LEN 7 +#define DEFAULT_TX_PAUSE_TIME_EXTD 0 +#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40 +#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60 +#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50 +#define DEFAULT_BACK_TO_BACK_IPG 0x60 +#define DEFAULT_MAXIMUM_FRAME 0x600 + +/* register related defines (bits, field offsets..) 
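
The DTSEC_IMASK_* bits above are manipulated with a plain read-modify-write of the imask register throughout this file. A tiny sketch of that idiom (illustrative only, assuming the dtsec_regs layout defined just below and the <linux/io.h> accessors already included), here unmasking the magic-packet event:

static void example_unmask_magic_packet(struct dtsec_regs __iomem *regs)
{
        u32 imask = ioread32be(&regs->imask);

        imask |= DTSEC_IMASK_MAGEN;     /* enable magic packet detection IRQ */
        iowrite32be(imask, &regs->imask);
}
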
*/ +#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000 + +#define DTSEC_ECNTRL_GMIIM 0x00000040 +#define DTSEC_ECNTRL_TBIM 0x00000020 +#define DTSEC_ECNTRL_SGMIIM 0x00000002 +#define DTSEC_ECNTRL_RPM 0x00000010 +#define DTSEC_ECNTRL_R100M 0x00000008 +#define DTSEC_ECNTRL_QSGMIIM 0x00000001 + +#define DTSEC_TCTRL_GTS 0x00000020 + +#define RCTRL_PAL_MASK 0x001f0000 +#define RCTRL_PAL_SHIFT 16 +#define RCTRL_GHTX 0x00000400 +#define RCTRL_GRS 0x00000020 +#define RCTRL_MPROM 0x00000008 +#define RCTRL_RSF 0x00000004 +#define RCTRL_UPROM 0x00000001 + +#define MACCFG1_SOFT_RESET 0x80000000 +#define MACCFG1_RX_FLOW 0x00000020 +#define MACCFG1_TX_FLOW 0x00000010 +#define MACCFG1_TX_EN 0x00000001 +#define MACCFG1_RX_EN 0x00000004 + +#define MACCFG2_NIBBLE_MODE 0x00000100 +#define MACCFG2_BYTE_MODE 0x00000200 +#define MACCFG2_PAD_CRC_EN 0x00000004 +#define MACCFG2_FULL_DUPLEX 0x00000001 +#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000 +#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12 + +#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24 +#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16 +#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8 + +#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000 +#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000 +#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00 +#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F + +#define HAFDUP_EXCESS_DEFER 0x00010000 +#define HAFDUP_COLLISION_WINDOW 0x000003ff +#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12 +#define HAFDUP_RETRANSMISSION_MAX 0x0000f000 + +#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */ + +#define PTV_PTE_MASK 0xffff0000 +#define PTV_PT_MASK 0x0000ffff +#define PTV_PTE_SHIFT 16 + +#define MAX_PACKET_ALIGNMENT 31 +#define MAX_INTER_PACKET_GAP 0x7f +#define MAX_RETRANSMISSION 0x0f +#define MAX_COLLISION_WINDOW 0x03ff + +/* Hash table size (32 bits*8 regs) */ +#define DTSEC_HASH_TABLE_SIZE 256 +/* Extended Hash table size (32 bits*16 regs) */ +#define EXTENDED_HASH_TABLE_SIZE 512 + +/* dTSEC Memory Map registers */ +struct dtsec_regs { + /* dTSEC General Control and Status Registers */ + u32 tsec_id; /* 0x000 ETSEC_ID register */ + u32 tsec_id2; /* 0x004 ETSEC_ID2 register */ + u32 ievent; /* 0x008 Interrupt event register */ + u32 imask; /* 0x00C Interrupt mask register */ + u32 reserved0010[1]; + u32 ecntrl; /* 0x014 E control register */ + u32 ptv; /* 0x018 Pause time value register */ + u32 tbipa; /* 0x01C TBI PHY address register */ + u32 tmr_ctrl; /* 0x020 Time-stamp Control register */ + u32 tmr_pevent; /* 0x024 Time-stamp event register */ + u32 tmr_pemask; /* 0x028 Timer event mask register */ + u32 reserved002c[5]; + u32 tctrl; /* 0x040 Transmit control register */ + u32 reserved0044[3]; + u32 rctrl; /* 0x050 Receive control register */ + u32 reserved0054[11]; + u32 igaddr[8]; /* 0x080-0x09C Individual/group address */ + u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */ + u32 reserved00c0[16]; + u32 maccfg1; /* 0x100 MAC configuration #1 */ + u32 maccfg2; /* 0x104 MAC configuration #2 */ + u32 ipgifg; /* 0x108 IPG/IFG */ + u32 hafdup; /* 0x10C Half-duplex */ + u32 maxfrm; /* 0x110 Maximum frame */ + u32 reserved0114[10]; + u32 ifstat; /* 0x13C Interface status */ + u32 macstnaddr1; /* 0x140 Station Address,part 1 */ + u32 macstnaddr2; /* 0x144 Station Address,part 2 */ + struct { + u32 exact_match1; /* octets 1-4 */ + u32 exact_match2; /* octets 5-6 */ + } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */ + u32 reserved01c0[16]; + u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */ + u32 tr127; /* 0x204 Tx and Rx 65 to 
127 byte frame counter */ + u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */ + u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */ + u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */ + u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */ + u32 trmgv; + /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */ + u32 rbyt; /* 0x21C receive byte counter */ + u32 rpkt; /* 0x220 receive packet counter */ + u32 rfcs; /* 0x224 receive FCS error counter */ + u32 rmca; /* 0x228 RMCA Rx multicast packet counter */ + u32 rbca; /* 0x22C Rx broadcast packet counter */ + u32 rxcf; /* 0x230 Rx control frame packet counter */ + u32 rxpf; /* 0x234 Rx pause frame packet counter */ + u32 rxuo; /* 0x238 Rx unknown OP code counter */ + u32 raln; /* 0x23C Rx alignment error counter */ + u32 rflr; /* 0x240 Rx frame length error counter */ + u32 rcde; /* 0x244 Rx code error counter */ + u32 rcse; /* 0x248 Rx carrier sense error counter */ + u32 rund; /* 0x24C Rx undersize packet counter */ + u32 rovr; /* 0x250 Rx oversize packet counter */ + u32 rfrg; /* 0x254 Rx fragments counter */ + u32 rjbr; /* 0x258 Rx jabber counter */ + u32 rdrp; /* 0x25C Rx drop */ + u32 tbyt; /* 0x260 Tx byte counter */ + u32 tpkt; /* 0x264 Tx packet counter */ + u32 tmca; /* 0x268 Tx multicast packet counter */ + u32 tbca; /* 0x26C Tx broadcast packet counter */ + u32 txpf; /* 0x270 Tx pause control frame counter */ + u32 tdfr; /* 0x274 Tx deferral packet counter */ + u32 tedf; /* 0x278 Tx excessive deferral packet counter */ + u32 tscl; /* 0x27C Tx single collision packet counter */ + u32 tmcl; /* 0x280 Tx multiple collision packet counter */ + u32 tlcl; /* 0x284 Tx late collision packet counter */ + u32 txcl; /* 0x288 Tx excessive collision packet counter */ + u32 tncl; /* 0x28C Tx total collision counter */ + u32 reserved0290[1]; + u32 tdrp; /* 0x294 Tx drop frame counter */ + u32 tjbr; /* 0x298 Tx jabber frame counter */ + u32 tfcs; /* 0x29C Tx FCS error counter */ + u32 txcf; /* 0x2A0 Tx control frame counter */ + u32 tovr; /* 0x2A4 Tx oversize frame counter */ + u32 tund; /* 0x2A8 Tx undersize frame counter */ + u32 tfrg; /* 0x2AC Tx fragments frame counter */ + u32 car1; /* 0x2B0 carry register one register* */ + u32 car2; /* 0x2B4 carry register two register* */ + u32 cam1; /* 0x2B8 carry register one mask register */ + u32 cam2; /* 0x2BC carry register two mask register */ + u32 reserved02c0[848]; +}; + +/* struct dtsec_cfg - dTSEC configuration + * Transmit half-duplex flow control, under software control for 10/100-Mbps + * half-duplex media. If set, back pressure is applied to media by raising + * carrier. + * halfdup_retransmit: + * Number of retransmission attempts following a collision. + * If this is exceeded dTSEC aborts transmission due to excessive collisions. + * The standard specifies the attempt limit to be 15. + * halfdup_coll_window: + * The number of bytes of the frame during which collisions may occur. + * The default value of 55 corresponds to the frame byte at the end of the + * standard 512-bit slot time window. If collisions are detected after this + * byte, the late collision event is asserted and transmission of current + * frame is aborted. + * tx_pad_crc: + * Pad and append CRC. If set, the MAC pads all ransmitted short frames and + * appends a CRC to every frame regardless of padding requirement. + * tx_pause_time: + * Transmit pause time value. 
This pause value is used as part of the pause + * frame to be sent when a transmit pause frame is initiated. + * If set to 0 this disables transmission of pause frames. + * preamble_len: + * Length, in bytes, of the preamble field preceding each Ethernet + * start-of-frame delimiter byte. The default value of 0x7 should be used in + * order to guarantee reliable operation with IEEE 802.3 compliant hardware. + * rx_prepend: + * Packet alignment padding length. The specified number of bytes (1-31) + * of zero padding are inserted before the start of each received frame. + * For Ethernet, where optional preamble extraction is enabled, the padding + * appears before the preamble, otherwise the padding precedes the + * layer 2 header. + * + * This structure contains basic dTSEC configuration and must be passed to + * init() function. A default set of configuration values can be + * obtained by calling set_dflts(). + */ +struct dtsec_cfg { + u16 halfdup_retransmit; + u16 halfdup_coll_window; + bool tx_pad_crc; + u16 tx_pause_time; + bool ptp_tsu_en; + bool ptp_exception_en; + u32 preamble_len; + u32 rx_prepend; + u16 tx_pause_time_extd; + u16 maximum_frame; + u32 non_back_to_back_ipg1; + u32 non_back_to_back_ipg2; + u32 min_ifg_enforcement; + u32 back_to_back_ipg; +}; + +struct fman_mac { + /* pointer to dTSEC memory mapped registers */ + struct dtsec_regs __iomem *regs; + /* MAC address of device */ + u64 addr; + /* Ethernet physical interface */ + phy_interface_t phy_if; + u16 max_speed; + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *exception_cb; + fman_mac_exception_cb *event_cb; + /* Number of individual addresses in registers for this station */ + u8 num_of_ind_addr_in_regs; + /* pointer to driver's global address hash table */ + struct eth_hash_t *multicast_addr_hash; + /* pointer to driver's individual address hash table */ + struct eth_hash_t *unicast_addr_hash; + u8 mac_id; + u32 exceptions; + bool ptp_tsu_enabled; + bool en_tsu_err_exeption; + struct dtsec_cfg *dtsec_drv_param; + void *fm; + struct fman_rev_info fm_rev_info; + bool basex_if; + struct phy_device *tbiphy; +}; + +static void set_dflts(struct dtsec_cfg *cfg) +{ + cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT; + cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW; + cfg->tx_pad_crc = true; + cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME; + /* PHY address 0 is reserved (DPAA RM) */ + cfg->rx_prepend = DEFAULT_RX_PREPEND; + cfg->ptp_tsu_en = true; + cfg->ptp_exception_en = true; + cfg->preamble_len = DEFAULT_PREAMBLE_LEN; + cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD; + cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1; + cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2; + cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT; + cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG; + cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME; +} + +static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, + phy_interface_t iface, u16 iface_speed, u8 *macaddr, + u32 exception_mask, u8 tbi_addr) +{ + bool is_rgmii, is_sgmii, is_qsgmii; + int i; + u32 tmp; + + /* Soft reset */ + iowrite32be(MACCFG1_SOFT_RESET, ®s->maccfg1); + iowrite32be(0, ®s->maccfg1); + + /* dtsec_id2 */ + tmp = ioread32be(®s->tsec_id2); + + /* check RGMII support */ + if (iface == PHY_INTERFACE_MODE_RGMII || + iface == PHY_INTERFACE_MODE_RMII) + if (tmp & DTSEC_ID2_INT_REDUCED_OFF) + return -EINVAL; + + if (iface == PHY_INTERFACE_MODE_SGMII || + iface == PHY_INTERFACE_MODE_MII) + if (tmp 
& DTSEC_ID2_INT_REDUCED_OFF) + return -EINVAL; + + is_rgmii = iface == PHY_INTERFACE_MODE_RGMII; + is_sgmii = iface == PHY_INTERFACE_MODE_SGMII; + is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII; + + tmp = 0; + if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII) + tmp |= DTSEC_ECNTRL_GMIIM; + if (is_sgmii) + tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM); + if (is_qsgmii) + tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM | + DTSEC_ECNTRL_QSGMIIM); + if (is_rgmii) + tmp |= DTSEC_ECNTRL_RPM; + if (iface_speed == SPEED_100) + tmp |= DTSEC_ECNTRL_R100M; + + iowrite32be(tmp, ®s->ecntrl); + + tmp = 0; + + if (cfg->tx_pause_time) + tmp |= cfg->tx_pause_time; + if (cfg->tx_pause_time_extd) + tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT; + iowrite32be(tmp, ®s->ptv); + + tmp = 0; + tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK; + /* Accept short frames */ + tmp |= RCTRL_RSF; + + iowrite32be(tmp, ®s->rctrl); + + /* Assign a Phy Address to the TBI (TBIPA). + * Done also in cases where TBI is not selected to avoid conflict with + * the external PHY's Physical address + */ + iowrite32be(tbi_addr, ®s->tbipa); + + iowrite32be(0, ®s->tmr_ctrl); + + if (cfg->ptp_tsu_en) { + tmp = 0; + tmp |= TMR_PEVENT_TSRE; + iowrite32be(tmp, ®s->tmr_pevent); + + if (cfg->ptp_exception_en) { + tmp = 0; + tmp |= TMR_PEMASK_TSREEN; + iowrite32be(tmp, ®s->tmr_pemask); + } + } + + tmp = 0; + tmp |= MACCFG1_RX_FLOW; + tmp |= MACCFG1_TX_FLOW; + iowrite32be(tmp, ®s->maccfg1); + + tmp = 0; + + if (iface_speed < SPEED_1000) + tmp |= MACCFG2_NIBBLE_MODE; + else if (iface_speed == SPEED_1000) + tmp |= MACCFG2_BYTE_MODE; + + tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) & + MACCFG2_PREAMBLE_LENGTH_MASK; + if (cfg->tx_pad_crc) + tmp |= MACCFG2_PAD_CRC_EN; + /* Full Duplex */ + tmp |= MACCFG2_FULL_DUPLEX; + iowrite32be(tmp, ®s->maccfg2); + + tmp = (((cfg->non_back_to_back_ipg1 << + IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT) + & IPGIFG_NON_BACK_TO_BACK_IPG_1) + | ((cfg->non_back_to_back_ipg2 << + IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT) + & IPGIFG_NON_BACK_TO_BACK_IPG_2) + | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT) + & IPGIFG_MIN_IFG_ENFORCEMENT) + | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG)); + iowrite32be(tmp, ®s->ipgifg); + + tmp = 0; + tmp |= HAFDUP_EXCESS_DEFER; + tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT) + & HAFDUP_RETRANSMISSION_MAX); + tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW); + + iowrite32be(tmp, ®s->hafdup); + + /* Initialize Maximum frame length */ + iowrite32be(cfg->maximum_frame, ®s->maxfrm); + + iowrite32be(0xffffffff, ®s->cam1); + iowrite32be(0xffffffff, ®s->cam2); + + iowrite32be(exception_mask, ®s->imask); + + iowrite32be(0xffffffff, ®s->ievent); + + tmp = (u32)((macaddr[5] << 24) | + (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]); + iowrite32be(tmp, ®s->macstnaddr1); + + tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16)); + iowrite32be(tmp, ®s->macstnaddr2); + + /* HASH */ + for (i = 0; i < NUM_OF_HASH_REGS; i++) { + /* Initialize IADDRx */ + iowrite32be(0, ®s->igaddr[i]); + /* Initialize GADDRx */ + iowrite32be(0, ®s->gaddr[i]); + } + + return 0; +} + +static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr) +{ + u32 tmp; + + tmp = (u32)((adr[5] << 24) | + (adr[4] << 16) | (adr[3] << 8) | adr[2]); + iowrite32be(tmp, ®s->macstnaddr1); + + tmp = (u32)((adr[1] << 24) | (adr[0] << 16)); + iowrite32be(tmp, ®s->macstnaddr2); +} + +static void set_bucket(struct dtsec_regs __iomem *regs, int bucket, + bool 
enable) +{ + int reg_idx = (bucket >> 5) & 0xf; + int bit_idx = bucket & 0x1f; + u32 bit_mask = 0x80000000 >> bit_idx; + u32 __iomem *reg; + + if (reg_idx > 7) + reg = ®s->gaddr[reg_idx - 8]; + else + reg = ®s->igaddr[reg_idx]; + + if (enable) + iowrite32be(ioread32be(reg) | bit_mask, reg); + else + iowrite32be(ioread32be(reg) & (~bit_mask), reg); +} + +static int check_init_parameters(struct fman_mac *dtsec) +{ + if (dtsec->max_speed >= SPEED_10000) { + pr_err("1G MAC driver supports 1G or lower speeds\n"); + return -EINVAL; + } + if (dtsec->addr == 0) { + pr_err("Ethernet MAC Must have a valid MAC Address\n"); + return -EINVAL; + } + if ((dtsec->dtsec_drv_param)->rx_prepend > + MAX_PACKET_ALIGNMENT) { + pr_err("packetAlignmentPadding can't be > than %d\n", + MAX_PACKET_ALIGNMENT); + return -EINVAL; + } + if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 > + MAX_INTER_PACKET_GAP) || + ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 > + MAX_INTER_PACKET_GAP) || + ((dtsec->dtsec_drv_param)->back_to_back_ipg > + MAX_INTER_PACKET_GAP)) { + pr_err("Inter packet gap can't be greater than %d\n", + MAX_INTER_PACKET_GAP); + return -EINVAL; + } + if ((dtsec->dtsec_drv_param)->halfdup_retransmit > + MAX_RETRANSMISSION) { + pr_err("maxRetransmission can't be greater than %d\n", + MAX_RETRANSMISSION); + return -EINVAL; + } + if ((dtsec->dtsec_drv_param)->halfdup_coll_window > + MAX_COLLISION_WINDOW) { + pr_err("collisionWindow can't be greater than %d\n", + MAX_COLLISION_WINDOW); + return -EINVAL; + /* If Auto negotiation process is disabled, need to set up the PHY + * using the MII Management Interface + */ + } + if (!dtsec->exception_cb) { + pr_err("uninitialized exception_cb\n"); + return -EINVAL; + } + if (!dtsec->event_cb) { + pr_err("uninitialized event_cb\n"); + return -EINVAL; + } + + return 0; +} + +static int get_exception_flag(enum fman_mac_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FM_MAC_EX_1G_BAB_RX: + bit_mask = DTSEC_IMASK_BREN; + break; + case FM_MAC_EX_1G_RX_CTL: + bit_mask = DTSEC_IMASK_RXCEN; + break; + case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET: + bit_mask = DTSEC_IMASK_GTSCEN; + break; + case FM_MAC_EX_1G_BAB_TX: + bit_mask = DTSEC_IMASK_BTEN; + break; + case FM_MAC_EX_1G_TX_CTL: + bit_mask = DTSEC_IMASK_TXCEN; + break; + case FM_MAC_EX_1G_TX_ERR: + bit_mask = DTSEC_IMASK_TXEEN; + break; + case FM_MAC_EX_1G_LATE_COL: + bit_mask = DTSEC_IMASK_LCEN; + break; + case FM_MAC_EX_1G_COL_RET_LMT: + bit_mask = DTSEC_IMASK_CRLEN; + break; + case FM_MAC_EX_1G_TX_FIFO_UNDRN: + bit_mask = DTSEC_IMASK_XFUNEN; + break; + case FM_MAC_EX_1G_MAG_PCKT: + bit_mask = DTSEC_IMASK_MAGEN; + break; + case FM_MAC_EX_1G_MII_MNG_RD_COMPLET: + bit_mask = DTSEC_IMASK_MMRDEN; + break; + case FM_MAC_EX_1G_MII_MNG_WR_COMPLET: + bit_mask = DTSEC_IMASK_MMWREN; + break; + case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET: + bit_mask = DTSEC_IMASK_GRSCEN; + break; + case FM_MAC_EX_1G_DATA_ERR: + bit_mask = DTSEC_IMASK_TDPEEN; + break; + case FM_MAC_EX_1G_RX_MIB_CNT_OVFL: + bit_mask = DTSEC_IMASK_MSROEN; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static bool is_init_done(struct dtsec_cfg *dtsec_drv_params) +{ + /* Checks if dTSEC driver parameters were initialized */ + if (!dtsec_drv_params) + return true; + + return false; +} + +static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (is_init_done(dtsec->dtsec_drv_param)) + return 0; + + return (u16)ioread32be(®s->maxfrm); +} + +static void 
dtsec_isr(void *handle) +{ + struct fman_mac *dtsec = (struct fman_mac *)handle; + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 event; + + /* do not handle MDIO events */ + event = ioread32be(®s->ievent) & + (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN)); + + event &= ioread32be(®s->imask); + + iowrite32be(event, ®s->ievent); + + if (event & DTSEC_IMASK_BREN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX); + if (event & DTSEC_IMASK_RXCEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL); + if (event & DTSEC_IMASK_GTSCEN) + dtsec->exception_cb(dtsec->dev_id, + FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET); + if (event & DTSEC_IMASK_BTEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX); + if (event & DTSEC_IMASK_TXCEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL); + if (event & DTSEC_IMASK_TXEEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR); + if (event & DTSEC_IMASK_LCEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL); + if (event & DTSEC_IMASK_CRLEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT); + if (event & DTSEC_IMASK_XFUNEN) { + /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */ + if (dtsec->fm_rev_info.major == 2) { + u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i; + /* a. Write 0x00E0_0C00 to DTSEC_ID + * This is a read only register + * b. Read and save the value of TPKT + */ + tpkt1 = ioread32be(®s->tpkt); + + /* c. Read the register at dTSEC address offset 0x32C */ + tmp_reg1 = ioread32be(®s->reserved02c0[27]); + + /* d. Compare bits [9:15] to bits [25:31] of the + * register at address offset 0x32C. + */ + if ((tmp_reg1 & 0x007F0000) != + (tmp_reg1 & 0x0000007F)) { + /* If they are not equal, save the value of + * this register and wait for at least + * MAXFRM*16 ns + */ + usleep_range((u32)(min + (dtsec_get_max_frame_length(dtsec) * + 16 / 1000, 1)), (u32) + (min(dtsec_get_max_frame_length + (dtsec) * 16 / 1000, 1) + 1)); + } + + /* e. Read and save TPKT again and read the register + * at dTSEC address offset 0x32C again + */ + tpkt2 = ioread32be(®s->tpkt); + tmp_reg2 = ioread32be(®s->reserved02c0[27]); + + /* f. Compare the value of TPKT saved in step b to + * value read in step e. Also compare bits [9:15] of + * the register at offset 0x32C saved in step d to the + * value of bits [9:15] saved in step e. If the two + * registers values are unchanged, then the transmit + * portion of the dTSEC controller is locked up and + * the user should proceed to the recover sequence. + */ + if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) == + (tmp_reg2 & 0x007F0000))) { + /* recover sequence */ + + /* a.Write a 1 to RCTRL[GRS] */ + + iowrite32be(ioread32be(®s->rctrl) | + RCTRL_GRS, ®s->rctrl); + + /* b.Wait until IEVENT[GRSC]=1, or at least + * 100 us has elapsed. + */ + for (i = 0; i < 100; i++) { + if (ioread32be(®s->ievent) & + DTSEC_IMASK_GRSCEN) + break; + udelay(1); + } + if (ioread32be(®s->ievent) & + DTSEC_IMASK_GRSCEN) + iowrite32be(DTSEC_IMASK_GRSCEN, + ®s->ievent); + else + pr_debug("Rx lockup due to Tx lockup\n"); + + /* c.Write a 1 to bit n of FM_RSTC + * (offset 0x0CC of FPM) + */ + fman_reset_mac(dtsec->fm, dtsec->mac_id); + + /* d.Wait 4 Tx clocks (32 ns) */ + udelay(1); + + /* e.Write a 0 to bit n of FM_RSTC. 
*/ + /* cleared by FMAN + */ + } + } + + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN); + } + if (event & DTSEC_IMASK_MAGEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT); + if (event & DTSEC_IMASK_GRSCEN) + dtsec->exception_cb(dtsec->dev_id, + FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET); + if (event & DTSEC_IMASK_TDPEEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR); + if (event & DTSEC_IMASK_RDPEEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR); + + /* masked interrupts */ + WARN_ON(event & DTSEC_IMASK_ABRTEN); + WARN_ON(event & DTSEC_IMASK_IFERREN); +} + +static void dtsec_1588_isr(void *handle) +{ + struct fman_mac *dtsec = (struct fman_mac *)handle; + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 event; + + if (dtsec->ptp_tsu_enabled) { + event = ioread32be(®s->tmr_pevent); + event &= ioread32be(®s->tmr_pemask); + + if (event) { + iowrite32be(event, ®s->tmr_pevent); + WARN_ON(event & TMR_PEVENT_TSRE); + dtsec->exception_cb(dtsec->dev_id, + FM_MAC_EX_1G_1588_TS_RX_ERR); + } + } +} + +static void free_init_resources(struct fman_mac *dtsec) +{ + fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_ERR); + fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_NORMAL); + + /* release the driver's group hash table */ + free_hash_table(dtsec->multicast_addr_hash); + dtsec->multicast_addr_hash = NULL; + + /* release the driver's individual hash table */ + free_hash_table(dtsec->unicast_addr_hash); + dtsec->unicast_addr_hash = NULL; +} + +int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val) +{ + if (is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + dtsec->dtsec_drv_param->maximum_frame = new_val; + + return 0; +} + +int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val) +{ + if (is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + dtsec->dtsec_drv_param->tx_pad_crc = new_val; + + return 0; +} + +int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Enable */ + tmp = ioread32be(®s->maccfg1); + if (mode & COMM_MODE_RX) + tmp |= MACCFG1_RX_EN; + if (mode & COMM_MODE_TX) + tmp |= MACCFG1_TX_EN; + + iowrite32be(tmp, ®s->maccfg1); + + /* Graceful start - clear the graceful receive stop bit */ + if (mode & COMM_MODE_TX) + iowrite32be(ioread32be(®s->tctrl) & ~DTSEC_TCTRL_GTS, + ®s->tctrl); + if (mode & COMM_MODE_RX) + iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS, + ®s->rctrl); + + return 0; +} + +int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Gracefull stop - Assert the graceful transmit stop bit */ + if (mode & COMM_MODE_RX) { + tmp = ioread32be(®s->rctrl) | RCTRL_GRS; + iowrite32be(tmp, ®s->rctrl); + + if (dtsec->fm_rev_info.major == 2) + usleep_range(100, 200); + else + udelay(10); + } + + if (mode & COMM_MODE_TX) { + if (dtsec->fm_rev_info.major == 2) + pr_debug("GTS not supported due to DTSEC_A004 errata.\n"); + else + pr_debug("GTS not supported due to DTSEC_A0014 errata.\n"); + } + + tmp = ioread32be(®s->maccfg1); + if (mode & COMM_MODE_RX) + tmp &= ~MACCFG1_RX_EN; + if (mode & COMM_MODE_TX) + tmp &= ~MACCFG1_TX_EN; + + iowrite32be(tmp, ®s->maccfg1); + + return 0; +} + +int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, + u8 __maybe_unused priority, + u16 
pause_time, u16 __maybe_unused thresh_time) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 ptv = 0; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ + if (dtsec->fm_rev_info.major == 2) + if (pause_time <= 320) { + pr_warn("pause-time: %d illegal.Should be > 320\n", + pause_time); + return -EINVAL; + } + + if (pause_time) { + ptv = ioread32be(®s->ptv); + ptv &= PTV_PTE_MASK; + ptv |= pause_time & PTV_PT_MASK; + iowrite32be(ptv, ®s->ptv); + + /* trigger the transmission of a flow-control pause frame */ + iowrite32be(ioread32be(®s->maccfg1) | MACCFG1_TX_FLOW, + ®s->maccfg1); + } else + iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW, + ®s->maccfg1); + + return 0; +} + +int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->maccfg1); + if (en) + tmp |= MACCFG1_RX_FLOW; + else + tmp &= ~MACCFG1_RX_FLOW; + iowrite32be(tmp, ®s->maccfg1); + + return 0; +} + +int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) +{ + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Initialize MAC Station Address registers (1 & 2) + * Station address have to be swapped (big endian to little endian + */ + dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr); + set_mac_address(dtsec->regs, (u8 *)(*enet_addr)); + + return 0; +} + +int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + struct eth_hash_entry *hash_entry; + u64 addr; + s32 bucket; + u32 crc = 0xFFFFFFFF; + bool mcast, ghtx; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false); + mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false); + + /* Cannot handle unicast mac addr when GHTX is on */ + if (ghtx && !mcast) { + pr_err("Could not compute hash bucket\n"); + return -EINVAL; + } + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + + /* considering the 9 highest order bits in crc H[8:0]: + *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register + *and H[5:1] (next 5 bits) identify the hash bit + *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register + *and H[4:0] (next 5 bits) identify the hash bit. + * + *In bucket index output the low 5 bits identify the hash register + *bit, while the higher 4 bits identify the hash register + */ + + if (ghtx) { + bucket = (s32)((crc >> 23) & 0x1ff); + } else { + bucket = (s32)((crc >> 24) & 0xff); + /* if !ghtx and mcast the bit must be set in gaddr instead of + *igaddr. 
+ */ + if (mcast) + bucket += 0x100; + } + + set_bucket(dtsec->regs, bucket, true); + + /* Create element to be added to the driver hash table */ + hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; + INIT_LIST_HEAD(&hash_entry->node); + + if (addr & MAC_GROUP_ADDRESS) + /* Group Address */ + list_add_tail(&hash_entry->node, + &dtsec->multicast_addr_hash->lsts[bucket]); + else + list_add_tail(&hash_entry->node, + &dtsec->unicast_addr_hash->lsts[bucket]); + + return 0; +} + +int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + struct list_head *pos; + struct eth_hash_entry *hash_entry = NULL; + u64 addr; + s32 bucket; + u32 crc = 0xFFFFFFFF; + bool mcast, ghtx; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false); + mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false); + + /* Cannot handle unicast mac addr when GHTX is on */ + if (ghtx && !mcast) { + pr_err("Could not compute hash bucket\n"); + return -EINVAL; + } + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + + if (ghtx) { + bucket = (s32)((crc >> 23) & 0x1ff); + } else { + bucket = (s32)((crc >> 24) & 0xff); + /* if !ghtx and mcast the bit must be set + * in gaddr instead of igaddr. + */ + if (mcast) + bucket += 0x100; + } + + if (addr & MAC_GROUP_ADDRESS) { + /* Group Address */ + list_for_each(pos, + &dtsec->multicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket])) + set_bucket(dtsec->regs, bucket, false); + } else { + /* Individual Address */ + list_for_each(pos, + &dtsec->unicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket])) + set_bucket(dtsec->regs, bucket, false); + } + + /* address does not exist */ + WARN_ON(!hash_entry); + + return 0; +} + +int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Set unicast promiscuous */ + tmp = ioread32be(®s->rctrl); + if (new_val) + tmp |= RCTRL_UPROM; + else + tmp &= ~RCTRL_UPROM; + + iowrite32be(tmp, ®s->rctrl); + + /* Set multicast promiscuous */ + tmp = ioread32be(®s->rctrl); + if (new_val) + tmp |= RCTRL_MPROM; + else + tmp &= ~RCTRL_MPROM; + + iowrite32be(tmp, ®s->rctrl); + + return 0; +} + +int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->maccfg2); + + /* Full Duplex */ + tmp |= MACCFG2_FULL_DUPLEX; + + tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE); + if (speed < SPEED_1000) + tmp |= MACCFG2_NIBBLE_MODE; + else if (speed == SPEED_1000) + tmp |= MACCFG2_BYTE_MODE; + iowrite32be(tmp, ®s->maccfg2); + + tmp = ioread32be(®s->ecntrl); + if (speed == SPEED_100) + tmp |= DTSEC_ECNTRL_R100M; + else + tmp &= ~DTSEC_ECNTRL_R100M; + iowrite32be(tmp, ®s->ecntrl); + + return 0; +} + +int dtsec_restart_autoneg(struct fman_mac 
*dtsec) +{ + u16 tmp_reg16; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR); + + tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000); + tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART | + BMCR_FULLDPLX | BMCR_SPEED1000); + + phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); + + return 0; +} + +int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + *mac_version = ioread32be(®s->tsec_id); + + return 0; +} + +int dtsec_set_exception(struct fman_mac *dtsec, + enum fman_mac_exceptions exception, bool enable) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 bit_mask = 0; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) { + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + dtsec->exceptions |= bit_mask; + else + dtsec->exceptions &= ~bit_mask; + } else { + pr_err("Undefined exception\n"); + return -EINVAL; + } + if (enable) + iowrite32be(ioread32be(®s->imask) | bit_mask, + ®s->imask); + else + iowrite32be(ioread32be(®s->imask) & ~bit_mask, + ®s->imask); + } else { + if (!dtsec->ptp_tsu_enabled) { + pr_err("Exception valid for 1588 only\n"); + return -EINVAL; + } + switch (exception) { + case FM_MAC_EX_1G_1588_TS_RX_ERR: + if (enable) { + dtsec->en_tsu_err_exeption = true; + iowrite32be(ioread32be(®s->tmr_pemask) | + TMR_PEMASK_TSREEN, + ®s->tmr_pemask); + } else { + dtsec->en_tsu_err_exeption = false; + iowrite32be(ioread32be(®s->tmr_pemask) & + ~TMR_PEMASK_TSREEN, + ®s->tmr_pemask); + } + break; + default: + pr_err("Undefined exception\n"); + return -EINVAL; + } + } + + return 0; +} + +int dtsec_init(struct fman_mac *dtsec) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + struct dtsec_cfg *dtsec_drv_param; + int err; + u16 max_frm_ln; + enet_addr_t eth_addr; + + if (is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + if (DEFAULT_RESET_ON_INIT && + (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) { + pr_err("Can't reset MAC!\n"); + return -EINVAL; + } + + err = check_init_parameters(dtsec); + if (err) + return err; + + dtsec_drv_param = dtsec->dtsec_drv_param; + + MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr); + + err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if, + dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions, + dtsec->tbiphy->mdio.addr); + if (err) { + free_init_resources(dtsec); + pr_err("DTSEC version doesn't support this i/f mode\n"); + return err; + } + + if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) { + u16 tmp_reg16; + + /* Configure the TBI PHY Control Register */ + tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET; + phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); + + tmp_reg16 = TBICON_CLK_SELECT; + phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); + + tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE | + BMCR_FULLDPLX | BMCR_SPEED1000); + phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); + + if (dtsec->basex_if) + tmp_reg16 = TBIANA_1000X; + else + tmp_reg16 = TBIANA_SGMII; + phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16); + + tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART | + BMCR_FULLDPLX | BMCR_SPEED1000); + + phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); + } + + /* Max Frame Length */ + max_frm_ln = (u16)ioread32be(®s->maxfrm); + err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln); + if (err) { + pr_err("Setting max frame length failed\n"); + 
free_init_resources(dtsec); + return -EINVAL; + } + + dtsec->multicast_addr_hash = + alloc_hash_table(EXTENDED_HASH_TABLE_SIZE); + if (!dtsec->multicast_addr_hash) { + free_init_resources(dtsec); + pr_err("MC hash table is failed\n"); + return -ENOMEM; + } + + dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE); + if (!dtsec->unicast_addr_hash) { + free_init_resources(dtsec); + pr_err("UC hash table is failed\n"); + return -ENOMEM; + } + + /* register err intr handler for dtsec to FPM (err) */ + fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec); + /* register 1588 intr handler for TMR to FPM (normal) */ + fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec); + + kfree(dtsec_drv_param); + dtsec->dtsec_drv_param = NULL; + + return 0; +} + +int dtsec_free(struct fman_mac *dtsec) +{ + free_init_resources(dtsec); + + kfree(dtsec->dtsec_drv_param); + dtsec->dtsec_drv_param = NULL; + kfree(dtsec); + + return 0; +} + +struct fman_mac *dtsec_config(struct fman_mac_params *params) +{ + struct fman_mac *dtsec; + struct dtsec_cfg *dtsec_drv_param; + void __iomem *base_addr; + + base_addr = params->base_addr; + + /* allocate memory for the UCC GETH data structure. */ + dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL); + if (!dtsec) + return NULL; + + /* allocate memory for the d_tsec driver parameters data structure. */ + dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL); + if (!dtsec_drv_param) + goto err_dtsec; + + /* Plant parameter structure pointer */ + dtsec->dtsec_drv_param = dtsec_drv_param; + + set_dflts(dtsec_drv_param); + + dtsec->regs = base_addr; + dtsec->addr = ENET_ADDR_TO_UINT64(params->addr); + dtsec->max_speed = params->max_speed; + dtsec->phy_if = params->phy_if; + dtsec->mac_id = params->mac_id; + dtsec->exceptions = (DTSEC_IMASK_BREN | + DTSEC_IMASK_RXCEN | + DTSEC_IMASK_BTEN | + DTSEC_IMASK_TXCEN | + DTSEC_IMASK_TXEEN | + DTSEC_IMASK_ABRTEN | + DTSEC_IMASK_LCEN | + DTSEC_IMASK_CRLEN | + DTSEC_IMASK_XFUNEN | + DTSEC_IMASK_IFERREN | + DTSEC_IMASK_MAGEN | + DTSEC_IMASK_TDPEEN | + DTSEC_IMASK_RDPEEN); + dtsec->exception_cb = params->exception_cb; + dtsec->event_cb = params->event_cb; + dtsec->dev_id = params->dev_id; + dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en; + dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en; + + dtsec->fm = params->fm; + dtsec->basex_if = params->basex_if; + + if (!params->internal_phy_node) { + pr_err("TBI PHY node is not available\n"); + goto err_dtsec_drv_param; + } + + dtsec->tbiphy = of_phy_find_device(params->internal_phy_node); + if (!dtsec->tbiphy) { + pr_err("of_phy_find_device (TBI PHY) failed\n"); + put_device(&dtsec->tbiphy->mdio.dev); + goto err_dtsec_drv_param; + } + + put_device(&dtsec->tbiphy->mdio.dev); + + /* Save FMan revision */ + fman_get_revision(dtsec->fm, &dtsec->fm_rev_info); + + return dtsec; + +err_dtsec_drv_param: + kfree(dtsec_drv_param); +err_dtsec: + kfree(dtsec); + return NULL; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h new file mode 100644 index 000000000000..c4467c072058 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -0,0 +1,59 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
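
One fragile spot in dtsec_config() above: when of_phy_find_device() returns NULL, the error branch still calls put_device() on a pointer derived from the NULL tbiphy. A safer shape for that branch could look like this (a sketch of the same lines, not the code this patch actually adds):

        dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
        if (!dtsec->tbiphy) {
                pr_err("of_phy_find_device (TBI PHY) failed\n");
                goto err_dtsec_drv_param;  /* nothing acquired, nothing to put */
        }
        /* drop the extra reference taken by of_phy_find_device() */
        put_device(&dtsec->tbiphy->mdio.dev);
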
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DTSEC_H +#define __DTSEC_H + +#include "fman_mac.h" + +struct fman_mac *dtsec_config(struct fman_mac_params *params); +int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val); +int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr); +int dtsec_adjust_link(struct fman_mac *dtsec, + u16 speed); +int dtsec_restart_autoneg(struct fman_mac *dtsec); +int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val); +int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val); +int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode); +int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode); +int dtsec_init(struct fman_mac *dtsec); +int dtsec_free(struct fman_mac *dtsec); +int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en); +int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority, + u16 pause_time, u16 thresh_time); +int dtsec_set_exception(struct fman_mac *dtsec, + enum fman_mac_exceptions exception, bool enable); +int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); +int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); +int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); + +#endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h new file mode 100644 index 000000000000..8ddeedbcef9c --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -0,0 +1,278 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
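
The prototypes above are the full surface used to drive a dTSEC instance. A rough bring-up sketch built only on those declarations (illustrative; the wrapper name is invented and error handling is abbreviated):

#include "fman_dtsec.h"

static struct fman_mac *example_dtsec_bringup(struct fman_mac_params *params)
{
        struct fman_mac *mac = dtsec_config(params);

        if (!mac)
                return NULL;

        /* config-stage knobs must be set before dtsec_init() */
        if (dtsec_cfg_pad_and_crc(mac, true) || dtsec_init(mac) ||
            dtsec_enable(mac, COMM_MODE_RX_AND_TX)) {
                dtsec_free(mac);
                return NULL;
        }
        return mac;
}
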
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* FM MAC ... */ +#ifndef __FM_MAC_H +#define __FM_MAC_H + +#include "fman.h" + +#include <linux/slab.h> +#include <linux/phy.h> +#include <linux/if_ether.h> + +struct fman_mac; + +/* Ethernet Address */ +typedef u8 enet_addr_t[ETH_ALEN]; + +#define ENET_ADDR_TO_UINT64(_enet_addr) \ + (u64)(((u64)(_enet_addr)[0] << 40) | \ + ((u64)(_enet_addr)[1] << 32) | \ + ((u64)(_enet_addr)[2] << 24) | \ + ((u64)(_enet_addr)[3] << 16) | \ + ((u64)(_enet_addr)[4] << 8) | \ + ((u64)(_enet_addr)[5])) + +#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \ + do { \ + int i; \ + for (i = 0; i < ETH_ALEN; i++) \ + (_enet_addr)[i] = \ + (u8)((_addr64) >> ((5 - i) * 8)); \ + } while (0) + +/* defaults */ +#define DEFAULT_RESET_ON_INIT false + +/* PFC defines */ +#define FSL_FM_PAUSE_TIME_ENABLE 0xf000 +#define FSL_FM_PAUSE_TIME_DISABLE 0 +#define FSL_FM_PAUSE_THRESH_DEFAULT 0 + +#define FM_MAC_NO_PFC 0xff + +/* HASH defines */ +#define ETH_HASH_ENTRY_OBJ(ptr) \ + hlist_entry_safe(ptr, struct eth_hash_entry, node) + +/* Enumeration (bit flags) of communication modes (Transmit, + * receive or both). 
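
A quick worked example of the two address macros above (not part of the patch): packing puts the first octet of the enet_addr_t in the most significant used byte of the u64, and the unpack macro reverses it.

static void example_enet_addr_roundtrip(void)
{
        enet_addr_t mac = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
        enet_addr_t copy;
        u64 packed = ENET_ADDR_TO_UINT64(mac);

        /* packed == 0x00049f010203ULL for the address above */
        MAKE_ENET_ADDR_FROM_UINT64(packed, copy);
        /* copy is 00:04:9f:01:02:03 again */
}
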
+ */ +enum comm_mode { + COMM_MODE_NONE = 0, /* No transmit/receive communication */ + COMM_MODE_RX = 1, /* Only receive communication */ + COMM_MODE_TX = 2, /* Only transmit communication */ + COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */ +}; + +/* FM MAC Exceptions */ +enum fman_mac_exceptions { + FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0 + /* 10GEC MDIO scan event interrupt */ + , FM_MAC_EX_10G_MDIO_CMD_CMPL + /* 10GEC MDIO command completion interrupt */ + , FM_MAC_EX_10G_REM_FAULT + /* 10GEC, mEMAC Remote fault interrupt */ + , FM_MAC_EX_10G_LOC_FAULT + /* 10GEC, mEMAC Local fault interrupt */ + , FM_MAC_EX_10G_TX_ECC_ER + /* 10GEC, mEMAC Transmit frame ECC error interrupt */ + , FM_MAC_EX_10G_TX_FIFO_UNFL + /* 10GEC, mEMAC Transmit FIFO underflow interrupt */ + , FM_MAC_EX_10G_TX_FIFO_OVFL + /* 10GEC, mEMAC Transmit FIFO overflow interrupt */ + , FM_MAC_EX_10G_TX_ER + /* 10GEC Transmit frame error interrupt */ + , FM_MAC_EX_10G_RX_FIFO_OVFL + /* 10GEC, mEMAC Receive FIFO overflow interrupt */ + , FM_MAC_EX_10G_RX_ECC_ER + /* 10GEC, mEMAC Receive frame ECC error interrupt */ + , FM_MAC_EX_10G_RX_JAB_FRM + /* 10GEC Receive jabber frame interrupt */ + , FM_MAC_EX_10G_RX_OVRSZ_FRM + /* 10GEC Receive oversized frame interrupt */ + , FM_MAC_EX_10G_RX_RUNT_FRM + /* 10GEC Receive runt frame interrupt */ + , FM_MAC_EX_10G_RX_FRAG_FRM + /* 10GEC Receive fragment frame interrupt */ + , FM_MAC_EX_10G_RX_LEN_ER + /* 10GEC Receive payload length error interrupt */ + , FM_MAC_EX_10G_RX_CRC_ER + /* 10GEC Receive CRC error interrupt */ + , FM_MAC_EX_10G_RX_ALIGN_ER + /* 10GEC Receive alignment error interrupt */ + , FM_MAC_EX_1G_BAB_RX + /* dTSEC Babbling receive error */ + , FM_MAC_EX_1G_RX_CTL + /* dTSEC Receive control (pause frame) interrupt */ + , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET + /* dTSEC Graceful transmit stop complete */ + , FM_MAC_EX_1G_BAB_TX + /* dTSEC Babbling transmit error */ + , FM_MAC_EX_1G_TX_CTL + /* dTSEC Transmit control (pause frame) interrupt */ + , FM_MAC_EX_1G_TX_ERR + /* dTSEC Transmit error */ + , FM_MAC_EX_1G_LATE_COL + /* dTSEC Late collision */ + , FM_MAC_EX_1G_COL_RET_LMT + /* dTSEC Collision retry limit */ + , FM_MAC_EX_1G_TX_FIFO_UNDRN + /* dTSEC Transmit FIFO underrun */ + , FM_MAC_EX_1G_MAG_PCKT + /* dTSEC Magic Packet detection */ + , FM_MAC_EX_1G_MII_MNG_RD_COMPLET + /* dTSEC MII management read completion */ + , FM_MAC_EX_1G_MII_MNG_WR_COMPLET + /* dTSEC MII management write completion */ + , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET + /* dTSEC Graceful receive stop complete */ + , FM_MAC_EX_1G_DATA_ERR + /* dTSEC Internal data error on transmit */ + , FM_MAC_1G_RX_DATA_ERR + /* dTSEC Internal data error on receive */ + , FM_MAC_EX_1G_1588_TS_RX_ERR + /* dTSEC Time-Stamp Receive Error */ + , FM_MAC_EX_1G_RX_MIB_CNT_OVFL + /* dTSEC MIB counter overflow */ + , FM_MAC_EX_TS_FIFO_ECC_ERR + /* mEMAC Time-stamp FIFO ECC error interrupt; + * not supported on T4240/B4860 rev1 chips + */ + , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT + /* mEMAC Magic Packet Indication Interrupt */ +}; + +struct eth_hash_entry { + u64 addr; /* Ethernet Address */ + struct list_head node; +}; + +typedef void (fman_mac_exception_cb)(void *dev_id, + enum fman_mac_exceptions exceptions); + +/* FMan MAC config input */ +struct fman_mac_params { + /* Base of memory mapped FM MAC registers */ + void __iomem *base_addr; + /* MAC address of device; First octet is sent first */ + enet_addr_t addr; + /* MAC ID; numbering of dTSEC and 1G-mEMAC: + * 0 - 
FM_MAX_NUM_OF_1G_MACS; + * numbering of 10G-MAC (TGEC) and 10G-mEMAC: + * 0 - FM_MAX_NUM_OF_10G_MACS + */ + u8 mac_id; + /* PHY interface */ + phy_interface_t phy_if; + /* Note that the speed should indicate the maximum rate that + * this MAC should support rather than the actual speed; + */ + u16 max_speed; + /* A handle to the FM object this port related to */ + void *fm; + /* MDIO exceptions interrupt source - not valid for all + * MACs; MUST be set to 'NO_IRQ' for MACs that don't have + * mdio-irq, or for polling + */ + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */ + fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */ + /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC + * and phy or backplane; Note: 1000BaseX auto-negotiation relates only + * to interface between MAC and phy/backplane, SGMII phy can still + * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps + */ + bool basex_if; + /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */ + struct device_node *internal_phy_node; +}; + +struct eth_hash_t { + u16 size; + struct list_head *lsts; +}; + +static inline struct eth_hash_entry +*dequeue_addr_from_hash_entry(struct list_head *addr_lst) +{ + struct eth_hash_entry *hash_entry = NULL; + + if (!list_empty(addr_lst)) { + hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next); + list_del_init(&hash_entry->node); + } + return hash_entry; +} + +static inline void free_hash_table(struct eth_hash_t *hash) +{ + struct eth_hash_entry *hash_entry; + int i = 0; + + if (hash) { + if (hash->lsts) { + for (i = 0; i < hash->size; i++) { + hash_entry = + dequeue_addr_from_hash_entry(&hash->lsts[i]); + while (hash_entry) { + kfree(hash_entry); + hash_entry = + dequeue_addr_from_hash_entry(&hash-> + lsts[i]); + } + } + + kfree(hash->lsts); + } + + kfree(hash); + } +} + +static inline struct eth_hash_t *alloc_hash_table(u16 size) +{ + u32 i; + struct eth_hash_t *hash; + + /* Allocate address hash table */ + hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + if (!hash) + return NULL; + + hash->size = size; + + hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head), + GFP_KERNEL); + if (!hash->lsts) { + kfree(hash); + return NULL; + } + + for (i = 0; i < hash->size; i++) + INIT_LIST_HEAD(&hash->lsts[i]); + + return hash; +} + +#endif /* __FM_MAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c new file mode 100644 index 000000000000..45e98fd8b79e --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -0,0 +1,1170 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
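
The inline helpers just above implement the bucketed address hash shared by the MAC drivers in this series. A minimal usage sketch (illustrative only; the 64-bucket size and the address are arbitrary, and the includes pulled in by fman_mac.h are assumed):

static int example_hash_usage(void)
{
        struct eth_hash_t *hash = alloc_hash_table(64);
        struct eth_hash_entry *entry;

        if (!hash)
                return -ENOMEM;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                free_hash_table(hash);
                return -ENOMEM;
        }
        entry->addr = 0x00049f010203ULL;
        INIT_LIST_HEAD(&entry->node);
        list_add_tail(&entry->node, &hash->lsts[0]);

        /* free_hash_table() dequeues and kfrees any remaining entries */
        free_hash_table(hash);
        return 0;
}
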
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_memac.h" +#include "fman.h" + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/phy.h> +#include <linux/of_mdio.h> + +/* PCS registers */ +#define MDIO_SGMII_CR 0x00 +#define MDIO_SGMII_DEV_ABIL_SGMII 0x04 +#define MDIO_SGMII_LINK_TMR_L 0x12 +#define MDIO_SGMII_LINK_TMR_H 0x13 +#define MDIO_SGMII_IF_MODE 0x14 + +/* SGMII Control defines */ +#define SGMII_CR_AN_EN 0x1000 +#define SGMII_CR_RESTART_AN 0x0200 +#define SGMII_CR_FD 0x0100 +#define SGMII_CR_SPEED_SEL1_1G 0x0040 +#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \ + SGMII_CR_SPEED_SEL1_1G) + +/* SGMII Device Ability for SGMII defines */ +#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001 +#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0 + +/* Link timer define */ +#define LINK_TMR_L 0xa120 +#define LINK_TMR_H 0x0007 +#define LINK_TMR_L_BASEX 0xaf08 +#define LINK_TMR_H_BASEX 0x002f + +/* SGMII IF Mode defines */ +#define IF_MODE_USE_SGMII_AN 0x0002 +#define IF_MODE_SGMII_EN 0x0001 +#define IF_MODE_SGMII_SPEED_100M 0x0004 +#define IF_MODE_SGMII_SPEED_1G 0x0008 +#define IF_MODE_SGMII_DUPLEX_HALF 0x0010 + +/* Num of additional exact match MAC adr regs */ +#define MEMAC_NUM_OF_PADDRS 7 + +/* Control and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */ +#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */ +#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */ +#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */ +#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */ +#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */ +#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */ +#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */ +#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */ +#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */ +#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */ +#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */ + +/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */ +#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000 +#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF +#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000 +#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000 +#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019 +#define TX_FIFO_SECTIONS_TX_AVAIL_1G 
0x00000020 +#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060 + +#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \ +do { \ + _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \ + ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \ + (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\ + (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\ +} while (0) + +/* Interface Mode Register (IF_MODE) */ + +#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */ +#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */ +#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */ +#define IF_MODE_RGMII 0x00000004 +#define IF_MODE_RGMII_AUTO 0x00008000 +#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */ +#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */ +#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */ +#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */ +#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */ +#define IF_MODE_HD 0x00000040 /* Half duplex operation */ + +/* Hash table Control Register (HASHTABLE_CTRL) */ +#define HASH_CTRL_MCAST_EN 0x00000100 +/* 26-31 Hash table address code */ +#define HASH_CTRL_ADDR_MASK 0x0000003F +/* MAC mcast indication */ +#define GROUP_ADDRESS 0x0000010000000000LL +#define HASH_TABLE_SIZE 64 /* Hash tbl size */ + +/* Interrupt Mask Register (IMASK) */ +#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */ +#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */ +#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */ +#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */ + +#define MEMAC_ALL_ERRS_IMASK \ + ((u32)(MEMAC_IMASK_TSECC_ER | \ + MEMAC_IMASK_TECC_ER | \ + MEMAC_IMASK_RECC_ER | \ + MEMAC_IMASK_MGI)) + +#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). 
Link sync (G) */ +#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */ +#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */ +#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */ +#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/ +#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */ +#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */ +#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */ +#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */ +#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */ +#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */ +#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */ +#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */ +#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */ +#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */ +#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */ +#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */ + +#define DEFAULT_PAUSE_QUANTA 0xf000 +#define DEFAULT_FRAME_LENGTH 0x600 +#define DEFAULT_TX_IPG_LENGTH 12 + +#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF +#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000 +#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF +#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000 + +struct mac_addr { + /* Lower 32 bits of 48-bit MAC address */ + u32 mac_addr_l; + /* Upper 16 bits of 48-bit MAC address */ + u32 mac_addr_u; +}; + +/* memory map */ +struct memac_regs { + u32 res0000[2]; /* General Control and Status */ + u32 command_config; /* 0x008 Ctrl and cfg */ + struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */ + u32 maxfrm; /* 0x014 Max frame length */ + u32 res0018[1]; + u32 rx_fifo_sections; /* Receive FIFO configuration reg */ + u32 tx_fifo_sections; /* Transmit FIFO configuration reg */ + u32 res0024[2]; + u32 hashtable_ctrl; /* 0x02C Hash table control */ + u32 res0030[4]; + u32 ievent; /* 0x040 Interrupt event */ + u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */ + u32 res0048; + u32 imask; /* 0x04C Interrupt mask */ + u32 res0050; + u32 pause_quanta[4]; /* 0x054 Pause quanta */ + u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */ + u32 rx_pause_status; /* 0x074 Receive pause status */ + u32 res0078[2]; + struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */ + u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */ + u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */ + u32 res00c0[8]; + u32 statn_config; /* 0x0E0 Statistics configuration */ + u32 res00e4[7]; + /* Rx Statistics Counter */ + u32 reoct_l; + u32 reoct_u; + u32 roct_l; + u32 roct_u; + u32 raln_l; + u32 raln_u; + u32 rxpf_l; + u32 rxpf_u; + u32 rfrm_l; + u32 rfrm_u; + u32 rfcs_l; + u32 rfcs_u; + u32 rvlan_l; + u32 rvlan_u; + u32 rerr_l; + u32 rerr_u; + u32 ruca_l; + u32 ruca_u; + u32 rmca_l; + u32 rmca_u; + u32 rbca_l; + u32 rbca_u; + u32 rdrp_l; + u32 rdrp_u; + u32 rpkt_l; + u32 rpkt_u; + u32 rund_l; + u32 rund_u; + u32 r64_l; + u32 r64_u; + u32 r127_l; + u32 r127_u; + u32 r255_l; + u32 r255_u; + u32 r511_l; + u32 r511_u; + u32 r1023_l; + u32 r1023_u; + u32 r1518_l; + u32 r1518_u; + u32 r1519x_l; + u32 r1519x_u; + u32 rovr_l; + u32 rovr_u; + u32 rjbr_l; + u32 rjbr_u; + u32 rfrg_l; + u32 rfrg_u; + u32 rcnp_l; + u32 rcnp_u; + u32 rdrntp_l; + u32 rdrntp_u; + u32 res01d0[12]; + /* Tx Statistics Counter */ + u32 teoct_l; + u32 teoct_u; + u32 toct_l; + u32 toct_u; + u32 res0210[2]; + u32 txpf_l; + u32 txpf_u; + u32 
tfrm_l; + u32 tfrm_u; + u32 tfcs_l; + u32 tfcs_u; + u32 tvlan_l; + u32 tvlan_u; + u32 terr_l; + u32 terr_u; + u32 tuca_l; + u32 tuca_u; + u32 tmca_l; + u32 tmca_u; + u32 tbca_l; + u32 tbca_u; + u32 res0258[2]; + u32 tpkt_l; + u32 tpkt_u; + u32 tund_l; + u32 tund_u; + u32 t64_l; + u32 t64_u; + u32 t127_l; + u32 t127_u; + u32 t255_l; + u32 t255_u; + u32 t511_l; + u32 t511_u; + u32 t1023_l; + u32 t1023_u; + u32 t1518_l; + u32 t1518_u; + u32 t1519x_l; + u32 t1519x_u; + u32 res02a8[6]; + u32 tcnp_l; + u32 tcnp_u; + u32 res02c8[14]; + /* Line Interface Control */ + u32 if_mode; /* 0x300 Interface Mode Control */ + u32 if_status; /* 0x304 Interface Status */ + u32 res0308[14]; + /* HiGig/2 */ + u32 hg_config; /* 0x340 Control and cfg */ + u32 res0344[3]; + u32 hg_pause_quanta; /* 0x350 Pause quanta */ + u32 res0354[3]; + u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */ + u32 res0364[3]; + u32 hgrx_pause_status; /* 0x370 Receive pause status */ + u32 hg_fifos_status; /* 0x374 fifos status */ + u32 rhm; /* 0x378 rx messages counter */ + u32 thm; /* 0x37C tx messages counter */ +}; + +struct memac_cfg { + bool reset_on_init; + bool pause_ignore; + bool promiscuous_mode_enable; + struct fixed_phy_status *fixed_link; + u16 max_frame_length; + u16 pause_quanta; + u32 tx_ipg_length; +}; + +struct fman_mac { + /* Pointer to MAC memory mapped registers */ + struct memac_regs __iomem *regs; + /* MAC address of device */ + u64 addr; + /* Ethernet physical interface */ + phy_interface_t phy_if; + u16 max_speed; + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *exception_cb; + fman_mac_exception_cb *event_cb; + /* Pointer to driver's global address hash table */ + struct eth_hash_t *multicast_addr_hash; + /* Pointer to driver's individual address hash table */ + struct eth_hash_t *unicast_addr_hash; + u8 mac_id; + u32 exceptions; + struct memac_cfg *memac_drv_param; + void *fm; + struct fman_rev_info fm_rev_info; + bool basex_if; + struct phy_device *pcsphy; +}; + +static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr, + u8 paddr_num) +{ + u32 tmp0, tmp1; + + tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24); + tmp1 = (u32)(adr[4] | adr[5] << 8); + + if (paddr_num == 0) { + iowrite32be(tmp0, ®s->mac_addr0.mac_addr_l); + iowrite32be(tmp1, ®s->mac_addr0.mac_addr_u); + } else { + iowrite32be(tmp0, ®s->mac_addr[paddr_num - 1].mac_addr_l); + iowrite32be(tmp1, ®s->mac_addr[paddr_num - 1].mac_addr_u); + } +} + +static int reset(struct memac_regs __iomem *regs) +{ + u32 tmp; + int count; + + tmp = ioread32be(®s->command_config); + + tmp |= CMD_CFG_SW_RESET; + + iowrite32be(tmp, ®s->command_config); + + count = 100; + do { + udelay(1); + } while ((ioread32be(®s->command_config) & CMD_CFG_SW_RESET) && + --count); + + if (count == 0) + return -EBUSY; + + return 0; +} + +static void set_exception(struct memac_regs __iomem *regs, u32 val, + bool enable) +{ + u32 tmp; + + tmp = ioread32be(®s->imask); + if (enable) + tmp |= val; + else + tmp &= ~val; + + iowrite32be(tmp, ®s->imask); +} + +static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, + phy_interface_t phy_if, u16 speed, bool slow_10g_if, + u32 exceptions) +{ + u32 tmp; + + /* Config */ + tmp = 0; + if (cfg->promiscuous_mode_enable) + tmp |= CMD_CFG_PROMIS_EN; + if (cfg->pause_ignore) + tmp |= CMD_CFG_PAUSE_IGNORE; + + /* Payload length check disable */ + tmp |= CMD_CFG_NO_LEN_CHK; + /* Enable padding of frames in transmit direction */ + tmp |= CMD_CFG_TX_PAD_EN; + + tmp |= 
CMD_CFG_CRC_FWD; + + iowrite32be(tmp, ®s->command_config); + + /* Max Frame Length */ + iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm); + + /* Pause Time */ + iowrite32be((u32)cfg->pause_quanta, ®s->pause_quanta[0]); + iowrite32be((u32)0, ®s->pause_thresh[0]); + + /* IF_MODE */ + tmp = 0; + switch (phy_if) { + case PHY_INTERFACE_MODE_XGMII: + tmp |= IF_MODE_XGMII; + break; + default: + tmp |= IF_MODE_GMII; + if (phy_if == PHY_INTERFACE_MODE_RGMII) + tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO; + } + iowrite32be(tmp, ®s->if_mode); + + /* TX_FIFO_SECTIONS */ + tmp = 0; + if (phy_if == PHY_INTERFACE_MODE_XGMII) { + if (slow_10g_if) { + tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); + } else { + tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); + } + } else { + tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G); + } + iowrite32be(tmp, ®s->tx_fifo_sections); + + /* clear all pending events and set-up interrupts */ + iowrite32be(0xffffffff, ®s->ievent); + set_exception(regs, exceptions, true); + + return 0; +} + +static void set_dflts(struct memac_cfg *cfg) +{ + cfg->reset_on_init = false; + cfg->promiscuous_mode_enable = false; + cfg->pause_ignore = false; + cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH; + cfg->max_frame_length = DEFAULT_FRAME_LENGTH; + cfg->pause_quanta = DEFAULT_PAUSE_QUANTA; +} + +static u32 get_mac_addr_hash_code(u64 eth_addr) +{ + u64 mask1, mask2; + u32 xor_val = 0; + u8 i, j; + + for (i = 0; i < 6; i++) { + mask1 = eth_addr & (u64)0x01; + eth_addr >>= 1; + + for (j = 0; j < 7; j++) { + mask2 = eth_addr & (u64)0x01; + mask1 ^= mask2; + eth_addr >>= 1; + } + + xor_val |= (mask1 << (5 - i)); + } + + return xor_val; +} + +static void setup_sgmii_internal_phy(struct fman_mac *memac, + struct fixed_phy_status *fixed_link) +{ + u16 tmp_reg16; + + /* SGMII mode */ + tmp_reg16 = IF_MODE_SGMII_EN; + if (!fixed_link) + /* AN enable */ + tmp_reg16 |= IF_MODE_USE_SGMII_AN; + else { + switch (fixed_link->speed) { + case 10: + /* For 10M: IF_MODE[SPEED_10M] = 0 */ + break; + case 100: + tmp_reg16 |= IF_MODE_SGMII_SPEED_100M; + break; + case 1000: /* fallthrough */ + default: + tmp_reg16 |= IF_MODE_SGMII_SPEED_1G; + break; + } + if (!fixed_link->duplex) + tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF; + } + phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16); + + /* Device ability according to SGMII specification */ + tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE; + phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); + + /* Adjust link timer for SGMII - + * According to Cisco SGMII specification the timer should be 1.6 ms. + * The link_timer register is configured in units of the clock. + * - When running as 1G SGMII, Serdes clock is 125 MHz, so + * unit = 1 / (125*10^6 Hz) = 8 ns. + * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40 + * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so + * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. + * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120. + * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, + * we always set up here a value of 2.5 SGMII. 
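+	 * Hence the values written below: 0x7a120 split across the two
+	 * 16-bit link timer registers gives LINK_TMR_H = 0x0007 and
+	 * LINK_TMR_L = 0xa120.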
+ */ + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H); + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L); + + if (!fixed_link) + /* Restart AN */ + tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; + else + /* AN disabled */ + tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN; + phy_write(memac->pcsphy, 0x0, tmp_reg16); +} + +static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac) +{ + u16 tmp_reg16; + + /* AN Device capability */ + tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE; + phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); + + /* Adjust link timer for SGMII - + * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms. + * The link_timer register is configured in units of the clock. + * - When running as 1G SGMII, Serdes clock is 125 MHz, so + * unit = 1 / (125*10^6 Hz) = 8 ns. + * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0 + * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so + * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. + * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08. + * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, + * we always set up here a value of 2.5 SGMII. + */ + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX); + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX); + + /* Restart AN */ + tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; + phy_write(memac->pcsphy, 0x0, tmp_reg16); +} + +static int check_init_parameters(struct fman_mac *memac) +{ + if (memac->addr == 0) { + pr_err("Ethernet MAC must have a valid MAC address\n"); + return -EINVAL; + } + if (!memac->exception_cb) { + pr_err("Uninitialized exception handler\n"); + return -EINVAL; + } + if (!memac->event_cb) { + pr_warn("Uninitialize event handler\n"); + return -EINVAL; + } + + return 0; +} + +static int get_exception_flag(enum fman_mac_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FM_MAC_EX_10G_TX_ECC_ER: + bit_mask = MEMAC_IMASK_TECC_ER; + break; + case FM_MAC_EX_10G_RX_ECC_ER: + bit_mask = MEMAC_IMASK_RECC_ER; + break; + case FM_MAC_EX_TS_FIFO_ECC_ERR: + bit_mask = MEMAC_IMASK_TSECC_ER; + break; + case FM_MAC_EX_MAGIC_PACKET_INDICATION: + bit_mask = MEMAC_IMASK_MGI; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static void memac_err_exception(void *handle) +{ + struct fman_mac *memac = (struct fman_mac *)handle; + struct memac_regs __iomem *regs = memac->regs; + u32 event, imask; + + event = ioread32be(®s->ievent); + imask = ioread32be(®s->imask); + + /* Imask include both error and notification/event bits. + * Leaving only error bits enabled by imask. + * The imask error bits are shifted by 16 bits offset from + * their corresponding location in the ievent - hence the >> 16 + */ + event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16); + + iowrite32be(event, ®s->ievent); + + if (event & MEMAC_IEVNT_TS_ECC_ER) + memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR); + if (event & MEMAC_IEVNT_TX_ECC_ER) + memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER); + if (event & MEMAC_IEVNT_RX_ECC_ER) + memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER); +} + +static void memac_exception(void *handle) +{ + struct fman_mac *memac = (struct fman_mac *)handle; + struct memac_regs __iomem *regs = memac->regs; + u32 event, imask; + + event = ioread32be(®s->ievent); + imask = ioread32be(®s->imask); + + /* Imask include both error and notification/event bits. + * Leaving only error bits enabled by imask. 
+ * The imask error bits are shifted by 16 bits offset from + * their corresponding location in the ievent - hence the >> 16 + */ + event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16); + + iowrite32be(event, ®s->ievent); + + if (event & MEMAC_IEVNT_MGI) + memac->exception_cb(memac->dev_id, + FM_MAC_EX_MAGIC_PACKET_INDICATION); +} + +static void free_init_resources(struct fman_mac *memac) +{ + fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_ERR); + + fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_NORMAL); + + /* release the driver's group hash table */ + free_hash_table(memac->multicast_addr_hash); + memac->multicast_addr_hash = NULL; + + /* release the driver's individual hash table */ + free_hash_table(memac->unicast_addr_hash); + memac->unicast_addr_hash = NULL; +} + +static bool is_init_done(struct memac_cfg *memac_drv_params) +{ + /* Checks if mEMAC driver parameters were initialized */ + if (!memac_drv_params) + return true; + + return false; +} + +int memac_enable(struct fman_mac *memac, enum comm_mode mode) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp |= CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp |= CMD_CFG_TX_EN; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_disable(struct fman_mac *memac, enum comm_mode mode) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp &= ~CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp &= ~CMD_CFG_TX_EN; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_set_promiscuous(struct fman_mac *memac, bool new_val) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (new_val) + tmp |= CMD_CFG_PROMIS_EN; + else + tmp &= ~CMD_CFG_PROMIS_EN; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_adjust_link(struct fman_mac *memac, u16 speed) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->if_mode); + + /* Set full duplex */ + tmp &= ~IF_MODE_HD; + + if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) { + /* Configure RGMII in manual mode */ + tmp &= ~IF_MODE_RGMII_AUTO; + tmp &= ~IF_MODE_RGMII_SP_MASK; + /* Full duplex */ + tmp |= IF_MODE_RGMII_FD; + + switch (speed) { + case SPEED_1000: + tmp |= IF_MODE_RGMII_1000; + break; + case SPEED_100: + tmp |= IF_MODE_RGMII_100; + break; + case SPEED_10: + tmp |= IF_MODE_RGMII_10; + break; + default: + break; + } + } + + iowrite32be(tmp, ®s->if_mode); + + return 0; +} + +int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val) +{ + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + memac->memac_drv_param->max_frame_length = new_val; + + return 0; +} + +int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable) +{ + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + memac->memac_drv_param->reset_on_init = enable; + + return 0; +} + +int memac_cfg_fixed_link(struct fman_mac *memac, + struct fixed_phy_status *fixed_link) +{ + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + memac->memac_drv_param->fixed_link = fixed_link; + + 
return 0; +} + +int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, + u16 pause_time, u16 thresh_time) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->tx_fifo_sections); + + GET_TX_EMPTY_DEFAULT_VALUE(tmp); + iowrite32be(tmp, ®s->tx_fifo_sections); + + tmp = ioread32be(®s->command_config); + tmp &= ~CMD_CFG_PFC_MODE; + priority = 0; + + iowrite32be(tmp, ®s->command_config); + + tmp = ioread32be(®s->pause_quanta[priority / 2]); + if (priority % 2) + tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT; + else + tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT; + tmp |= ((u32)pause_time << (16 * (priority % 2))); + iowrite32be(tmp, ®s->pause_quanta[priority / 2]); + + tmp = ioread32be(®s->pause_thresh[priority / 2]); + if (priority % 2) + tmp &= CLXY_PAUSE_THRESH_CLX_QTH; + else + tmp &= CLXY_PAUSE_THRESH_CLY_QTH; + tmp |= ((u32)thresh_time << (16 * (priority % 2))); + iowrite32be(tmp, ®s->pause_thresh[priority / 2]); + + return 0; +} + +int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (en) + tmp &= ~CMD_CFG_PAUSE_IGNORE; + else + tmp |= CMD_CFG_PAUSE_IGNORE; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr) +{ + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0); + + return 0; +} + +int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) +{ + struct memac_regs __iomem *regs = memac->regs; + struct eth_hash_entry *hash_entry; + u32 hash; + u64 addr; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + if (!(addr & GROUP_ADDRESS)) { + /* Unicast addresses not supported in hash */ + pr_err("Unicast Address\n"); + return -EINVAL; + } + hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; + + /* Create element to be added to the driver hash table */ + hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; + INIT_LIST_HEAD(&hash_entry->node); + + list_add_tail(&hash_entry->node, + &memac->multicast_addr_hash->lsts[hash]); + iowrite32be(hash | HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl); + + return 0; +} + +int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) +{ + struct memac_regs __iomem *regs = memac->regs; + struct eth_hash_entry *hash_entry = NULL; + struct list_head *pos; + u32 hash; + u64 addr; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; + + list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&memac->multicast_addr_hash->lsts[hash])) + iowrite32be(hash & ~HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl); + + return 0; +} + +int memac_set_exception(struct fman_mac *memac, + enum fman_mac_exceptions exception, bool enable) +{ + u32 bit_mask = 0; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + memac->exceptions |= 
bit_mask; + else + memac->exceptions &= ~bit_mask; + } else { + pr_err("Undefined exception\n"); + return -EINVAL; + } + set_exception(memac->regs, bit_mask, enable); + + return 0; +} + +int memac_init(struct fman_mac *memac) +{ + struct memac_cfg *memac_drv_param; + u8 i; + enet_addr_t eth_addr; + bool slow_10g_if = false; + struct fixed_phy_status *fixed_link; + int err; + u32 reg32 = 0; + + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + err = check_init_parameters(memac); + if (err) + return err; + + memac_drv_param = memac->memac_drv_param; + + if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4) + slow_10g_if = true; + + /* First, reset the MAC if desired. */ + if (memac_drv_param->reset_on_init) { + err = reset(memac->regs); + if (err) { + pr_err("mEMAC reset failed\n"); + return err; + } + } + + /* MAC Address */ + MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr); + add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0); + + fixed_link = memac_drv_param->fixed_link; + + init(memac->regs, memac->memac_drv_param, memac->phy_if, + memac->max_speed, slow_10g_if, memac->exceptions); + + /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround + * Exists only in FMan 6.0 and 6.3. + */ + if ((memac->fm_rev_info.major == 6) && + ((memac->fm_rev_info.minor == 0) || + (memac->fm_rev_info.minor == 3))) { + /* MAC strips CRC from received frames - this workaround + * should decrease the likelihood of bug appearance + */ + reg32 = ioread32be(&memac->regs->command_config); + reg32 &= ~CMD_CFG_CRC_FWD; + iowrite32be(reg32, &memac->regs->command_config); + } + + if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) { + /* Configure internal SGMII PHY */ + if (memac->basex_if) + setup_sgmii_internal_phy_base_x(memac); + else + setup_sgmii_internal_phy(memac, fixed_link); + } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) { + /* Configure 4 internal SGMII PHYs */ + for (i = 0; i < 4; i++) { + u8 qsmgii_phy_addr, phy_addr; + /* QSGMII PHY address occupies 3 upper bits of 5-bit + * phy_address; the lower 2 bits are used to extend + * register address space and access each one of 4 + * ports inside QSGMII. 
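+			 * For example, a PCS PHY at MDIO address 0x02 would
+			 * yield QSGMII port addresses (0x02 << 2) | i, i.e.
+			 * 0x08 through 0x0B for ports 0-3.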
+ */ + phy_addr = memac->pcsphy->mdio.addr; + qsmgii_phy_addr = (u8)((phy_addr << 2) | i); + memac->pcsphy->mdio.addr = qsmgii_phy_addr; + if (memac->basex_if) + setup_sgmii_internal_phy_base_x(memac); + else + setup_sgmii_internal_phy(memac, fixed_link); + + memac->pcsphy->mdio.addr = phy_addr; + } + } + + /* Max Frame Length */ + err = fman_set_mac_max_frame(memac->fm, memac->mac_id, + memac_drv_param->max_frame_length); + if (err) { + pr_err("settings Mac max frame length is FAILED\n"); + return err; + } + + memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE); + if (!memac->multicast_addr_hash) { + free_init_resources(memac); + pr_err("allocation hash table is FAILED\n"); + return -ENOMEM; + } + + memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE); + if (!memac->unicast_addr_hash) { + free_init_resources(memac); + pr_err("allocation hash table is FAILED\n"); + return -ENOMEM; + } + + fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_ERR, memac_err_exception, memac); + + fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_NORMAL, memac_exception, memac); + + kfree(memac_drv_param); + memac->memac_drv_param = NULL; + + return 0; +} + +int memac_free(struct fman_mac *memac) +{ + free_init_resources(memac); + + kfree(memac->memac_drv_param); + kfree(memac); + + return 0; +} + +struct fman_mac *memac_config(struct fman_mac_params *params) +{ + struct fman_mac *memac; + struct memac_cfg *memac_drv_param; + void __iomem *base_addr; + + base_addr = params->base_addr; + /* allocate memory for the m_emac data structure */ + memac = kzalloc(sizeof(*memac), GFP_KERNEL); + if (!memac) + return NULL; + + /* allocate memory for the m_emac driver parameters data structure */ + memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL); + if (!memac_drv_param) { + memac_free(memac); + return NULL; + } + + /* Plant parameter structure pointer */ + memac->memac_drv_param = memac_drv_param; + + set_dflts(memac_drv_param); + + memac->addr = ENET_ADDR_TO_UINT64(params->addr); + + memac->regs = base_addr; + memac->max_speed = params->max_speed; + memac->phy_if = params->phy_if; + memac->mac_id = params->mac_id; + memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | + MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI); + memac->exception_cb = params->exception_cb; + memac->event_cb = params->event_cb; + memac->dev_id = params->dev_id; + memac->fm = params->fm; + memac->basex_if = params->basex_if; + + /* Save FMan revision */ + fman_get_revision(memac->fm, &memac->fm_rev_info); + + if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) { + if (!params->internal_phy_node) { + pr_err("PCS PHY node is not available\n"); + memac_free(memac); + return NULL; + } + + memac->pcsphy = of_phy_find_device(params->internal_phy_node); + if (!memac->pcsphy) { + pr_err("of_phy_find_device (PCS PHY) failed\n"); + memac_free(memac); + return NULL; + } + } + + return memac; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h new file mode 100644 index 000000000000..173d8e0fd716 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -0,0 +1,60 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEMAC_H +#define __MEMAC_H + +#include "fman_mac.h" + +#include <linux/netdevice.h> + +struct fman_mac *memac_config(struct fman_mac_params *params); +int memac_set_promiscuous(struct fman_mac *memac, bool new_val); +int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr); +int memac_adjust_link(struct fman_mac *memac, u16 speed); +int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val); +int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable); +int memac_cfg_fixed_link(struct fman_mac *memac, + struct fixed_phy_status *fixed_link); +int memac_enable(struct fman_mac *memac, enum comm_mode mode); +int memac_disable(struct fman_mac *memac, enum comm_mode mode); +int memac_init(struct fman_mac *memac); +int memac_free(struct fman_mac *memac); +int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en); +int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, + u16 pause_time, u16 thresh_time); +int memac_set_exception(struct fman_mac *memac, + enum fman_mac_exceptions exception, bool enable); +int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); +int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); + +#endif /* __MEMAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c new file mode 100644 index 000000000000..4eb0e9ac7182 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_muram.c @@ -0,0 +1,158 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fman_muram.h" + +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/genalloc.h> + +struct muram_info { + struct gen_pool *pool; + void __iomem *vbase; + size_t size; + phys_addr_t pbase; +}; + +static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram, + unsigned long vaddr) +{ + return vaddr - (unsigned long)muram->vbase; +} + +/** + * fman_muram_init + * @base: Pointer to base of memory mapped FM-MURAM. + * @size: Size of the FM-MURAM partition. + * + * Creates partition in the MURAM. + * The routine returns a pointer to the MURAM partition. + * This pointer must be passed as to all other FM-MURAM function calls. + * No actual initialization or configuration of FM_MURAM hardware is done by + * this routine. + * + * Return: pointer to FM-MURAM object, or NULL for Failure. + */ +struct muram_info *fman_muram_init(phys_addr_t base, size_t size) +{ + struct muram_info *muram; + void __iomem *vaddr; + int ret; + + muram = kzalloc(sizeof(*muram), GFP_KERNEL); + if (!muram) + return NULL; + + muram->pool = gen_pool_create(ilog2(64), -1); + if (!muram->pool) { + pr_err("%s(): MURAM pool create failed\n", __func__); + goto muram_free; + } + + vaddr = ioremap(base, size); + if (!vaddr) { + pr_err("%s(): MURAM ioremap failed\n", __func__); + goto pool_destroy; + } + + ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr, + base, size, -1); + if (ret < 0) { + pr_err("%s(): MURAM pool add failed\n", __func__); + iounmap(vaddr); + goto pool_destroy; + } + + memset_io(vaddr, 0, (int)size); + + muram->vbase = vaddr; + muram->pbase = base; + return muram; + +pool_destroy: + gen_pool_destroy(muram->pool); +muram_free: + kfree(muram); + return NULL; +} + +/** + * fman_muram_offset_to_vbase + * @muram: FM-MURAM module pointer. 
+ * @offset: the offset of the memory block + * + * Gives the address of the memory region from specific offset + * + * Return: The address of the memory block + */ +unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, + unsigned long offset) +{ + return offset + (unsigned long)muram->vbase; +} + +/** + * fman_muram_alloc + * @muram: FM-MURAM module pointer. + * @size: Size of the memory to be allocated. + * + * Allocate some memory from FM-MURAM partition. + * + * Return: address of the allocated memory; NULL otherwise. + */ +int fman_muram_alloc(struct muram_info *muram, size_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(muram->pool, size); + if (!vaddr) + return -ENOMEM; + + memset_io((void __iomem *)vaddr, 0, size); + + return fman_muram_vbase_to_offset(muram, vaddr); +} + +/** + * fman_muram_free_mem + * muram: FM-MURAM module pointer. + * offset: offset of the memory region to be freed. + * size: size of the memory to be freed. + * + * Free an allocated memory from FM-MURAM partition. + */ +void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size) +{ + unsigned long addr = fman_muram_offset_to_vbase(muram, offset); + + gen_pool_free(muram->pool, addr, size); +} diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h new file mode 100644 index 000000000000..dbf0af9e5bb5 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_muram.h @@ -0,0 +1,51 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef __FM_MURAM_EXT +#define __FM_MURAM_EXT + +#include <linux/types.h> + +#define FM_MURAM_INVALID_ALLOCATION -1 + +/* Structure for FM MURAM information */ +struct muram_info; + +struct muram_info *fman_muram_init(phys_addr_t base, size_t size); + +unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, + unsigned long offset); + +int fman_muram_alloc(struct muram_info *muram, size_t size); + +void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size); + +#endif /* __FM_MURAM_EXT */ diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c new file mode 100644 index 000000000000..70c198d072dc --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -0,0 +1,1778 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_port.h" +#include "fman.h" +#include "fman_sp.h" + +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/delay.h> +#include <linux/libfdt_env.h> + +/* Queue ID */ +#define DFLT_FQ_ID 0x00FFFFFF + +/* General defines */ +#define PORT_BMI_FIFO_UNITS 0x100 + +#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \ + min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS) + +#define PORT_CG_MAP_NUM 8 +#define PORT_PRS_RESULT_WORDS_NUM 8 +#define PORT_IC_OFFSET_UNITS 0x10 + +#define MIN_EXT_BUF_SIZE 64 + +#define BMI_PORT_REGS_OFFSET 0 +#define QMI_PORT_REGS_OFFSET 0x400 + +/* Default values */ +#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \ + DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN + +#define DFLT_PORT_CUT_BYTES_FROM_END 4 + +#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD +#define DFLT_PORT_MAX_FRAME_LENGTH 9600 + +#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \ + MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) + +#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \ + (major == 6 ? \ + MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \ + (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4)) \ + +#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0 + +/* QMI defines */ +#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f + +#define QMI_PORT_CFG_EN 0x80000000 +#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000 + +#define QMI_DEQ_CFG_PRI 0x80000000 +#define QMI_DEQ_CFG_TYPE1 0x10000000 +#define QMI_DEQ_CFG_TYPE2 0x20000000 +#define QMI_DEQ_CFG_TYPE3 0x30000000 +#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000 +#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000 +#define QMI_DEQ_CFG_SP_MASK 0xf +#define QMI_DEQ_CFG_SP_SHIFT 20 + +#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \ + (_type == FMAN_PORT_TYPE_TX ? 
0x1400 : 0x400) + +/* BMI defins */ +#define BMI_EBD_EN 0x80000000 + +#define BMI_PORT_CFG_EN 0x80000000 + +#define BMI_PORT_STATUS_BSY 0x80000000 + +#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT +#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE + +#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16 +#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000 + +#define BMI_FRAME_END_CS_IGNORE_SHIFT 24 +#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f + +#define BMI_RX_FRAME_END_CUT_SHIFT 16 +#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f + +#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT +#define BMI_IC_TO_EXT_MASK 0x0000001f +#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT +#define BMI_IC_FROM_INT_MASK 0x0000000f +#define BMI_IC_SIZE_MASK 0x0000001f + +#define BMI_INT_BUF_MARG_SHIFT 28 +#define BMI_INT_BUF_MARG_MASK 0x0000000f +#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT +#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff +#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff + +#define BMI_CMD_MR_LEAC 0x00200000 +#define BMI_CMD_MR_SLEAC 0x00100000 +#define BMI_CMD_MR_MA 0x00080000 +#define BMI_CMD_MR_DEAS 0x00040000 +#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \ + BMI_CMD_MR_SLEAC | \ + BMI_CMD_MR_MA | \ + BMI_CMD_MR_DEAS) +#define BMI_CMD_TX_MR_DEF 0 + +#define BMI_CMD_ATTR_ORDER 0x80000000 +#define BMI_CMD_ATTR_SYNC 0x02000000 +#define BMI_CMD_ATTR_COLOR_SHIFT 26 + +#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12 +#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f +#define BMI_NEXT_ENG_FD_BITS_SHIFT 24 + +#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID +#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER +#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP +#define BMI_EXT_BUF_POOL_ID_SHIFT 16 +#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000 +#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16 + +#define BMI_TX_FIFO_MIN_FILL_SHIFT 16 + +#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS) +#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS) + +#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \ + ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1) + +#define RX_ERRS_TO_ENQ \ + (FM_PORT_FRM_ERR_DMA | \ + FM_PORT_FRM_ERR_PHYSICAL | \ + FM_PORT_FRM_ERR_SIZE | \ + FM_PORT_FRM_ERR_EXTRACTION | \ + FM_PORT_FRM_ERR_NO_SCHEME | \ + FM_PORT_FRM_ERR_PRS_TIMEOUT | \ + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \ + FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \ + FM_PORT_FRM_ERR_PRS_HDR_ERR | \ + FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \ + FM_PORT_FRM_ERR_IPRE) + +/* NIA defines */ +#define NIA_ORDER_RESTOR 0x00800000 +#define NIA_ENG_BMI 0x00500000 +#define NIA_ENG_QMI_ENQ 0x00540000 +#define NIA_ENG_QMI_DEQ 0x00580000 + +#define NIA_BMI_AC_ENQ_FRAME 0x00000002 +#define NIA_BMI_AC_TX_RELEASE 0x000002C0 +#define NIA_BMI_AC_RELEASE 0x000000C0 +#define NIA_BMI_AC_TX 0x00000274 +#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c + +/* Port IDs */ +#define TX_10G_PORT_BASE 0x30 +#define RX_10G_PORT_BASE 0x10 + +/* BMI Rx port register map */ +struct fman_port_rx_bmi_regs { + u32 fmbm_rcfg; /* Rx Configuration */ + u32 fmbm_rst; /* Rx Status */ + u32 fmbm_rda; /* Rx DMA attributes */ + u32 fmbm_rfp; /* Rx FIFO Parameters */ + u32 fmbm_rfed; /* Rx Frame End Data */ + u32 fmbm_ricp; /* Rx Internal Context Parameters */ + u32 fmbm_rim; /* Rx Internal Buffer Margins */ + u32 fmbm_rebm; /* Rx External Buffer Margins */ + u32 fmbm_rfne; /* Rx Frame Next Engine */ + u32 fmbm_rfca; /* Rx Frame Command Attributes. 
*/ + u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */ + u32 fmbm_rpso; /* Rx Parse Start Offset */ + u32 fmbm_rpp; /* Rx Policer Profile */ + u32 fmbm_rccb; /* Rx Coarse Classification Base */ + u32 fmbm_reth; /* Rx Excessive Threshold */ + u32 reserved003c[1]; /* (0x03C 0x03F) */ + u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM]; + /* Rx Parse Results Array Init */ + u32 fmbm_rfqid; /* Rx Frame Queue ID */ + u32 fmbm_refqid; /* Rx Error Frame Queue ID */ + u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */ + u32 fmbm_rfsem; /* Rx Frame Status Error Mask */ + u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */ + u32 reserved0074[0x2]; /* (0x074-0x07C) */ + u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */ + u32 reserved0080[0x20]; /* (0x080 0x0FF) */ + u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM]; + /* Buffer Manager pool Information- */ + u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM]; /* Allocate Counter- */ + u32 reserved0130[8]; /* 0x130/0x140 - 0x15F reserved - */ + u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */ + u32 fmbm_mpd; /* BM Pool Depletion */ + u32 reserved0184[0x1F]; /* (0x184 0x1FF) */ + u32 fmbm_rstc; /* Rx Statistics Counters */ + u32 fmbm_rfrc; /* Rx Frame Counter */ + u32 fmbm_rfbc; /* Rx Bad Frames Counter */ + u32 fmbm_rlfc; /* Rx Large Frames Counter */ + u32 fmbm_rffc; /* Rx Filter Frames Counter */ + u32 fmbm_rfdc; /* Rx Frame Discard Counter */ + u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */ + u32 fmbm_rodc; /* Rx Out of Buffers Discard nntr */ + u32 fmbm_rbdc; /* Rx Buffers Deallocate Counter */ + u32 fmbm_rpec; /* RX Prepare to enqueue Counte */ + u32 reserved0224[0x16]; /* (0x224 0x27F) */ + u32 fmbm_rpc; /* Rx Performance Counters */ + u32 fmbm_rpcp; /* Rx Performance Count Parameters */ + u32 fmbm_rccn; /* Rx Cycle Counter */ + u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */ + u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */ + u32 fmbm_rduc; /* Rx DMA Utilization Counter */ + u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */ + u32 fmbm_rpac; /* Rx Pause Activation Counter */ + u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */ + u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */ + u32 fmbm_rgpr; /* Rx General Purpose Register */ + u32 reserved0310[0x3a]; +}; + +/* BMI Tx port register map */ +struct fman_port_tx_bmi_regs { + u32 fmbm_tcfg; /* Tx Configuration */ + u32 fmbm_tst; /* Tx Status */ + u32 fmbm_tda; /* Tx DMA attributes */ + u32 fmbm_tfp; /* Tx FIFO Parameters */ + u32 fmbm_tfed; /* Tx Frame End Data */ + u32 fmbm_ticp; /* Tx Internal Context Parameters */ + u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */ + u32 fmbm_tfca; /* Tx Frame Command attribute. */ + u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. 
*/ + u32 fmbm_tefqid; /* Tx Frame Error Queue ID */ + u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */ + u32 fmbm_trlmts; /* Tx Rate Limiter Scale */ + u32 fmbm_trlmt; /* Tx Rate Limiter */ + u32 reserved0034[0x0e]; /* (0x034-0x6c) */ + u32 fmbm_tccb; /* Tx Coarse Classification base */ + u32 fmbm_tfne; /* Tx Frame Next Engine */ + u32 fmbm_tpfcm[0x02]; + /* Tx Priority based Flow Control (PFC) Mapping */ + u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */ + u32 reserved0080[0x60]; /* (0x080-0x200) */ + u32 fmbm_tstc; /* Tx Statistics Counters */ + u32 fmbm_tfrc; /* Tx Frame Counter */ + u32 fmbm_tfdc; /* Tx Frames Discard Counter */ + u32 fmbm_tfledc; /* Tx Frame len error discard cntr */ + u32 fmbm_tfufdc; /* Tx Frame unsprt frmt discard cntr */ + u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */ + u32 reserved0218[0x1A]; /* (0x218-0x280) */ + u32 fmbm_tpc; /* Tx Performance Counters */ + u32 fmbm_tpcp; /* Tx Performance Count Parameters */ + u32 fmbm_tccn; /* Tx Cycle Counter */ + u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */ + u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */ + u32 fmbm_tduc; /* Tx DMA Utilization Counter */ + u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */ + u32 reserved029c[16]; /* (0x29C-0x2FF) */ + u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */ + u32 fmbm_tgpr; /* Tx General Purpose Register */ + u32 reserved0310[0x3a]; /* (0x310-0x3FF) */ +}; + +/* BMI port register map */ +union fman_port_bmi_regs { + struct fman_port_rx_bmi_regs rx; + struct fman_port_tx_bmi_regs tx; +}; + +/* QMI port register map */ +struct fman_port_qmi_regs { + u32 fmqm_pnc; /* PortID n Configuration Register */ + u32 fmqm_pns; /* PortID n Status Register */ + u32 fmqm_pnts; /* PortID n Task Status Register */ + u32 reserved00c[4]; /* 0xn00C - 0xn01B */ + u32 fmqm_pnen; /* PortID n Enqueue NIA Register */ + u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */ + u32 reserved024[2]; /* 0xn024 - 0x02B */ + u32 fmqm_pndn; /* PortID n Dequeue NIA Register */ + u32 fmqm_pndc; /* PortID n Dequeue Config Register */ + u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */ + u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */ + u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */ +}; + +/* QMI dequeue prefetch modes */ +enum fman_port_deq_prefetch { + FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */ + FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */ + FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */ +}; + +/* A structure for defining FM port resources */ +struct fman_port_rsrc { + u32 num; /* Committed required resource */ + u32 extra; /* Extra (not committed) required resource */ +}; + +enum fman_port_dma_swap { + FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */ + FMAN_PORT_DMA_SWAP_LE, + /* The transferred data should be swapped in PPC Little Endian mode */ + FMAN_PORT_DMA_SWAP_BE + /* The transferred data should be swapped in Big Endian mode */ +}; + +/* Default port color */ +enum fman_port_color { + FMAN_PORT_COLOR_GREEN, /* Default port color is green */ + FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */ + FMAN_PORT_COLOR_RED, /* Default port color is red */ + FMAN_PORT_COLOR_OVERRIDE /* Ignore color */ +}; + +/* QMI dequeue from the SP channel - types */ +enum fman_port_deq_type { + FMAN_PORT_DEQ_BY_PRI, + /* Priority precedence and Intra-Class scheduling */ + FMAN_PORT_DEQ_ACTIVE_FQ, + /* Active FQ precedence and Intra-Class scheduling */ + FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS + /* Active FQ precedence and override Intra-Class 
scheduling */ +}; + +/* External buffer pools configuration */ +struct fman_port_bpools { + u8 count; /* Num of pools to set up */ + bool counters_enable; /* Enable allocate counters */ + u8 grp_bp_depleted_num; + /* Number of depleted pools - if reached the BMI indicates + * the MAC to send a pause frame + */ + struct { + u8 bpid; /* BM pool ID */ + u16 size; + /* Pool's size - must be in ascending order */ + bool is_backup; + /* If this is a backup pool */ + bool grp_bp_depleted; + /* Consider this buffer in multiple pools depletion criteria */ + bool single_bp_depleted; + /* Consider this buffer in single pool depletion criteria */ + } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM]; +}; + +struct fman_port_cfg { + u32 dflt_fqid; + u32 err_fqid; + u8 deq_sp; + bool deq_high_priority; + enum fman_port_deq_type deq_type; + enum fman_port_deq_prefetch deq_prefetch_option; + u16 deq_byte_cnt; + u8 cheksum_last_bytes_ignore; + u8 rx_cut_end_bytes; + struct fman_buf_pool_depletion buf_pool_depletion; + struct fman_ext_pools ext_buf_pools; + u32 tx_fifo_min_level; + u32 tx_fifo_low_comf_level; + u32 rx_pri_elevation; + u32 rx_fifo_thr; + struct fman_sp_buf_margins buf_margins; + u32 int_buf_start_margin; + struct fman_sp_int_context_data_copy int_context; + u32 discard_mask; + u32 err_mask; + struct fman_buffer_prefix_content buffer_prefix_content; + bool dont_release_buf; + + u8 rx_fd_bits; + u32 tx_fifo_deq_pipeline_depth; + bool errata_A006320; + bool excessive_threshold_register; + bool fmbm_tfne_has_features; + + enum fman_port_dma_swap dma_swap_data; + enum fman_port_color color; +}; + +struct fman_port_rx_pools_params { + u8 num_of_pools; + u16 second_largest_buf_size; + u16 largest_buf_size; +}; + +struct fman_port_dts_params { + void __iomem *base_addr; /* FMan port virtual memory */ + enum fman_port_type type; /* Port type */ + u16 speed; /* Port speed */ + u8 id; /* HW Port Id */ + u32 qman_channel_id; /* QMan channel id (non RX only) */ + struct fman *fman; /* FMan Handle */ +}; + +struct fman_port { + void *fm; + struct device *dev; + struct fman_rev_info rev_info; + u8 port_id; + enum fman_port_type port_type; + u16 port_speed; + + union fman_port_bmi_regs __iomem *bmi_regs; + struct fman_port_qmi_regs __iomem *qmi_regs; + + struct fman_sp_buffer_offsets buffer_offsets; + + u8 internal_buf_offset; + struct fman_ext_pools ext_buf_pools; + + u16 max_frame_length; + struct fman_port_rsrc open_dmas; + struct fman_port_rsrc tasks; + struct fman_port_rsrc fifo_bufs; + struct fman_port_rx_pools_params rx_pools_params; + + struct fman_port_cfg *cfg; + struct fman_port_dts_params dts_params; + + u8 ext_pools_num; + u32 max_port_fifo_size; + u32 max_num_of_ext_pools; + u32 max_num_of_sub_portals; + u32 bm_max_num_of_pools; +}; + +static int init_bmi_rx(struct fman_port *port) +{ + struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx; + struct fman_port_cfg *cfg = port->cfg; + u32 tmp; + + /* DMA attributes */ + tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT; + /* Enable write optimization */ + tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE; + iowrite32be(tmp, ®s->fmbm_rda); + + /* Rx FIFO parameters */ + tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) << + BMI_RX_FIFO_PRI_ELEVATION_SHIFT; + tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1; + iowrite32be(tmp, ®s->fmbm_rfp); + + if (cfg->excessive_threshold_register) + /* always allow access to the extra resources */ + iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, ®s->fmbm_reth); + + /* Frame end data */ + tmp = 
(cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) << + BMI_FRAME_END_CS_IGNORE_SHIFT; + tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) << + BMI_RX_FRAME_END_CUT_SHIFT; + if (cfg->errata_A006320) + tmp &= 0xffe0ffff; + iowrite32be(tmp, ®s->fmbm_rfed); + + /* Internal context parameters */ + tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT; + tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT; + tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) & + BMI_IC_SIZE_MASK; + iowrite32be(tmp, ®s->fmbm_ricp); + + /* Internal buffer offset */ + tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) & + BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT; + iowrite32be(tmp, ®s->fmbm_rim); + + /* External buffer margins */ + tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) << + BMI_EXT_BUF_MARG_START_SHIFT; + tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK; + iowrite32be(tmp, ®s->fmbm_rebm); + + /* Frame attributes */ + tmp = BMI_CMD_RX_MR_DEF; + tmp |= BMI_CMD_ATTR_ORDER; + tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT; + /* Synchronization request */ + tmp |= BMI_CMD_ATTR_SYNC; + + iowrite32be(tmp, ®s->fmbm_rfca); + + /* NIA */ + tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT; + + tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME; + iowrite32be(tmp, ®s->fmbm_rfne); + + /* Enqueue NIA */ + iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_rfene); + + /* Default/error queues */ + iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), ®s->fmbm_rfqid); + iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_refqid); + + /* Discard/error masks */ + iowrite32be(cfg->discard_mask, ®s->fmbm_rfsdm); + iowrite32be(cfg->err_mask, ®s->fmbm_rfsem); + + return 0; +} + +static int init_bmi_tx(struct fman_port *port) +{ + struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx; + struct fman_port_cfg *cfg = port->cfg; + u32 tmp; + + /* Tx Configuration register */ + tmp = 0; + iowrite32be(tmp, ®s->fmbm_tcfg); + + /* DMA attributes */ + tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT; + iowrite32be(tmp, ®s->fmbm_tda); + + /* Tx FIFO parameters */ + tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) << + BMI_TX_FIFO_MIN_FILL_SHIFT; + tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) & + BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT; + tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1; + iowrite32be(tmp, ®s->fmbm_tfp); + + /* Frame end data */ + tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) << + BMI_FRAME_END_CS_IGNORE_SHIFT; + iowrite32be(tmp, ®s->fmbm_tfed); + + /* Internal context parameters */ + tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT; + tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT; + tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) & + BMI_IC_SIZE_MASK; + iowrite32be(tmp, ®s->fmbm_ticp); + + /* Frame attributes */ + tmp = BMI_CMD_TX_MR_DEF; + tmp |= BMI_CMD_ATTR_ORDER; + tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT; + iowrite32be(tmp, ®s->fmbm_tfca); + + /* Dequeue NIA + enqueue NIA */ + iowrite32be(NIA_ENG_QMI_DEQ, ®s->fmbm_tfdne); + iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_tfene); + if (cfg->fmbm_tfne_has_features) + iowrite32be(!cfg->dflt_fqid ? 
+ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME : + NIA_BMI_AC_FETCH_ALL_FRAME, ®s->fmbm_tfne); + if (!cfg->dflt_fqid && cfg->dont_release_buf) { + iowrite32be(DFLT_FQ_ID, ®s->fmbm_tcfqid); + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, + ®s->fmbm_tfene); + if (cfg->fmbm_tfne_has_features) + iowrite32be(ioread32be(®s->fmbm_tfne) & ~BMI_EBD_EN, + ®s->fmbm_tfne); + } + + /* Confirmation/error queues */ + if (cfg->dflt_fqid || !cfg->dont_release_buf) + iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, ®s->fmbm_tcfqid); + iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_tefqid); + + return 0; +} + +static int init_qmi(struct fman_port *port) +{ + struct fman_port_qmi_regs __iomem *regs = port->qmi_regs; + struct fman_port_cfg *cfg = port->cfg; + u32 tmp; + + /* Rx port configuration */ + if (port->port_type == FMAN_PORT_TYPE_RX) { + /* Enqueue NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, ®s->fmqm_pnen); + return 0; + } + + /* Continue with Tx port configuration */ + if (port->port_type == FMAN_PORT_TYPE_TX) { + /* Enqueue NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, + ®s->fmqm_pnen); + /* Dequeue NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, ®s->fmqm_pndn); + } + + /* Dequeue Configuration register */ + tmp = 0; + if (cfg->deq_high_priority) + tmp |= QMI_DEQ_CFG_PRI; + + switch (cfg->deq_type) { + case FMAN_PORT_DEQ_BY_PRI: + tmp |= QMI_DEQ_CFG_TYPE1; + break; + case FMAN_PORT_DEQ_ACTIVE_FQ: + tmp |= QMI_DEQ_CFG_TYPE2; + break; + case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS: + tmp |= QMI_DEQ_CFG_TYPE3; + break; + default: + return -EINVAL; + } + + switch (cfg->deq_prefetch_option) { + case FMAN_PORT_DEQ_NO_PREFETCH: + break; + case FMAN_PORT_DEQ_PART_PREFETCH: + tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL; + break; + case FMAN_PORT_DEQ_FULL_PREFETCH: + tmp |= QMI_DEQ_CFG_PREFETCH_FULL; + break; + default: + return -EINVAL; + } + + tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT; + tmp |= cfg->deq_byte_cnt; + iowrite32be(tmp, ®s->fmqm_pndc); + + return 0; +} + +static int init(struct fman_port *port) +{ + int err; + + /* Init BMI registers */ + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + err = init_bmi_rx(port); + break; + case FMAN_PORT_TYPE_TX: + err = init_bmi_tx(port); + break; + default: + return -EINVAL; + } + + if (err) + return err; + + /* Init QMI registers */ + err = init_qmi(port); + return err; + + return 0; +} + +static int set_bpools(const struct fman_port *port, + const struct fman_port_bpools *bp) +{ + u32 __iomem *bp_reg, *bp_depl_reg; + u32 tmp; + u8 i, max_bp_num; + bool grp_depl_used = false, rx_port; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + max_bp_num = port->ext_pools_num; + rx_port = true; + bp_reg = port->bmi_regs->rx.fmbm_ebmpi; + bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd; + break; + default: + return -EINVAL; + } + + if (rx_port) { + /* Check buffers are provided in ascending order */ + for (i = 0; (i < (bp->count - 1) && + (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) { + if (bp->bpool[i].size > bp->bpool[i + 1].size) + return -EINVAL; + } + } + + /* Set up external buffers pools */ + for (i = 0; i < bp->count; i++) { + tmp = BMI_EXT_BUF_POOL_VALID; + tmp |= ((u32)bp->bpool[i].bpid << + BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK; + + if (rx_port) { + if (bp->counters_enable) + tmp |= BMI_EXT_BUF_POOL_EN_COUNTER; + + if (bp->bpool[i].is_backup) + tmp |= BMI_EXT_BUF_POOL_BACKUP; + + tmp |= (u32)bp->bpool[i].size; + } + + iowrite32be(tmp, &bp_reg[i]); + } + + /* Clear unused pools */ + for (i = bp->count; i < 
max_bp_num; i++) + iowrite32be(0, &bp_reg[i]); + + /* Pools depletion */ + tmp = 0; + for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) { + if (bp->bpool[i].grp_bp_depleted) { + grp_depl_used = true; + tmp |= 0x80000000 >> i; + } + + if (bp->bpool[i].single_bp_depleted) + tmp |= 0x80 >> i; + } + + if (grp_depl_used) + tmp |= ((u32)bp->grp_bp_depleted_num - 1) << + BMI_POOL_DEP_NUM_OF_POOLS_SHIFT; + + iowrite32be(tmp, bp_depl_reg); + return 0; +} + +static bool is_init_done(struct fman_port_cfg *cfg) +{ + /* Checks if FMan port driver parameters were initialized */ + if (!cfg) + return true; + + return false; +} + +static int verify_size_of_fifo(struct fman_port *port) +{ + u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0; + + /* TX Ports */ + if (port->port_type == FMAN_PORT_TYPE_TX) { + min_fifo_size_required = (u32) + (roundup(port->max_frame_length, + FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS)); + + min_fifo_size_required += + port->cfg->tx_fifo_deq_pipeline_depth * + FMAN_BMI_FIFO_UNITS; + + opt_fifo_size_for_b2b = min_fifo_size_required; + + /* Add some margin for back-to-back capability to improve + * performance, allows the hardware to pipeline new frame dma + * while the previous frame not yet transmitted. + */ + if (port->port_speed == 10000) + opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS; + else + opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS; + } + + /* RX Ports */ + else if (port->port_type == FMAN_PORT_TYPE_RX) { + if (port->rev_info.major >= 6) + min_fifo_size_required = (u32) + (roundup(port->max_frame_length, + FMAN_BMI_FIFO_UNITS) + + (5 * FMAN_BMI_FIFO_UNITS)); + /* 4 according to spec + 1 for FOF>0 */ + else + min_fifo_size_required = (u32) + (roundup(min(port->max_frame_length, + port->rx_pools_params.largest_buf_size), + FMAN_BMI_FIFO_UNITS) + + (7 * FMAN_BMI_FIFO_UNITS)); + + opt_fifo_size_for_b2b = min_fifo_size_required; + + /* Add some margin for back-to-back capability to improve + * performance,allows the hardware to pipeline new frame dma + * while the previous frame not yet transmitted. 
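+ * (Worked example, assuming FMAN_BMI_FIFO_UNITS is 256 bytes: a 10G Rx
+ * port on FMan v3 with a 9600-byte max frame needs at least
+ * roundup(9600, 256) + 5 * 256 = 11008 bytes, and the optimum for
+ * back-to-back operation adds 8 more units, giving 13056 bytes.)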
+ */ + if (port->port_speed == 10000) + opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS; + else + opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS; + } + + WARN_ON(min_fifo_size_required <= 0); + WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required); + + /* Verify the size */ + if (port->fifo_bufs.num < min_fifo_size_required) + dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n", + __func__, min_fifo_size_required); + else if (port->fifo_bufs.num < opt_fifo_size_for_b2b) + dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n", + __func__, opt_fifo_size_for_b2b); + + return 0; +} + +static int set_ext_buffer_pools(struct fman_port *port) +{ + struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools; + struct fman_buf_pool_depletion *buf_pool_depletion = + &port->cfg->buf_pool_depletion; + u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM]; + u16 sizes_array[BM_MAX_NUM_OF_POOLS]; + int i = 0, j = 0, err; + struct fman_port_bpools bpools; + + memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM); + memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS); + memcpy(&port->ext_buf_pools, ext_buf_pools, + sizeof(struct fman_ext_pools)); + + fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools, + ordered_array, + sizes_array); + + memset(&bpools, 0, sizeof(struct fman_port_bpools)); + bpools.count = ext_buf_pools->num_of_pools_used; + bpools.counters_enable = true; + for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) { + bpools.bpool[i].bpid = ordered_array[i]; + bpools.bpool[i].size = sizes_array[ordered_array[i]]; + } + + /* save pools parameters for later use */ + port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used; + port->rx_pools_params.largest_buf_size = + sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]]; + port->rx_pools_params.second_largest_buf_size = + sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]]; + + /* FMBM_RMPD reg. - pool depletion */ + if (buf_pool_depletion->pools_grp_mode_enable) { + bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools; + for (i = 0; i < port->bm_max_num_of_pools; i++) { + if (buf_pool_depletion->pools_to_consider[i]) { + for (j = 0; j < ext_buf_pools-> + num_of_pools_used; j++) { + if (i == ordered_array[j]) { + bpools.bpool[j]. + grp_bp_depleted = true; + break; + } + } + } + } + } + + if (buf_pool_depletion->single_pool_mode_enable) { + for (i = 0; i < port->bm_max_num_of_pools; i++) { + if (buf_pool_depletion-> + pools_to_consider_for_single_mode[i]) { + for (j = 0; j < ext_buf_pools-> + num_of_pools_used; j++) { + if (i == ordered_array[j]) { + bpools.bpool[j]. + single_bp_depleted = true; + break; + } + } + } + } + } + + err = set_bpools(port, &bpools); + if (err != 0) { + dev_err(port->dev, "%s: set_bpools() failed\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int init_low_level_driver(struct fman_port *port) +{ + struct fman_port_cfg *cfg = port->cfg; + u32 tmp_val; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask); + break; + default: + break; + } + + tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ? 
+ (port->internal_buf_offset / OFFSET_UNITS + 1) : + (port->internal_buf_offset / OFFSET_UNITS)); + port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS); + port->cfg->int_buf_start_margin = port->internal_buf_offset; + + if (init(port) != 0) { + dev_err(port->dev, "%s: fman port initialization failed\n", + __func__); + return -ENODEV; + } + + /* The code bellow is a trick so the FM will not release the buffer + * to BM nor will try to enqueue the frame to QM + */ + if (port->port_type == FMAN_PORT_TYPE_TX) { + if (!cfg->dflt_fqid && cfg->dont_release_buf) { + /* override fmbm_tcfqid 0 with a false non-0 value. + * This will force FM to act according to tfene. + * Otherwise, if fmbm_tcfqid is 0 the FM will release + * buffers to BM regardless of fmbm_tfene + */ + iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid); + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, + &port->bmi_regs->tx.fmbm_tfene); + } + } + + return 0; +} + +static int fill_soc_specific_params(struct fman_port *port) +{ + u32 bmi_max_fifo_size; + + bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm); + port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size); + port->bm_max_num_of_pools = 64; + + /* P4080 - Major 2 + * P2041/P3041/P5020/P5040 - Major 3 + * Tx/Bx - Major 6 + */ + switch (port->rev_info.major) { + case 2: + case 3: + port->max_num_of_ext_pools = 4; + port->max_num_of_sub_portals = 12; + break; + + case 6: + port->max_num_of_ext_pools = 8; + port->max_num_of_sub_portals = 16; + break; + + default: + dev_err(port->dev, "%s: Unsupported FMan version\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type, + u16 speed) +{ + switch (type) { + case FMAN_PORT_TYPE_RX: + case FMAN_PORT_TYPE_TX: + switch (speed) { + case 10000: + return 4; + case 1000: + if (major >= 6) + return 2; + else + return 1; + default: + return 0; + } + default: + return 0; + } +} + +static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type, + u16 speed) +{ + switch (type) { + case FMAN_PORT_TYPE_RX: + case FMAN_PORT_TYPE_TX: + switch (speed) { + case 10000: + return 16; + case 1000: + if (major >= 6) + return 4; + else + return 3; + default: + return 0; + } + default: + return 0; + } +} + +static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type, + u16 speed) +{ + switch (type) { + case FMAN_PORT_TYPE_RX: + /* FMan V3 */ + if (major >= 6) + return 0; + + /* FMan V2 */ + if (speed == 10000) + return 8; + else + return 2; + case FMAN_PORT_TYPE_TX: + default: + return 0; + } +} + +static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type, + u16 speed) +{ + int val; + + if (major >= 6) { + switch (type) { + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + val = 12; + else + val = 3; + break; + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 8; + else + val = 2; + break; + default: + return 0; + } + } else { + switch (type) { + case FMAN_PORT_TYPE_TX: + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 8; + else + val = 1; + break; + default: + val = 0; + } + } + + return val; +} + +static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type, + u16 speed) +{ + /* FMan V3 */ + if (major >= 6) + return 0; + + /* FMan V2 */ + switch (type) { + case FMAN_PORT_TYPE_RX: + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + return 8; + else + return 1; + default: + return 0; + } +} + +static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type, + u16 speed) +{ + int val; + + if 
(major >= 6) { + switch (type) { + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + val = 64; + else + val = 50; + break; + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 96; + else + val = 50; + break; + default: + val = 0; + } + } else { + switch (type) { + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + val = 48; + else + val = 44; + break; + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 48; + else + val = 45; + break; + default: + val = 0; + } + } + + return val; +} + +static void set_dflt_cfg(struct fman_port *port, + struct fman_port_params *port_params) +{ + struct fman_port_cfg *cfg = port->cfg; + + cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP; + cfg->color = FMAN_PORT_COLOR_GREEN; + cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END; + cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL; + cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD; + cfg->tx_fifo_low_comf_level = (5 * 1024); + cfg->deq_type = FMAN_PORT_DEQ_BY_PRI; + cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH; + cfg->tx_fifo_deq_pipeline_depth = + BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed); + cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type); + + cfg->rx_pri_elevation = + DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size); + port->cfg->rx_fifo_thr = + DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major, + port->max_port_fifo_size); + + if ((port->rev_info.major == 6) && + ((port->rev_info.minor == 0) || (port->rev_info.minor == 3))) + cfg->errata_A006320 = true; + + /* Excessive Threshold register - exists for pre-FMv3 chips only */ + if (port->rev_info.major < 6) + cfg->excessive_threshold_register = true; + else + cfg->fmbm_tfne_has_features = true; + + cfg->buffer_prefix_content.data_align = + DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN; +} + +static void set_rx_dflt_cfg(struct fman_port *port, + struct fman_port_params *port_params) +{ + port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD; + + memcpy(&port->cfg->ext_buf_pools, + &port_params->specific_params.rx_params.ext_buf_pools, + sizeof(struct fman_ext_pools)); + port->cfg->err_fqid = + port_params->specific_params.rx_params.err_fqid; + port->cfg->dflt_fqid = + port_params->specific_params.rx_params.dflt_fqid; +} + +static void set_tx_dflt_cfg(struct fman_port *port, + struct fman_port_params *port_params, + struct fman_port_dts_params *dts_params) +{ + port->cfg->tx_fifo_deq_pipeline_depth = + get_dflt_fifo_deq_pipeline_depth(port->rev_info.major, + port->port_type, + port->port_speed); + port->cfg->err_fqid = + port_params->specific_params.non_rx_params.err_fqid; + port->cfg->deq_sp = + (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK); + port->cfg->dflt_fqid = + port_params->specific_params.non_rx_params.dflt_fqid; + port->cfg->deq_high_priority = true; +} + +/** + * fman_port_config + * @port: Pointer to the port structure + * @params: Pointer to data structure of parameters + * + * Creates a descriptor for the FM PORT module. + * The routine returns a pointer to the FM PORT object. + * This descriptor must be passed as first parameter to all other FM PORT + * function calls. + * No actual initialization or configuration of FM hardware is done by this + * routine. + * + * Return: 0 on success; Error code otherwise. 
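+ *
+ * Minimal usage sketch for a Tx port (hypothetical caller code; "dev" and
+ * the FQ ids are assumptions, not values mandated by this driver):
+ *
+ *	struct fman_port *port = fman_port_bind(dev);
+ *	struct fman_port_params params = { 0 };
+ *	int err;
+ *
+ *	params.specific_params.non_rx_params.err_fqid = tx_err_fqid;
+ *	params.specific_params.non_rx_params.dflt_fqid = tx_conf_fqid;
+ *	err = fman_port_config(port, &params);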
+ */ +int fman_port_config(struct fman_port *port, struct fman_port_params *params) +{ + void __iomem *base_addr = port->dts_params.base_addr; + int err; + + /* Allocate the FM driver's parameters structure */ + port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL); + if (!port->cfg) + goto err_params; + + /* Initialize FM port parameters which will be kept by the driver */ + port->port_type = port->dts_params.type; + port->port_speed = port->dts_params.speed; + port->port_id = port->dts_params.id; + port->fm = port->dts_params.fman; + port->ext_pools_num = (u8)8; + + /* get FM revision */ + fman_get_revision(port->fm, &port->rev_info); + + err = fill_soc_specific_params(port); + if (err) + goto err_port_cfg; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + set_rx_dflt_cfg(port, params); + case FMAN_PORT_TYPE_TX: + set_tx_dflt_cfg(port, params, &port->dts_params); + default: + set_dflt_cfg(port, params); + } + + /* Continue with other parameters */ + /* set memory map pointers */ + port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET; + port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET; + + port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH; + /* resource distribution. */ + + port->fifo_bufs.num = + get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type, + port->port_speed) * FMAN_BMI_FIFO_UNITS; + port->fifo_bufs.extra = + DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS; + + port->open_dmas.num = + get_dflt_num_of_open_dmas(port->rev_info.major, + port->port_type, port->port_speed); + port->open_dmas.extra = + get_dflt_extra_num_of_open_dmas(port->rev_info.major, + port->port_type, port->port_speed); + port->tasks.num = + get_dflt_num_of_tasks(port->rev_info.major, + port->port_type, port->port_speed); + port->tasks.extra = + get_dflt_extra_num_of_tasks(port->rev_info.major, + port->port_type, port->port_speed); + + /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata + * workaround + */ + if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) && + (((port->port_type == FMAN_PORT_TYPE_TX) && + (port->port_speed == 1000)))) { + port->open_dmas.num = 16; + port->open_dmas.extra = 0; + } + + if (port->rev_info.major >= 6 && + port->port_type == FMAN_PORT_TYPE_TX && + port->port_speed == 1000) { + /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata + * workaround + */ + if (port->rev_info.major >= 6) { + u32 reg; + + reg = 0x00001013; + iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp); + } + } + + return 0; + +err_port_cfg: + kfree(port->cfg); +err_params: + kfree(port); + return -EINVAL; +} +EXPORT_SYMBOL(fman_port_config); + +/** + * fman_port_init + * port: A pointer to a FM Port module. + * Initializes the FM PORT module by defining the software structure and + * configuring the hardware registers. + * + * Return: 0 on success; Error code otherwise. 
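+ *
+ * Call-order sketch (assumed caller code, error handling trimmed): this
+ * routine may only be called after fman_port_config() and any optional
+ * fman_port_cfg_*() tuning, and fman_port_enable() only after it:
+ *
+ *	err = fman_port_init(port);
+ *	if (!err)
+ *		err = fman_port_enable(port);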
+ */ +int fman_port_init(struct fman_port *port) +{ + struct fman_port_cfg *cfg; + int err; + struct fman_port_init_params params; + + if (is_init_done(port->cfg)) + return -EINVAL; + + err = fman_sp_build_buffer_struct(&port->cfg->int_context, + &port->cfg->buffer_prefix_content, + &port->cfg->buf_margins, + &port->buffer_offsets, + &port->internal_buf_offset); + if (err) + return err; + + cfg = port->cfg; + + if (port->port_type == FMAN_PORT_TYPE_RX) { + /* Call the external Buffer routine which also checks fifo + * size and updates it if necessary + */ + /* define external buffer pools and pool depletion */ + err = set_ext_buffer_pools(port); + if (err) + return err; + /* check if the largest external buffer pool is large enough */ + if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE + + cfg->buf_margins.end_margins > + port->rx_pools_params.largest_buf_size) { + dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n", + __func__, cfg->buf_margins.start_margins, + cfg->buf_margins.end_margins, + port->rx_pools_params.largest_buf_size); + return -EINVAL; + } + } + + /* Call FM module routine for communicating parameters */ + memset(¶ms, 0, sizeof(params)); + params.port_id = port->port_id; + params.port_type = port->port_type; + params.port_speed = port->port_speed; + params.num_of_tasks = (u8)port->tasks.num; + params.num_of_extra_tasks = (u8)port->tasks.extra; + params.num_of_open_dmas = (u8)port->open_dmas.num; + params.num_of_extra_open_dmas = (u8)port->open_dmas.extra; + + if (port->fifo_bufs.num) { + err = verify_size_of_fifo(port); + if (err) + return err; + } + params.size_of_fifo = port->fifo_bufs.num; + params.extra_size_of_fifo = port->fifo_bufs.extra; + params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth; + params.max_frame_length = port->max_frame_length; + + err = fman_set_port_params(port->fm, ¶ms); + if (err) + return err; + + err = init_low_level_driver(port); + if (err) + return err; + + kfree(port->cfg); + port->cfg = NULL; + + return 0; +} +EXPORT_SYMBOL(fman_port_init); + +/** + * fman_port_cfg_buf_prefix_content + * @port A pointer to a FM Port module. + * @buffer_prefix_content A structure of parameters describing + * the structure of the buffer. + * Out parameter: + * Start margin - offset of data from + * start of external buffer. + * Defines the structure, size and content of the application buffer. + * The prefix, in Tx ports, if 'pass_prs_result', the application should set + * a value to their offsets in the prefix of the FM will save the first + * 'priv_data_size', than, depending on 'pass_prs_result' and + * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself + * (in this order), to the application buffer, and to offset. + * Calling this routine changes the buffer margins definitions in the internal + * driver data base from its default configuration: + * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE] + * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT]. + * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP]. + * May be used for all ports + * + * Allowed only following fman_port_config() and before fman_port_init(). + * + * Return: 0 on success; Error code otherwise. 
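+ *
+ * Sketch of a caller requesting parse results in the buffer prefix
+ * (the field values below are illustrative assumptions only):
+ *
+ *	struct fman_buffer_prefix_content prefix = { 0 };
+ *
+ *	prefix.priv_data_size = 16;
+ *	prefix.pass_prs_result = true;
+ *	prefix.data_align = 64;
+ *	err = fman_port_cfg_buf_prefix_content(port, &prefix);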
+ */ +int fman_port_cfg_buf_prefix_content(struct fman_port *port, + struct fman_buffer_prefix_content * + buffer_prefix_content) +{ + if (is_init_done(port->cfg)) + return -EINVAL; + + memcpy(&port->cfg->buffer_prefix_content, + buffer_prefix_content, + sizeof(struct fman_buffer_prefix_content)); + /* if data_align was not initialized by user, + * we return to driver's default + */ + if (!port->cfg->buffer_prefix_content.data_align) + port->cfg->buffer_prefix_content.data_align = + DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN; + + return 0; +} +EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content); + +/** + * fman_port_disable + * port: A pointer to a FM Port module. + * + * Gracefully disable an FM port. The port will not start new tasks after all + * tasks associated with the port are terminated. + * + * This is a blocking routine, it returns after port is gracefully stopped, + * i.e. the port will not except new frames, but it will finish all frames + * or tasks which were already began. + * Allowed only following fman_port_init(). + * + * Return: 0 on success; Error code otherwise. + */ +int fman_port_disable(struct fman_port *port) +{ + u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp; + bool rx_port, failure = false; + int count; + + if (!is_init_done(port->cfg)) + return -EINVAL; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg; + bmi_status_reg = &port->bmi_regs->rx.fmbm_rst; + rx_port = true; + break; + case FMAN_PORT_TYPE_TX: + bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg; + bmi_status_reg = &port->bmi_regs->tx.fmbm_tst; + rx_port = false; + break; + default: + return -EINVAL; + } + + /* Disable QMI */ + if (!rx_port) { + tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN; + iowrite32be(tmp, &port->qmi_regs->fmqm_pnc); + + /* Wait for QMI to finish FD handling */ + count = 100; + do { + udelay(10); + tmp = ioread32be(&port->qmi_regs->fmqm_pns); + } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count); + + if (count == 0) { + /* Timeout */ + failure = true; + } + } + + /* Disable BMI */ + tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN; + iowrite32be(tmp, bmi_cfg_reg); + + /* Wait for graceful stop end */ + count = 500; + do { + udelay(10); + tmp = ioread32be(bmi_status_reg); + } while ((tmp & BMI_PORT_STATUS_BSY) && --count); + + if (count == 0) { + /* Timeout */ + failure = true; + } + + if (failure) + dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n", + __func__, port->port_id); + + return 0; +} +EXPORT_SYMBOL(fman_port_disable); + +/** + * fman_port_enable + * port: A pointer to a FM Port module. + * + * A runtime routine provided to allow disable/enable of port. + * + * Allowed only following fman_port_init(). + * + * Return: 0 on success; Error code otherwise. 
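+ *
+ * Runtime pairing sketch with fman_port_disable() (assumed caller code;
+ * both calls require a port that has already completed fman_port_init()):
+ * enable starts frame acceptance, disable gracefully stops it on teardown.
+ *
+ *	err = fman_port_enable(port);
+ *	...
+ *	err = fman_port_disable(port);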
+ */ +int fman_port_enable(struct fman_port *port) +{ + u32 __iomem *bmi_cfg_reg, tmp; + bool rx_port; + + if (!is_init_done(port->cfg)) + return -EINVAL; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg; + rx_port = true; + break; + case FMAN_PORT_TYPE_TX: + bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg; + rx_port = false; + break; + default: + return -EINVAL; + } + + /* Enable QMI */ + if (!rx_port) { + tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN; + iowrite32be(tmp, &port->qmi_regs->fmqm_pnc); + } + + /* Enable BMI */ + tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN; + iowrite32be(tmp, bmi_cfg_reg); + + return 0; +} +EXPORT_SYMBOL(fman_port_enable); + +/** + * fman_port_bind + * dev: FMan Port OF device pointer + * + * Bind to a specific FMan Port. + * + * Allowed only after the port was created. + * + * Return: A pointer to the FMan port device. + */ +struct fman_port *fman_port_bind(struct device *dev) +{ + return (struct fman_port *)(dev_get_drvdata(get_device(dev))); +} +EXPORT_SYMBOL(fman_port_bind); + +/** + * fman_port_get_qman_channel_id + * port: Pointer to the FMan port devuce + * + * Get the QMan channel ID for the specific port + * + * Return: QMan channel ID + */ +u32 fman_port_get_qman_channel_id(struct fman_port *port) +{ + return port->dts_params.qman_channel_id; +} +EXPORT_SYMBOL(fman_port_get_qman_channel_id); + +static int fman_port_probe(struct platform_device *of_dev) +{ + struct fman_port *port; + struct fman *fman; + struct device_node *fm_node, *port_node; + struct resource res; + struct resource *dev_res; + const u32 *u32_prop; + int err = 0, lenp; + enum fman_port_type port_type; + u16 port_speed; + u8 port_id; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->dev = &of_dev->dev; + + port_node = of_node_get(of_dev->dev.of_node); + + /* Get the FM node */ + fm_node = of_get_parent(port_node); + if (!fm_node) { + dev_err(port->dev, "%s: of_get_parent() failed\n", __func__); + err = -ENODEV; + goto return_err; + } + + fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + of_node_put(fm_node); + if (!fman) { + err = -EINVAL; + goto return_err; + } + + u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n", + __func__, port_node->full_name); + err = -EINVAL; + goto return_err; + } + if (WARN_ON(lenp != sizeof(u32))) { + err = -EINVAL; + goto return_err; + } + port_id = (u8)fdt32_to_cpu(u32_prop[0]); + + port->dts_params.id = port_id; + + if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) { + port_type = FMAN_PORT_TYPE_TX; + port_speed = 1000; + u32_prop = (const u32 *)of_get_property(port_node, + "fsl,fman-10g-port", + &lenp); + if (u32_prop) + port_speed = 10000; + + } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) { + if (port_id >= TX_10G_PORT_BASE) + port_speed = 10000; + else + port_speed = 1000; + port_type = FMAN_PORT_TYPE_TX; + + } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) { + port_type = FMAN_PORT_TYPE_RX; + port_speed = 1000; + u32_prop = (const u32 *)of_get_property(port_node, + "fsl,fman-10g-port", &lenp); + if (u32_prop) + port_speed = 10000; + + } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) { + if (port_id >= RX_10G_PORT_BASE) + port_speed = 10000; + else + port_speed = 1000; + port_type = FMAN_PORT_TYPE_RX; + + } else { + dev_err(port->dev, "%s: 
Illegal port type\n", __func__); + err = -EINVAL; + goto return_err; + } + + port->dts_params.type = port_type; + port->dts_params.speed = port_speed; + + if (port_type == FMAN_PORT_TYPE_TX) { + u32 qman_channel_id; + + qman_channel_id = fman_get_qman_channel_id(fman, port_id); + if (qman_channel_id == 0) { + dev_err(port->dev, "%s: incorrect qman-channel-id\n", + __func__); + err = -EINVAL; + goto return_err; + } + port->dts_params.qman_channel_id = qman_channel_id; + } + + err = of_address_to_resource(port_node, 0, &res); + if (err < 0) { + dev_err(port->dev, "%s: of_address_to_resource() failed\n", + __func__); + err = -ENOMEM; + goto return_err; + } + + port->dts_params.fman = fman; + + of_node_put(port_node); + + dev_res = __devm_request_region(port->dev, &res, res.start, + resource_size(&res), "fman-port"); + if (!dev_res) { + dev_err(port->dev, "%s: __devm_request_region() failed\n", + __func__); + err = -EINVAL; + goto free_port; + } + + port->dts_params.base_addr = devm_ioremap(port->dev, res.start, + resource_size(&res)); + if (port->dts_params.base_addr == 0) + dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__); + + dev_set_drvdata(&of_dev->dev, port); + + return 0; + +return_err: + of_node_put(port_node); +free_port: + kfree(port); + return err; +} + +static const struct of_device_id fman_port_match[] = { + {.compatible = "fsl,fman-v3-port-rx"}, + {.compatible = "fsl,fman-v2-port-rx"}, + {.compatible = "fsl,fman-v3-port-tx"}, + {.compatible = "fsl,fman-v2-port-tx"}, + {} +}; + +MODULE_DEVICE_TABLE(of, fman_port_match); + +static struct platform_driver fman_port_driver = { + .driver = { + .name = "fsl-fman-port", + .of_match_table = fman_port_match, + }, + .probe = fman_port_probe, +}; + +builtin_platform_driver(fman_port_driver); diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h new file mode 100644 index 000000000000..8ba901737048 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -0,0 +1,151 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FMAN_PORT_H +#define __FMAN_PORT_H + +#include "fman.h" + +/* FM Port API + * The FM uses a general module called "port" to represent a Tx port (MAC), + * an Rx port (MAC). + * The number of ports in an FM varies between SOCs. + * The SW driver manages these ports as sub-modules of the FM,i.e. after an + * FM is initialized, its ports may be initialized and operated upon. + * The port is initialized aware of its type, but other functions on a port + * may be indifferent to its type. When necessary, the driver verifies + * coherence and returns error if applicable. + * On initialization, user specifies the port type and it's index (relative + * to the port's type) - always starting at 0. + */ + +/* FM Frame error */ +/* Frame Descriptor errors */ +/* Not for Rx-Port! Unsupported Format */ +#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT +/* Not for Rx-Port! Length Error */ +#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH +/* DMA Data error */ +#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA +/* non Frame-Manager error; probably come from SEC that was chained to FM */ +#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM + /* IPR error */ +#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR) +/* IPR non-consistent-sp */ +#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \ + ~FM_FD_IPR) + +/* Rx FIFO overflow, FCS error, code error, running disparity + * error (SGMII and TBI modes), FIFO parity error. + * PHY Sequence error, PHY error control character detected. + */ +#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL +/* Frame too long OR Frame size exceeds max_length_frame */ +#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE +/* indicates a classifier "drop" operation */ +#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD +/* Extract Out of Frame */ +#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION +/* No Scheme Selected */ +#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME +/* Keysize Overflow */ +#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW +/* Frame color is red */ +#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED +/* Frame color is yellow */ +#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW +/* Parser Time out Exceed */ +#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT +/* Invalid Soft Parser instruction */ +#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT +/* Header error was identified during parsing */ +#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR +/* Frame parsed beyind 256 first bytes */ +#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED +/* FPM Frame Processing Timeout Exceeded */ +#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001 + +struct fman_port; + +/* A structure for additional Rx port parameters */ +struct fman_port_rx_params { + u32 err_fqid; /* Error Queue Id. */ + u32 dflt_fqid; /* Default Queue Id. 
*/ + /* Which external buffer pools are used + * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes. + */ + struct fman_ext_pools ext_buf_pools; +}; + +/* A structure for additional non-Rx port parameters */ +struct fman_port_non_rx_params { + /* Error Queue Id. */ + u32 err_fqid; + /* For Tx - Default Confirmation queue, 0 means no Tx confirmation + * for processed frames. For OP port - default Rx queue. + */ + u32 dflt_fqid; +}; + +/* A union for additional parameters depending on port type */ +union fman_port_specific_params { + /* Rx port parameters structure */ + struct fman_port_rx_params rx_params; + /* Non-Rx port parameters structure */ + struct fman_port_non_rx_params non_rx_params; +}; + +/* A structure representing FM initialization parameters */ +struct fman_port_params { + /* Virtual Address of memory mapped FM Port registers. */ + void *fm; + union fman_port_specific_params specific_params; + /* Additional parameters depending on port type. */ +}; + +int fman_port_config(struct fman_port *port, struct fman_port_params *params); + +int fman_port_init(struct fman_port *port); + +int fman_port_cfg_buf_prefix_content(struct fman_port *port, + struct fman_buffer_prefix_content + *buffer_prefix_content); + +int fman_port_disable(struct fman_port *port); + +int fman_port_enable(struct fman_port *port); + +u32 fman_port_get_qman_channel_id(struct fman_port *port); + +struct fman_port *fman_port_bind(struct device *dev); + +#endif /* __FMAN_PORT_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c new file mode 100644 index 000000000000..f9e7aa385cba --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_sp.c @@ -0,0 +1,166 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "fman_sp.h" +#include "fman.h" + +void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools + *fm_ext_pools, + u8 *ordered_array, + u16 *sizes_array) +{ + u16 buf_size = 0; + int i = 0, j = 0, k = 0; + + /* First we copy the external buffers pools information + * to an ordered local array + */ + for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) { + /* get pool size */ + buf_size = fm_ext_pools->ext_buf_pool[i].size; + + /* keep sizes in an array according to poolId + * for direct access + */ + sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size; + + /* save poolId in an ordered array according to size */ + for (j = 0; j <= i; j++) { + /* this is the next free place in the array */ + if (j == i) + ordered_array[i] = + fm_ext_pools->ext_buf_pool[i].id; + else { + /* find the right place for this poolId */ + if (buf_size < sizes_array[ordered_array[j]]) { + /* move the pool_ids one place ahead + * to make room for this poolId + */ + for (k = i; k > j; k--) + ordered_array[k] = + ordered_array[k - 1]; + + /* now k==j, this is the place for + * the new size + */ + ordered_array[k] = + fm_ext_pools->ext_buf_pool[i].id; + break; + } + } + } + } +} + +int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy * + int_context_data_copy, + struct fman_buffer_prefix_content * + buffer_prefix_content, + struct fman_sp_buf_margins *buf_margins, + struct fman_sp_buffer_offsets *buffer_offsets, + u8 *internal_buf_offset) +{ + u32 tmp; + + /* Align start of internal context data to 16 byte */ + int_context_data_copy->ext_buf_offset = (u16) + ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ? + ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) & + ~(u16)(OFFSET_UNITS - 1)) : + buffer_prefix_content->priv_data_size); + + /* Translate margin and int_context params to FM parameters */ + /* Initialize with illegal value. Later we'll set legal values. */ + buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE; + buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE; + buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE; + + /* Internally the driver supports 4 options + * 1. prsResult/timestamp/hashResult selection (in fact 8 options, + * but for simplicity we'll + * relate to it as 1). + * 2. All IC context (from AD) not including debug. + */ + + /* This case covers the options under 1 */ + /* Copy size must be in 16-byte granularity. */ + int_context_data_copy->size = + (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) + + ((buffer_prefix_content->pass_time_stamp || + buffer_prefix_content->pass_hash_result) ? 16 : 0)); + + /* Align start of internal context data to 16 byte */ + int_context_data_copy->int_context_offset = + (u8)(buffer_prefix_content->pass_prs_result ? 32 : + ((buffer_prefix_content->pass_time_stamp || + buffer_prefix_content->pass_hash_result) ? 64 : 0)); + + if (buffer_prefix_content->pass_prs_result) + buffer_offsets->prs_result_offset = + int_context_data_copy->ext_buf_offset; + if (buffer_prefix_content->pass_time_stamp) + buffer_offsets->time_stamp_offset = + buffer_prefix_content->pass_prs_result ? + (int_context_data_copy->ext_buf_offset + + sizeof(struct fman_prs_result)) : + int_context_data_copy->ext_buf_offset; + if (buffer_prefix_content->pass_hash_result) + /* If PR is not requested, whether TS is + * requested or not, IC will be copied from TS + */ + buffer_offsets->hash_result_offset = + buffer_prefix_content->pass_prs_result ? 
+ (int_context_data_copy->ext_buf_offset + + sizeof(struct fman_prs_result) + 8) : + int_context_data_copy->ext_buf_offset + 8; + + if (int_context_data_copy->size) + buf_margins->start_margins = + (u16)(int_context_data_copy->ext_buf_offset + + int_context_data_copy->size); + else + /* No Internal Context passing, STartMargin is + * immediately after private_info + */ + buf_margins->start_margins = + buffer_prefix_content->priv_data_size; + + /* align data start */ + tmp = (u32)(buf_margins->start_margins % + buffer_prefix_content->data_align); + if (tmp) + buf_margins->start_margins += + (buffer_prefix_content->data_align - tmp); + buffer_offsets->data_offset = buf_margins->start_margins; + + return 0; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h new file mode 100644 index 000000000000..820b7f63088f --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_sp.h @@ -0,0 +1,103 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __FM_SP_H +#define __FM_SP_H + +#include "fman.h" +#include <linux/types.h> + +#define ILLEGAL_BASE (~0) + +/* defaults */ +#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64 + +/* Registers bit fields */ +#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000 +#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000 +#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000 +#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000 +#define FMAN_SP_SG_DISABLE 0x80000000 + +/* shifts */ +#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16 +#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30 +#define FMAN_SP_IC_TO_EXT_SHIFT 16 +#define FMAN_SP_IC_FROM_INT_SHIFT 8 + +/* structure for defining internal context copying */ +struct fman_sp_int_context_data_copy { + /* < Offset in External buffer to which internal + * context is copied to (Rx) or taken from (Tx, Op). + */ + u16 ext_buf_offset; + /* Offset within internal context to copy from + * (Rx) or to copy to (Tx, Op). + */ + u8 int_context_offset; + /* Internal offset size to be copied */ + u16 size; +}; + +/* struct for defining external buffer margins */ +struct fman_sp_buf_margins { + /* Number of bytes to be left at the beginning + * of the external buffer (must be divisible by 16) + */ + u16 start_margins; + /* number of bytes to be left at the end + * of the external buffer(must be divisible by 16) + */ + u16 end_margins; +}; + +struct fman_sp_buffer_offsets { + u32 data_offset; + u32 prs_result_offset; + u32 time_stamp_offset; + u32 hash_result_offset; +}; + +int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy + *int_context_data_copy, + struct fman_buffer_prefix_content + *buffer_prefix_content, + struct fman_sp_buf_margins *buf_margins, + struct fman_sp_buffer_offsets + *buffer_offsets, + u8 *internal_buf_offset); + +void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools + *fm_ext_pools, + u8 *ordered_array, + u16 *sizes_array); + +#endif /* __FM_SP_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c new file mode 100644 index 000000000000..efabb04a1ae8 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -0,0 +1,786 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_tgec.h" +#include "fman.h" + +#include <linux/slab.h> +#include <linux/bitrev.h> +#include <linux/io.h> +#include <linux/crc32.h> + +/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */ +#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff + +/* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_NO_LEN_CHK 0x00020000 +#define CMD_CFG_PAUSE_IGNORE 0x00000100 +#define CMF_CFG_CRC_FWD 0x00000040 +#define CMD_CFG_PROMIS_EN 0x00000010 +#define CMD_CFG_RX_EN 0x00000002 +#define CMD_CFG_TX_EN 0x00000001 + +/* Interrupt Mask Register (IMASK) */ +#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000 +#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000 +#define TGEC_IMASK_REM_FAULT 0x00004000 +#define TGEC_IMASK_LOC_FAULT 0x00002000 +#define TGEC_IMASK_TX_ECC_ER 0x00001000 +#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800 +#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400 +#define TGEC_IMASK_TX_ER 0x00000200 +#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100 +#define TGEC_IMASK_RX_ECC_ER 0x00000080 +#define TGEC_IMASK_RX_JAB_FRM 0x00000040 +#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020 +#define TGEC_IMASK_RX_RUNT_FRM 0x00000010 +#define TGEC_IMASK_RX_FRAG_FRM 0x00000008 +#define TGEC_IMASK_RX_LEN_ER 0x00000004 +#define TGEC_IMASK_RX_CRC_ER 0x00000002 +#define TGEC_IMASK_RX_ALIGN_ER 0x00000001 + +/* Hashtable Control Register (HASHTABLE_CTRL) */ +#define TGEC_HASH_MCAST_SHIFT 23 +#define TGEC_HASH_MCAST_EN 0x00000200 +#define TGEC_HASH_ADR_MSK 0x000001ff + +#define DEFAULT_TX_IPG_LENGTH 12 +#define DEFAULT_MAX_FRAME_LENGTH 0x600 +#define DEFAULT_PAUSE_QUANT 0xf000 + +/* number of pattern match registers (entries) */ +#define TGEC_NUM_OF_PADDRS 1 + +/* Group address bit indication */ +#define GROUP_ADDRESS 0x0000010000000000LL + +/* Hash table size (= 32 bits*8 regs) */ +#define TGEC_HASH_TABLE_SIZE 512 + +/* tGEC memory map */ +struct tgec_regs { + u32 tgec_id; /* 0x000 Controller ID */ + u32 reserved001[1]; /* 0x004 */ + u32 command_config; /* 0x008 Control and configuration */ + u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */ + u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */ + u32 maxfrm; /* 0x014 Maximum frame length */ + u32 pause_quant; /* 0x018 Pause quanta */ + u32 rx_fifo_sections; /* 0x01c */ + u32 tx_fifo_sections; /* 0x020 */ + u32 rx_fifo_almost_f_e; /* 0x024 */ + u32 tx_fifo_almost_f_e; /* 0x028 */ + u32 hashtable_ctrl; /* 0x02c Hash table control */ + u32 mdio_cfg_status; /* 0x030 */ + u32 mdio_command; /* 0x034 */ + u32 mdio_data; /* 0x038 */ + u32 mdio_regaddr; /* 0x03c */ + u32 status; /* 0x040 */ + u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */ + u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */ + u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */ + u32 rx_fifo_ptr_rd; /* 0x050 */ + u32 rx_fifo_ptr_wr; /* 0x054 */ + u32 tx_fifo_ptr_rd; /* 0x058 */ + u32 tx_fifo_ptr_wr; /* 0x05c */ + u32 imask; /* 0x060 Interrupt mask */ + u32 ievent; /* 0x064 Interrupt event */ + u32 
udp_port; /* 0x068 Defines a UDP Port number */ + u32 type_1588v2; /* 0x06c Type field for 1588v2 */ + u32 reserved070[4]; /* 0x070 */ + /* 10Ge Statistics Counter */ + u32 tfrm_u; /* 80 aFramesTransmittedOK */ + u32 tfrm_l; /* 84 aFramesTransmittedOK */ + u32 rfrm_u; /* 88 aFramesReceivedOK */ + u32 rfrm_l; /* 8c aFramesReceivedOK */ + u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */ + u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */ + u32 raln_u; /* 98 aAlignmentErrors */ + u32 raln_l; /* 9c aAlignmentErrors */ + u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */ + u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */ + u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */ + u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */ + u32 rlong_u; /* B0 aFrameTooLongErrors */ + u32 rlong_l; /* B4 aFrameTooLongErrors */ + u32 rflr_u; /* B8 aInRangeLengthErrors */ + u32 rflr_l; /* Bc aInRangeLengthErrors */ + u32 tvlan_u; /* C0 VLANTransmittedOK */ + u32 tvlan_l; /* C4 VLANTransmittedOK */ + u32 rvlan_u; /* C8 VLANReceivedOK */ + u32 rvlan_l; /* Cc VLANReceivedOK */ + u32 toct_u; /* D0 if_out_octets */ + u32 toct_l; /* D4 if_out_octets */ + u32 roct_u; /* D8 if_in_octets */ + u32 roct_l; /* Dc if_in_octets */ + u32 ruca_u; /* E0 if_in_ucast_pkts */ + u32 ruca_l; /* E4 if_in_ucast_pkts */ + u32 rmca_u; /* E8 ifInMulticastPkts */ + u32 rmca_l; /* Ec ifInMulticastPkts */ + u32 rbca_u; /* F0 ifInBroadcastPkts */ + u32 rbca_l; /* F4 ifInBroadcastPkts */ + u32 terr_u; /* F8 if_out_errors */ + u32 terr_l; /* Fc if_out_errors */ + u32 reserved100[2]; /* 100-108 */ + u32 tuca_u; /* 108 if_out_ucast_pkts */ + u32 tuca_l; /* 10c if_out_ucast_pkts */ + u32 tmca_u; /* 110 ifOutMulticastPkts */ + u32 tmca_l; /* 114 ifOutMulticastPkts */ + u32 tbca_u; /* 118 ifOutBroadcastPkts */ + u32 tbca_l; /* 11c ifOutBroadcastPkts */ + u32 rdrp_u; /* 120 etherStatsDropEvents */ + u32 rdrp_l; /* 124 etherStatsDropEvents */ + u32 reoct_u; /* 128 etherStatsOctets */ + u32 reoct_l; /* 12c etherStatsOctets */ + u32 rpkt_u; /* 130 etherStatsPkts */ + u32 rpkt_l; /* 134 etherStatsPkts */ + u32 trund_u; /* 138 etherStatsUndersizePkts */ + u32 trund_l; /* 13c etherStatsUndersizePkts */ + u32 r64_u; /* 140 etherStatsPkts64Octets */ + u32 r64_l; /* 144 etherStatsPkts64Octets */ + u32 r127_u; /* 148 etherStatsPkts65to127Octets */ + u32 r127_l; /* 14c etherStatsPkts65to127Octets */ + u32 r255_u; /* 150 etherStatsPkts128to255Octets */ + u32 r255_l; /* 154 etherStatsPkts128to255Octets */ + u32 r511_u; /* 158 etherStatsPkts256to511Octets */ + u32 r511_l; /* 15c etherStatsPkts256to511Octets */ + u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */ + u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */ + u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */ + u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */ + u32 r1519x_u; /* 170 etherStatsPkts1519toX */ + u32 r1519x_l; /* 174 etherStatsPkts1519toX */ + u32 trovr_u; /* 178 etherStatsOversizePkts */ + u32 trovr_l; /* 17c etherStatsOversizePkts */ + u32 trjbr_u; /* 180 etherStatsJabbers */ + u32 trjbr_l; /* 184 etherStatsJabbers */ + u32 trfrg_u; /* 188 etherStatsFragments */ + u32 trfrg_l; /* 18C etherStatsFragments */ + u32 rerr_u; /* 190 if_in_errors */ + u32 rerr_l; /* 194 if_in_errors */ +}; + +struct tgec_cfg { + bool pause_ignore; + bool promiscuous_mode_enable; + u16 max_frame_length; + u16 pause_quant; + u32 tx_ipg_length; +}; + +struct fman_mac { + /* Pointer to the memory mapped registers. 
*/ + struct tgec_regs __iomem *regs; + /* MAC address of device; */ + u64 addr; + u16 max_speed; + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *exception_cb; + fman_mac_exception_cb *event_cb; + /* pointer to driver's global address hash table */ + struct eth_hash_t *multicast_addr_hash; + /* pointer to driver's individual address hash table */ + struct eth_hash_t *unicast_addr_hash; + u8 mac_id; + u32 exceptions; + struct tgec_cfg *cfg; + void *fm; + struct fman_rev_info fm_rev_info; +}; + +static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr) +{ + u32 tmp0, tmp1; + + tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24); + tmp1 = (u32)(adr[4] | adr[5] << 8); + iowrite32be(tmp0, ®s->mac_addr_0); + iowrite32be(tmp1, ®s->mac_addr_1); +} + +static void set_dflts(struct tgec_cfg *cfg) +{ + cfg->promiscuous_mode_enable = false; + cfg->pause_ignore = false; + cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH; + cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH; + cfg->pause_quant = DEFAULT_PAUSE_QUANT; +} + +static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg, + u32 exception_mask) +{ + u32 tmp; + + /* Config */ + tmp = CMF_CFG_CRC_FWD; + if (cfg->promiscuous_mode_enable) + tmp |= CMD_CFG_PROMIS_EN; + if (cfg->pause_ignore) + tmp |= CMD_CFG_PAUSE_IGNORE; + /* Payload length check disable */ + tmp |= CMD_CFG_NO_LEN_CHK; + iowrite32be(tmp, ®s->command_config); + + /* Max Frame Length */ + iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm); + /* Pause Time */ + iowrite32be(cfg->pause_quant, ®s->pause_quant); + + /* clear all pending events and set-up interrupts */ + iowrite32be(0xffffffff, ®s->ievent); + iowrite32be(ioread32be(®s->imask) | exception_mask, ®s->imask); + + return 0; +} + +static int check_init_parameters(struct fman_mac *tgec) +{ + if (tgec->max_speed < SPEED_10000) { + pr_err("10G MAC driver only support 10G speed\n"); + return -EINVAL; + } + if (tgec->addr == 0) { + pr_err("Ethernet 10G MAC Must have valid MAC Address\n"); + return -EINVAL; + } + if (!tgec->exception_cb) { + pr_err("uninitialized exception_cb\n"); + return -EINVAL; + } + if (!tgec->event_cb) { + pr_err("uninitialized event_cb\n"); + return -EINVAL; + } + + return 0; +} + +static int get_exception_flag(enum fman_mac_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FM_MAC_EX_10G_MDIO_SCAN_EVENT: + bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT; + break; + case FM_MAC_EX_10G_MDIO_CMD_CMPL: + bit_mask = TGEC_IMASK_MDIO_CMD_CMPL; + break; + case FM_MAC_EX_10G_REM_FAULT: + bit_mask = TGEC_IMASK_REM_FAULT; + break; + case FM_MAC_EX_10G_LOC_FAULT: + bit_mask = TGEC_IMASK_LOC_FAULT; + break; + case FM_MAC_EX_10G_TX_ECC_ER: + bit_mask = TGEC_IMASK_TX_ECC_ER; + break; + case FM_MAC_EX_10G_TX_FIFO_UNFL: + bit_mask = TGEC_IMASK_TX_FIFO_UNFL; + break; + case FM_MAC_EX_10G_TX_FIFO_OVFL: + bit_mask = TGEC_IMASK_TX_FIFO_OVFL; + break; + case FM_MAC_EX_10G_TX_ER: + bit_mask = TGEC_IMASK_TX_ER; + break; + case FM_MAC_EX_10G_RX_FIFO_OVFL: + bit_mask = TGEC_IMASK_RX_FIFO_OVFL; + break; + case FM_MAC_EX_10G_RX_ECC_ER: + bit_mask = TGEC_IMASK_RX_ECC_ER; + break; + case FM_MAC_EX_10G_RX_JAB_FRM: + bit_mask = TGEC_IMASK_RX_JAB_FRM; + break; + case FM_MAC_EX_10G_RX_OVRSZ_FRM: + bit_mask = TGEC_IMASK_RX_OVRSZ_FRM; + break; + case FM_MAC_EX_10G_RX_RUNT_FRM: + bit_mask = TGEC_IMASK_RX_RUNT_FRM; + break; + case FM_MAC_EX_10G_RX_FRAG_FRM: + bit_mask = TGEC_IMASK_RX_FRAG_FRM; + break; + case FM_MAC_EX_10G_RX_LEN_ER: + bit_mask = TGEC_IMASK_RX_LEN_ER; + 
break; + case FM_MAC_EX_10G_RX_CRC_ER: + bit_mask = TGEC_IMASK_RX_CRC_ER; + break; + case FM_MAC_EX_10G_RX_ALIGN_ER: + bit_mask = TGEC_IMASK_RX_ALIGN_ER; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static void tgec_err_exception(void *handle) +{ + struct fman_mac *tgec = (struct fman_mac *)handle; + struct tgec_regs __iomem *regs = tgec->regs; + u32 event; + + /* do not handle MDIO events */ + event = ioread32be(®s->ievent) & + ~(TGEC_IMASK_MDIO_SCAN_EVENT | + TGEC_IMASK_MDIO_CMD_CMPL); + + event &= ioread32be(®s->imask); + + iowrite32be(event, ®s->ievent); + + if (event & TGEC_IMASK_REM_FAULT) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT); + if (event & TGEC_IMASK_LOC_FAULT) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT); + if (event & TGEC_IMASK_TX_ECC_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER); + if (event & TGEC_IMASK_TX_FIFO_UNFL) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL); + if (event & TGEC_IMASK_TX_FIFO_OVFL) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL); + if (event & TGEC_IMASK_TX_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER); + if (event & TGEC_IMASK_RX_FIFO_OVFL) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL); + if (event & TGEC_IMASK_RX_ECC_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER); + if (event & TGEC_IMASK_RX_JAB_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM); + if (event & TGEC_IMASK_RX_OVRSZ_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM); + if (event & TGEC_IMASK_RX_RUNT_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM); + if (event & TGEC_IMASK_RX_FRAG_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM); + if (event & TGEC_IMASK_RX_LEN_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER); + if (event & TGEC_IMASK_RX_CRC_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER); + if (event & TGEC_IMASK_RX_ALIGN_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER); +} + +static void free_init_resources(struct fman_mac *tgec) +{ + fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id, + FMAN_INTR_TYPE_ERR); + + /* release the driver's group hash table */ + free_hash_table(tgec->multicast_addr_hash); + tgec->multicast_addr_hash = NULL; + + /* release the driver's individual hash table */ + free_hash_table(tgec->unicast_addr_hash); + tgec->unicast_addr_hash = NULL; +} + +static bool is_init_done(struct tgec_cfg *cfg) +{ + /* Checks if tGEC driver parameters were initialized */ + if (!cfg) + return true; + + return false; +} + +int tgec_enable(struct fman_mac *tgec, enum comm_mode mode) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp |= CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp |= CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_disable(struct fman_mac *tgec, enum comm_mode mode) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp &= ~CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp &= ~CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if 
(!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (new_val) + tmp |= CMD_CFG_PROMIS_EN; + else + tmp &= ~CMD_CFG_PROMIS_EN; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val) +{ + if (is_init_done(tgec->cfg)) + return -EINVAL; + + tgec->cfg->max_frame_length = new_val; + + return 0; +} + +int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority, + u16 pause_time, u16 __maybe_unused thresh_time) +{ + struct tgec_regs __iomem *regs = tgec->regs; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + iowrite32be((u32)pause_time, ®s->pause_quant); + + return 0; +} + +int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (!en) + tmp |= CMD_CFG_PAUSE_IGNORE; + else + tmp &= ~CMD_CFG_PAUSE_IGNORE; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr) +{ + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr); + set_mac_address(tgec->regs, (u8 *)(*p_enet_addr)); + + return 0; +} + +int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) +{ + struct tgec_regs __iomem *regs = tgec->regs; + struct eth_hash_entry *hash_entry; + u32 crc = 0xFFFFFFFF, hash; + u64 addr; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + if (!(addr & GROUP_ADDRESS)) { + /* Unicast addresses not supported in hash */ + pr_err("Unicast Address\n"); + return -EINVAL; + } + /* CRC calculation */ + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + /* Take 9 MSB bits */ + hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; + + /* Create element to be added to the driver hash table */ + hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; + INIT_LIST_HEAD(&hash_entry->node); + + list_add_tail(&hash_entry->node, + &tgec->multicast_addr_hash->lsts[hash]); + iowrite32be((hash | TGEC_HASH_MCAST_EN), ®s->hashtable_ctrl); + + return 0; +} + +int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) +{ + struct tgec_regs __iomem *regs = tgec->regs; + struct eth_hash_entry *hash_entry = NULL; + struct list_head *pos; + u32 crc = 0xFFFFFFFF, hash; + u64 addr; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + addr = ((*(u64 *)eth_addr) >> 16); + + /* CRC calculation */ + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + /* Take 9 MSB bits */ + hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; + + list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&tgec->multicast_addr_hash->lsts[hash])) + iowrite32be((hash & ~TGEC_HASH_MCAST_EN), + ®s->hashtable_ctrl); + + return 0; +} + +int tgec_get_version(struct fman_mac *tgec, u32 *mac_version) +{ + struct tgec_regs __iomem *regs = tgec->regs; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + *mac_version = ioread32be(®s->tgec_id); + + return 0; +} + +int tgec_set_exception(struct fman_mac *tgec, + enum fman_mac_exceptions exception, bool enable) +{ + struct tgec_regs __iomem *regs = 
tgec->regs; + u32 bit_mask = 0; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + tgec->exceptions |= bit_mask; + else + tgec->exceptions &= ~bit_mask; + } else { + pr_err("Undefined exception\n"); + return -EINVAL; + } + if (enable) + iowrite32be(ioread32be(®s->imask) | bit_mask, ®s->imask); + else + iowrite32be(ioread32be(®s->imask) & ~bit_mask, ®s->imask); + + return 0; +} + +int tgec_init(struct fman_mac *tgec) +{ + struct tgec_cfg *cfg; + enet_addr_t eth_addr; + int err; + + if (is_init_done(tgec->cfg)) + return -EINVAL; + + if (DEFAULT_RESET_ON_INIT && + (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) { + pr_err("Can't reset MAC!\n"); + return -EINVAL; + } + + err = check_init_parameters(tgec); + if (err) + return err; + + cfg = tgec->cfg; + + MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr); + set_mac_address(tgec->regs, (u8 *)eth_addr); + + /* interrupts */ + /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */ + if (tgec->fm_rev_info.major <= 2) + tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT | + TGEC_IMASK_LOC_FAULT); + + err = init(tgec->regs, cfg, tgec->exceptions); + if (err) { + free_init_resources(tgec); + pr_err("TGEC version doesn't support this i/f mode\n"); + return err; + } + + /* Max Frame Length */ + err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id, + cfg->max_frame_length); + if (err) { + pr_err("Setting max frame length FAILED\n"); + free_init_resources(tgec); + return -EINVAL; + } + + /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */ + if (tgec->fm_rev_info.major == 2) { + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + /* restore the default tx ipg Length */ + tmp = (ioread32be(®s->tx_ipg_len) & + ~TGEC_TX_IPG_LENGTH_MASK) | 12; + + iowrite32be(tmp, ®s->tx_ipg_len); + } + + tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE); + if (!tgec->multicast_addr_hash) { + free_init_resources(tgec); + pr_err("allocation hash table is FAILED\n"); + return -ENOMEM; + } + + tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE); + if (!tgec->unicast_addr_hash) { + free_init_resources(tgec); + pr_err("allocation hash table is FAILED\n"); + return -ENOMEM; + } + + fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id, + FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec); + + kfree(cfg); + tgec->cfg = NULL; + + return 0; +} + +int tgec_free(struct fman_mac *tgec) +{ + free_init_resources(tgec); + + if (tgec->cfg) + tgec->cfg = NULL; + + kfree(tgec->cfg); + kfree(tgec); + + return 0; +} + +struct fman_mac *tgec_config(struct fman_mac_params *params) +{ + struct fman_mac *tgec; + struct tgec_cfg *cfg; + void __iomem *base_addr; + + base_addr = params->base_addr; + /* allocate memory for the UCC GETH data structure. */ + tgec = kzalloc(sizeof(*tgec), GFP_KERNEL); + if (!tgec) + return NULL; + + /* allocate memory for the 10G MAC driver parameters data structure. 
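/*
 * Illustrative sketch, not part of the patch: tgec_add_hash_mac_address()
 * and tgec_del_hash_mac_address() above derive the hash-table index by
 * running a little-endian CRC-32 over the 6-byte MAC address, bit-reversing
 * the result and keeping the 9 most significant bits.  The same index
 * computation in isolation; the 23/0x1ff values are assumed to match
 * TGEC_HASH_MCAST_SHIFT and TGEC_HASH_ADR_MSK.
 */
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/if_ether.h>

static u32 tgec_hash_index(const u8 *eth_addr)
{
	u32 crc = crc32_le(0xffffffff, eth_addr, ETH_ALEN);

	crc = bitrev32(crc);
	return (crc >> 23) & 0x1ff;	/* 9 MSBs -> index 0..511 */
}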
*/ + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + tgec_free(tgec); + return NULL; + } + + /* Plant parameter structure pointer */ + tgec->cfg = cfg; + + set_dflts(cfg); + + tgec->regs = base_addr; + tgec->addr = ENET_ADDR_TO_UINT64(params->addr); + tgec->max_speed = params->max_speed; + tgec->mac_id = params->mac_id; + tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT | + TGEC_IMASK_REM_FAULT | + TGEC_IMASK_LOC_FAULT | + TGEC_IMASK_TX_ECC_ER | + TGEC_IMASK_TX_FIFO_UNFL | + TGEC_IMASK_TX_FIFO_OVFL | + TGEC_IMASK_TX_ER | + TGEC_IMASK_RX_FIFO_OVFL | + TGEC_IMASK_RX_ECC_ER | + TGEC_IMASK_RX_JAB_FRM | + TGEC_IMASK_RX_OVRSZ_FRM | + TGEC_IMASK_RX_RUNT_FRM | + TGEC_IMASK_RX_FRAG_FRM | + TGEC_IMASK_RX_CRC_ER | + TGEC_IMASK_RX_ALIGN_ER); + tgec->exception_cb = params->exception_cb; + tgec->event_cb = params->event_cb; + tgec->dev_id = params->dev_id; + tgec->fm = params->fm; + + /* Save FMan revision */ + fman_get_revision(tgec->fm, &tgec->fm_rev_info); + + return tgec; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h new file mode 100644 index 000000000000..514bba9f47ce --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -0,0 +1,55 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __TGEC_H +#define __TGEC_H + +#include "fman_mac.h" + +struct fman_mac *tgec_config(struct fman_mac_params *params); +int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val); +int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr); +int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val); +int tgec_enable(struct fman_mac *tgec, enum comm_mode mode); +int tgec_disable(struct fman_mac *tgec, enum comm_mode mode); +int tgec_init(struct fman_mac *tgec); +int tgec_free(struct fman_mac *tgec); +int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en); +int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority, + u16 pause_time, u16 thresh_time); +int tgec_set_exception(struct fman_mac *tgec, + enum fman_mac_exceptions exception, bool enable); +int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); +int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); +int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); + +#endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c new file mode 100644 index 000000000000..e33d9d24c1db --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -0,0 +1,977 @@ +/* Copyright 2008-2015 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
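/*
 * Illustrative sketch, not part of the patch: a minimal bring-up sequence
 * for the tgec API declared above, mirroring tgec_initialization() in
 * mac.c, with the enable step that mac.c performs later in start() folded
 * in.  Error handling is abbreviated and the fman_mac_params contents are
 * assumed to be filled in by the caller.
 */
static struct fman_mac *tgec_bring_up(struct fman_mac_params *params)
{
	struct fman_mac *tgec = tgec_config(params);

	if (!tgec)
		return NULL;

	if (tgec_cfg_max_frame_len(tgec, fman_get_max_frm()) ||
	    tgec_init(tgec) ||
	    tgec_enable(tgec, COMM_MODE_RX_AND_TX)) {
		tgec_free(tgec);
		return NULL;
	}

	return tgec;
}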
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/device.h> +#include <linux/phy.h> +#include <linux/netdevice.h> +#include <linux/phy_fixed.h> +#include <linux/etherdevice.h> +#include <linux/libfdt_env.h> + +#include "mac.h" +#include "fman_mac.h" +#include "fman_dtsec.h" +#include "fman_tgec.h" +#include "fman_memac.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL FMan MAC API based driver"); + +struct mac_priv_s { + struct device *dev; + void __iomem *vaddr; + u8 cell_index; + phy_interface_t phy_if; + struct fman *fman; + struct device_node *phy_node; + struct device_node *internal_phy_node; + /* List of multicast addresses */ + struct list_head mc_addr_list; + struct platform_device *eth_dev; + struct fixed_phy_status *fixed_link; + u16 speed; + u16 max_speed; + + int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode); + int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode); +}; + +struct mac_address { + u8 addr[ETH_ALEN]; + struct list_head list; +}; + +static void mac_exception(void *handle, enum fman_mac_exceptions ex) +{ + struct mac_device *mac_dev; + struct mac_priv_s *priv; + + mac_dev = handle; + priv = mac_dev->priv; + + if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) { + /* don't flag RX FIFO after the first */ + mac_dev->set_exception(mac_dev->fman_mac, + FM_MAC_EX_10G_RX_FIFO_OVFL, false); + dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex); + } + + dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c", + __func__, ex); +} + +static void set_fman_mac_params(struct mac_device *mac_dev, + struct fman_mac_params *params) +{ + struct mac_priv_s *priv = mac_dev->priv; + + params->base_addr = (typeof(params->base_addr)) + devm_ioremap(priv->dev, mac_dev->res->start, + resource_size(mac_dev->res)); + memcpy(¶ms->addr, mac_dev->addr, sizeof(mac_dev->addr)); + params->max_speed = priv->max_speed; + params->phy_if = priv->phy_if; + params->basex_if = false; + params->mac_id = priv->cell_index; + params->fm = (void *)priv->fman; + params->exception_cb = mac_exception; + params->event_cb = mac_exception; + params->dev_id = mac_dev; + params->internal_phy_node = priv->internal_phy_node; +} + +static int tgec_initialization(struct mac_device *mac_dev) +{ + int err; + struct mac_priv_s *priv; + struct fman_mac_params params; + u32 version; + + priv = mac_dev->priv; + + set_fman_mac_params(mac_dev, ¶ms); + + mac_dev->fman_mac = tgec_config(¶ms); + if (!mac_dev->fman_mac) { + err = -EINVAL; + goto _return; + } + + err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm()); + if (err < 0) + goto _return_fm_mac_free; + + err = tgec_init(mac_dev->fman_mac); + if (err < 0) + goto _return_fm_mac_free; + + /* For 10G MAC, disable Tx ECC exception */ + err = mac_dev->set_exception(mac_dev->fman_mac, + FM_MAC_EX_10G_TX_ECC_ER, false); + if (err < 0) + goto _return_fm_mac_free; + + err = tgec_get_version(mac_dev->fman_mac, &version); + if (err < 0) + goto _return_fm_mac_free; + + dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version); + + goto _return; + +_return_fm_mac_free: + tgec_free(mac_dev->fman_mac); + +_return: + return err; +} + +static int dtsec_initialization(struct mac_device *mac_dev) +{ + int err; + struct mac_priv_s *priv; + struct fman_mac_params params; + u32 version; + + priv = mac_dev->priv; + + set_fman_mac_params(mac_dev, ¶ms); + + 
mac_dev->fman_mac = dtsec_config(¶ms); + if (!mac_dev->fman_mac) { + err = -EINVAL; + goto _return; + } + + err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm()); + if (err < 0) + goto _return_fm_mac_free; + + err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true); + if (err < 0) + goto _return_fm_mac_free; + + err = dtsec_init(mac_dev->fman_mac); + if (err < 0) + goto _return_fm_mac_free; + + /* For 1G MAC, disable by default the MIB counters overflow interrupt */ + err = mac_dev->set_exception(mac_dev->fman_mac, + FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false); + if (err < 0) + goto _return_fm_mac_free; + + err = dtsec_get_version(mac_dev->fman_mac, &version); + if (err < 0) + goto _return_fm_mac_free; + + dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version); + + goto _return; + +_return_fm_mac_free: + dtsec_free(mac_dev->fman_mac); + +_return: + return err; +} + +static int memac_initialization(struct mac_device *mac_dev) +{ + int err; + struct mac_priv_s *priv; + struct fman_mac_params params; + + priv = mac_dev->priv; + + set_fman_mac_params(mac_dev, ¶ms); + + if (priv->max_speed == SPEED_10000) + params.phy_if = PHY_INTERFACE_MODE_XGMII; + + mac_dev->fman_mac = memac_config(¶ms); + if (!mac_dev->fman_mac) { + err = -EINVAL; + goto _return; + } + + err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm()); + if (err < 0) + goto _return_fm_mac_free; + + err = memac_cfg_reset_on_init(mac_dev->fman_mac, true); + if (err < 0) + goto _return_fm_mac_free; + + err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link); + if (err < 0) + goto _return_fm_mac_free; + + err = memac_init(mac_dev->fman_mac); + if (err < 0) + goto _return_fm_mac_free; + + dev_info(priv->dev, "FMan MEMAC\n"); + + goto _return; + +_return_fm_mac_free: + memac_free(mac_dev->fman_mac); + +_return: + return err; +} + +static int start(struct mac_device *mac_dev) +{ + int err; + struct phy_device *phy_dev = mac_dev->phy_dev; + struct mac_priv_s *priv = mac_dev->priv; + + err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX); + if (!err && phy_dev) + phy_start(phy_dev); + + return err; +} + +static int stop(struct mac_device *mac_dev) +{ + struct mac_priv_s *priv = mac_dev->priv; + + if (mac_dev->phy_dev) + phy_stop(mac_dev->phy_dev); + + return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX); +} + +static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev) +{ + struct mac_priv_s *priv; + struct mac_address *old_addr, *tmp; + struct netdev_hw_addr *ha; + int err; + enet_addr_t *addr; + + priv = mac_dev->priv; + + /* Clear previous address list */ + list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) { + addr = (enet_addr_t *)old_addr->addr; + err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr); + if (err < 0) + return err; + + list_del(&old_addr->list); + kfree(old_addr); + } + + /* Add all the addresses from the new list */ + netdev_for_each_mc_addr(ha, net_dev) { + addr = (enet_addr_t *)ha->addr; + err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr); + if (err < 0) + return err; + + tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + ether_addr_copy(tmp->addr, ha->addr); + list_add(&tmp->list, &priv->mc_addr_list); + } + return 0; +} + +/** + * fman_set_mac_active_pause + * @mac_dev: A pointer to the MAC device + * @rx: Pause frame setting for RX + * @tx: Pause frame setting for TX + * + * Set the MAC RX/TX PAUSE frames settings + * + * Avoid redundant calls to FMD, if the MAC driver already contains the desired + 
* active PAUSE settings. Otherwise, the new active settings should be reflected + * in FMan. + * + * Return: 0 on success; Error code otherwise. + */ +int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx) +{ + struct fman_mac *fman_mac = mac_dev->fman_mac; + int err = 0; + + if (rx != mac_dev->rx_pause_active) { + err = mac_dev->set_rx_pause(fman_mac, rx); + if (likely(err == 0)) + mac_dev->rx_pause_active = rx; + } + + if (tx != mac_dev->tx_pause_active) { + u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE); + + err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0); + + if (likely(err == 0)) + mac_dev->tx_pause_active = tx; + } + + return err; +} +EXPORT_SYMBOL(fman_set_mac_active_pause); + +/** + * fman_get_pause_cfg + * @mac_dev: A pointer to the MAC device + * @rx_pause: Return value for the RX pause setting + * @tx_pause: Return value for the TX pause setting + * + * Determine the MAC RX/TX PAUSE frame settings based on PHY + * autonegotiation or the values set by ethtool. + */ +void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, + bool *tx_pause) +{ + struct phy_device *phy_dev = mac_dev->phy_dev; + u16 lcl_adv, rmt_adv; + u8 flowctrl; + + *rx_pause = *tx_pause = false; + + if (!phy_dev->duplex) + return; + + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings + * are those set by ethtool. + */ + if (!mac_dev->autoneg_pause) { + *rx_pause = mac_dev->rx_pause_req; + *tx_pause = mac_dev->tx_pause_req; + return; + } + + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE + * settings depend on the result of the link negotiation. + */ + + /* get local capabilities */ + lcl_adv = 0; + if (phy_dev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phy_dev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + /* get link partner capabilities */ + rmt_adv = 0; + if (phy_dev->pause) + rmt_adv |= LPA_PAUSE_CAP; + if (phy_dev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + /* Calculate TX/RX settings based on local and peer advertised + * symmetric/asymmetric PAUSE capabilities. 
+ */ + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_RX) + *rx_pause = true; + if (flowctrl & FLOW_CTRL_TX) + *tx_pause = true; +} +EXPORT_SYMBOL(fman_get_pause_cfg); + +static void adjust_link_void(struct net_device *net_dev) +{ +} + +static void adjust_link_dtsec(struct net_device *net_dev) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa_eth_data *eth_data = dev->platform_data; + struct mac_device *mac_dev = eth_data->mac_dev; + struct phy_device *phy_dev = mac_dev->phy_dev; + struct fman_mac *fman_mac; + bool rx_pause, tx_pause; + int err; + + fman_mac = mac_dev->fman_mac; + if (!phy_dev->link) { + dtsec_restart_autoneg(fman_mac); + + return; + } + + dtsec_adjust_link(fman_mac, phy_dev->speed); + fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); + err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); + if (err < 0) + netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err); +} + +static void adjust_link_memac(struct net_device *net_dev) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa_eth_data *eth_data = dev->platform_data; + struct mac_device *mac_dev = eth_data->mac_dev; + struct phy_device *phy_dev = mac_dev->phy_dev; + struct fman_mac *fman_mac; + bool rx_pause, tx_pause; + int err; + + fman_mac = mac_dev->fman_mac; + memac_adjust_link(fman_mac, phy_dev->speed); + + fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); + err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); + if (err < 0) + netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err); +} + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. + */ +static int init_phy(struct net_device *net_dev, + struct mac_device *mac_dev, + void (*adj_lnk)(struct net_device *)) +{ + struct phy_device *phy_dev; + struct mac_priv_s *priv = mac_dev->priv; + + phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0, + priv->phy_if); + if (!phy_dev) { + netdev_err(net_dev, "Could not connect to PHY\n"); + return -ENODEV; + } + + /* Remove any features not supported by the controller */ + phy_dev->supported &= mac_dev->if_support; + /* Enable the symmetric and asymmetric PAUSE frame advertisements, + * as most of the PHY drivers do not enable them by default. 
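/*
 * Illustrative sketch, not part of the patch: how the advertisement bits
 * collected in fman_get_pause_cfg() resolve into RX/TX pause.  With both
 * link partners advertising symmetric PAUSE, mii_resolve_flowctrl_fdx()
 * from <linux/mii.h> reports both directions enabled; asymmetric-only
 * combinations enable a single direction.
 */
static void pause_resolution_example(void)
{
	u16 lcl_adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	u16 rmt_adv = LPA_PAUSE_CAP;
	u8 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

	/* both sides advertise symmetric PAUSE -> pause in both directions */
	WARN_ON(flowctrl != (FLOW_CTRL_RX | FLOW_CTRL_TX));
}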
+ */ + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + phy_dev->advertising = phy_dev->supported; + + mac_dev->phy_dev = phy_dev; + + return 0; +} + +static int dtsec_init_phy(struct net_device *net_dev, + struct mac_device *mac_dev) +{ + return init_phy(net_dev, mac_dev, &adjust_link_dtsec); +} + +static int tgec_init_phy(struct net_device *net_dev, + struct mac_device *mac_dev) +{ + return init_phy(net_dev, mac_dev, adjust_link_void); +} + +static int memac_init_phy(struct net_device *net_dev, + struct mac_device *mac_dev) +{ + return init_phy(net_dev, mac_dev, &adjust_link_memac); +} + +static void setup_dtsec(struct mac_device *mac_dev) +{ + mac_dev->init_phy = dtsec_init_phy; + mac_dev->init = dtsec_initialization; + mac_dev->set_promisc = dtsec_set_promiscuous; + mac_dev->change_addr = dtsec_modify_mac_address; + mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address; + mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address; + mac_dev->set_tx_pause = dtsec_set_tx_pause_frames; + mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; + mac_dev->set_exception = dtsec_set_exception; + mac_dev->set_multi = set_multi; + mac_dev->start = start; + mac_dev->stop = stop; + + mac_dev->priv->enable = dtsec_enable; + mac_dev->priv->disable = dtsec_disable; +} + +static void setup_tgec(struct mac_device *mac_dev) +{ + mac_dev->init_phy = tgec_init_phy; + mac_dev->init = tgec_initialization; + mac_dev->set_promisc = tgec_set_promiscuous; + mac_dev->change_addr = tgec_modify_mac_address; + mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address; + mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address; + mac_dev->set_tx_pause = tgec_set_tx_pause_frames; + mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; + mac_dev->set_exception = tgec_set_exception; + mac_dev->set_multi = set_multi; + mac_dev->start = start; + mac_dev->stop = stop; + + mac_dev->priv->enable = tgec_enable; + mac_dev->priv->disable = tgec_disable; +} + +static void setup_memac(struct mac_device *mac_dev) +{ + mac_dev->init_phy = memac_init_phy; + mac_dev->init = memac_initialization; + mac_dev->set_promisc = memac_set_promiscuous; + mac_dev->change_addr = memac_modify_mac_address; + mac_dev->add_hash_mac_addr = memac_add_hash_mac_address; + mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address; + mac_dev->set_tx_pause = memac_set_tx_pause_frames; + mac_dev->set_rx_pause = memac_accept_rx_pause_frames; + mac_dev->set_exception = memac_set_exception; + mac_dev->set_multi = set_multi; + mac_dev->start = start; + mac_dev->stop = stop; + + mac_dev->priv->enable = memac_enable; + mac_dev->priv->disable = memac_disable; +} + +#define DTSEC_SUPPORTED \ + (SUPPORTED_10baseT_Half \ + | SUPPORTED_10baseT_Full \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_Autoneg \ + | SUPPORTED_Pause \ + | SUPPORTED_Asym_Pause \ + | SUPPORTED_MII) + +static DEFINE_MUTEX(eth_lock); + +static const char phy_str[][11] = { + [PHY_INTERFACE_MODE_MII] = "mii", + [PHY_INTERFACE_MODE_GMII] = "gmii", + [PHY_INTERFACE_MODE_SGMII] = "sgmii", + [PHY_INTERFACE_MODE_TBI] = "tbi", + [PHY_INTERFACE_MODE_RMII] = "rmii", + [PHY_INTERFACE_MODE_RGMII] = "rgmii", + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid", + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", + [PHY_INTERFACE_MODE_RTBI] = "rtbi", + [PHY_INTERFACE_MODE_XGMII] = "xgmii" +}; + +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phy_str); 
i++) + if (strcmp(str, phy_str[i]) == 0) + return (phy_interface_t)i; + + return PHY_INTERFACE_MODE_MII; +} + +static const u16 phy2speed[] = { + [PHY_INTERFACE_MODE_MII] = SPEED_100, + [PHY_INTERFACE_MODE_GMII] = SPEED_1000, + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, + [PHY_INTERFACE_MODE_TBI] = SPEED_1000, + [PHY_INTERFACE_MODE_RMII] = SPEED_100, + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000 +}; + +static struct platform_device *dpaa_eth_add_device(int fman_id, + struct mac_device *mac_dev, + struct device_node *node) +{ + struct platform_device *pdev; + struct dpaa_eth_data data; + struct mac_priv_s *priv; + static int dpaa_eth_dev_cnt; + int ret; + + priv = mac_dev->priv; + + data.mac_dev = mac_dev; + data.mac_hw_id = priv->cell_index; + data.fman_hw_id = fman_id; + data.mac_node = node; + + mutex_lock(ð_lock); + + pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt); + if (!pdev) { + ret = -ENOMEM; + goto no_mem; + } + + ret = platform_device_add_data(pdev, &data, sizeof(data)); + if (ret) + goto err; + + ret = platform_device_add(pdev); + if (ret) + goto err; + + dpaa_eth_dev_cnt++; + mutex_unlock(ð_lock); + + return pdev; + +err: + platform_device_put(pdev); +no_mem: + mutex_unlock(ð_lock); + + return ERR_PTR(ret); +} + +static const struct of_device_id mac_match[] = { + { .compatible = "fsl,fman-dtsec" }, + { .compatible = "fsl,fman-xgec" }, + { .compatible = "fsl,fman-memac" }, + {} +}; +MODULE_DEVICE_TABLE(of, mac_match); + +static int mac_probe(struct platform_device *_of_dev) +{ + int err, i, lenp, nph; + struct device *dev; + struct device_node *mac_node, *dev_node; + struct mac_device *mac_dev; + struct platform_device *of_dev; + struct resource res; + struct mac_priv_s *priv; + const u8 *mac_addr; + const char *char_prop; + const u32 *u32_prop; + u8 fman_id; + + dev = &_of_dev->dev; + mac_node = dev->of_node; + + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL); + if (!mac_dev) { + err = -ENOMEM; + dev_err(dev, "devm_kzalloc() = %d\n", err); + goto _return; + } + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto _return; + } + + /* Save private information */ + mac_dev->priv = priv; + priv->dev = dev; + + if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) { + setup_dtsec(mac_dev); + priv->internal_phy_node = of_parse_phandle(mac_node, + "tbi-handle", 0); + } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) { + setup_tgec(mac_dev); + } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) { + setup_memac(mac_dev); + priv->internal_phy_node = of_parse_phandle(mac_node, + "pcsphy-handle", 0); + } else { + dev_err(dev, "MAC node (%s) contains unsupported MAC\n", + mac_node->full_name); + err = -EINVAL; + goto _return; + } + + /* Register mac_dev */ + dev_set_drvdata(dev, mac_dev); + + INIT_LIST_HEAD(&priv->mc_addr_list); + + /* Get the FM node */ + dev_node = of_get_parent(mac_node); + if (!dev_node) { + dev_err(dev, "of_get_parent(%s) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + + of_dev = of_find_device_by_node(dev_node); + if (!of_dev) { + dev_err(dev, "of_find_device_by_node(%s) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + + /* Get the FMan cell-index */ + u32_prop = 
of_get_property(dev_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(dev, "of_get_property(%s, cell-index) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + WARN_ON(lenp != sizeof(u32)); + /* cell-index 0 => FMan id 1 */ + fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1); + + priv->fman = fman_bind(&of_dev->dev); + if (!priv->fman) { + dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name); + err = -ENODEV; + goto _return_of_node_put; + } + + of_node_put(dev_node); + + /* Get the address of the memory mapped registers */ + err = of_address_to_resource(mac_node, 0, &res); + if (err < 0) { + dev_err(dev, "of_address_to_resource(%s) = %d\n", + mac_node->full_name, err); + goto _return_dev_set_drvdata; + } + + mac_dev->res = __devm_request_region(dev, + fman_get_mem_region(priv->fman), + res.start, res.end + 1 - res.start, + "mac"); + if (!mac_dev->res) { + dev_err(dev, "__devm_request_mem_region(mac) failed\n"); + err = -EBUSY; + goto _return_dev_set_drvdata; + } + + priv->vaddr = devm_ioremap(dev, mac_dev->res->start, + mac_dev->res->end + 1 - mac_dev->res->start); + if (!priv->vaddr) { + dev_err(dev, "devm_ioremap() failed\n"); + err = -EIO; + goto _return_dev_set_drvdata; + } + + if (!of_device_is_available(mac_node)) { + devm_iounmap(dev, priv->vaddr); + __devm_release_region(dev, fman_get_mem_region(priv->fman), + res.start, res.end + 1 - res.start); + devm_kfree(dev, mac_dev); + dev_set_drvdata(dev, NULL); + return -ENODEV; + } + + /* Get the cell-index */ + u32_prop = of_get_property(mac_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(dev, "of_get_property(%s, cell-index) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + WARN_ON(lenp != sizeof(u32)); + priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]); + + /* Get the MAC address */ + mac_addr = of_get_mac_address(mac_node); + if (!mac_addr) { + dev_err(dev, "of_get_mac_address(%s) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); + + /* Get the port handles */ + nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); + if (unlikely(nph < 0)) { + dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n", + mac_node->full_name); + err = nph; + goto _return_dev_set_drvdata; + } + + if (nph != ARRAY_SIZE(mac_dev->port)) { + dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + + for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { + /* Find the port node */ + dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); + if (!dev_node) { + dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + + of_dev = of_find_device_by_node(dev_node); + if (!of_dev) { + dev_err(dev, "of_find_device_by_node(%s) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + + mac_dev->port[i] = fman_port_bind(&of_dev->dev); + if (!mac_dev->port[i]) { + dev_err(dev, "dev_get_drvdata(%s) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + of_node_put(dev_node); + } + + /* Get the PHY connection type */ + char_prop = (const char *)of_get_property(mac_node, + "phy-connection-type", NULL); + if (!char_prop) { + dev_warn(dev, + "of_get_property(%s, phy-connection-type) failed. 
Defaulting to MII\n", + mac_node->full_name); + priv->phy_if = PHY_INTERFACE_MODE_MII; + } else { + priv->phy_if = str2phy(char_prop); + } + + priv->speed = phy2speed[priv->phy_if]; + priv->max_speed = priv->speed; + mac_dev->if_support = DTSEC_SUPPORTED; + /* We don't support half-duplex in SGMII mode */ + if (priv->phy_if == PHY_INTERFACE_MODE_SGMII) + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_100baseT_Half); + + /* Gigabit support (no half-duplex) */ + if (priv->max_speed == 1000) + mac_dev->if_support |= SUPPORTED_1000baseT_Full; + + /* The 10G interface only supports one mode */ + if (priv->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->if_support = SUPPORTED_10000baseT_Full; + + /* Get the rest of the PHY information */ + priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); + if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) { + struct phy_device *phy; + + err = of_phy_register_fixed_link(mac_node); + if (err) + goto _return_dev_set_drvdata; + + priv->fixed_link = kzalloc(sizeof(*priv->fixed_link), + GFP_KERNEL); + if (!priv->fixed_link) + goto _return_dev_set_drvdata; + + priv->phy_node = of_node_get(mac_node); + phy = of_phy_find_device(priv->phy_node); + if (!phy) + goto _return_dev_set_drvdata; + + priv->fixed_link->link = phy->link; + priv->fixed_link->speed = phy->speed; + priv->fixed_link->duplex = phy->duplex; + priv->fixed_link->pause = phy->pause; + priv->fixed_link->asym_pause = phy->asym_pause; + } + + err = mac_dev->init(mac_dev); + if (err < 0) { + dev_err(dev, "mac_dev->init() = %d\n", err); + of_node_put(priv->phy_node); + goto _return_dev_set_drvdata; + } + + /* pause frame autonegotiation enabled */ + mac_dev->autoneg_pause = true; + + /* By intializing the values to false, force FMD to enable PAUSE frames + * on RX and TX + */ + mac_dev->rx_pause_req = true; + mac_dev->tx_pause_req = true; + mac_dev->rx_pause_active = false; + mac_dev->tx_pause_active = false; + err = fman_set_mac_active_pause(mac_dev, true, true); + if (err < 0) + dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); + + dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); + + priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node); + if (IS_ERR(priv->eth_dev)) { + dev_err(dev, "failed to add Ethernet platform device for MAC %d\n", + priv->cell_index); + priv->eth_dev = NULL; + } + + goto _return; + +_return_of_node_put: + of_node_put(dev_node); +_return_dev_set_drvdata: + kfree(priv->fixed_link); + dev_set_drvdata(dev, NULL); +_return: + return err; +} + +static struct platform_driver mac_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = mac_match, + }, + .probe = mac_probe, +}; + +builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h new file mode 100644 index 000000000000..0211cc9a46d6 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -0,0 +1,97 @@ +/* Copyright 2008-2015 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MAC_H +#define __MAC_H + +#include <linux/device.h> +#include <linux/if_ether.h> +#include <linux/phy.h> +#include <linux/list.h> + +#include "fman_port.h" +#include "fman.h" +#include "fman_mac.h" + +struct fman_mac; +struct mac_priv_s; + +struct mac_device { + struct resource *res; + u8 addr[ETH_ALEN]; + struct fman_port *port[2]; + u32 if_support; + struct phy_device *phy_dev; + + bool autoneg_pause; + bool rx_pause_req; + bool tx_pause_req; + bool rx_pause_active; + bool tx_pause_active; + bool promisc; + + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev); + int (*init)(struct mac_device *mac_dev); + int (*start)(struct mac_device *mac_dev); + int (*stop)(struct mac_device *mac_dev); + int (*set_promisc)(struct fman_mac *mac_dev, bool enable); + int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); + int (*set_multi)(struct net_device *net_dev, + struct mac_device *mac_dev); + int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); + int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority, + u16 pause_time, u16 thresh_time); + int (*set_exception)(struct fman_mac *mac_dev, + enum fman_mac_exceptions exception, bool enable); + int (*add_hash_mac_addr)(struct fman_mac *mac_dev, + enet_addr_t *eth_addr); + int (*remove_hash_mac_addr)(struct fman_mac *mac_dev, + enet_addr_t *eth_addr); + + struct fman_mac *fman_mac; + struct mac_priv_s *priv; +}; + +struct dpaa_eth_data { + struct device_node *mac_node; + struct mac_device *mac_dev; + int mac_hw_id; + int fman_hw_id; +}; + +extern const char *mac_driver_description; + +int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx); + +void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, + bool *tx_pause); + +#endif /* __MAC_H */ diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index cf8e54652df9..48a9c176e0d1 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1050,7 +1050,7 @@ static int 
fs_enet_probe(struct platform_device *ofdev) ndev->netdev_ops = &fs_enet_netdev_ops; ndev->watchdog_timeo = 2 * HZ; netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); - netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); + netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); ndev->ethtool_ops = &fs_ethtool_ops; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 08f5b911d96b..52e0091b4fb2 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) cbd_t __iomem *prev_bd; cbd_t __iomem *last_tx_bd; - last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); + last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); /* get the current bd held in TBPTR and scan back from this point */ recheck_bd = curr_tbptr = (cbd_t __iomem *) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 016743e355de..bade2f8f9b5c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -254,7 +254,7 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; - struct mii_bus* mii = fep->phydev->bus; + struct mii_bus *mii = fep->phydev->mdio.bus; struct fec_info* fec_inf = mii->priv; r = whack_reset(fep->fec.fecp); @@ -363,7 +363,7 @@ static void stop(struct net_device *dev) const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; - struct fec_info* feci= fep->phydev->bus->priv; + struct fec_info *feci = fep->phydev->mdio.bus->priv; int i; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 68a428de0bc0..1f015edcca22 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -172,23 +172,16 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) goto out_free_bus; new_bus->phy_mask = ~0; - new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) { - ret = -ENOMEM; - goto out_unmap_regs; - } new_bus->parent = &ofdev->dev; platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) - goto out_free_irqs; + goto out_unmap_regs; return 0; -out_free_irqs: - kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); out_free_bus: @@ -205,7 +198,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); - kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); kfree(bitbang); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 2be383e6d258..a89267b94352 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -166,23 +166,16 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed); new_bus->phy_mask = ~0; - new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) { - ret = -ENOMEM; - goto out_unmap_regs; - } new_bus->parent = &ofdev->dev; platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) - goto out_free_irqs; + goto out_unmap_regs; return 0; 
-out_free_irqs: - kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); out_res: @@ -200,7 +193,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) struct fec_info *fec = bus->priv; mdiobus_unregister(bus); - kfree(bus->irq); iounmap(fec->fecp); kfree(fec); mdiobus_free(bus); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 55c36230e176..622005abf859 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -69,7 +69,6 @@ struct fsl_pq_mdio { struct fsl_pq_mdio_priv { void __iomem *map; struct fsl_pq_mii __iomem *regs; - int irqs[PHY_MAX_ADDR]; }; /* @@ -401,7 +400,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) new_bus->read = &fsl_pq_mdio_read; new_bus->write = &fsl_pq_mdio_write; new_bus->reset = &fsl_pq_mdio_reset; - new_bus->irq = priv->irqs; err = of_address_to_resource(np, 0, &res); if (err < 0) { @@ -464,7 +462,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) * address). Print error message but continue anyway. */ if ((void *)tbipa > priv->map + resource_size(&res) - 4) - dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", ((void *)tbipa - priv->map) + 4); iowrite32be(be32_to_cpup(prop), tbipa); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7cf898455e60..2aa7b401cc3b 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -738,7 +738,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) struct gfar_private *priv = NULL; struct device_node *np = ofdev->dev.of_node; struct device_node *child = NULL; - struct property *stash; u32 stash_len = 0; u32 stash_idx = 0; unsigned int num_tx_qs, num_rx_qs; @@ -854,9 +853,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) goto err_grp_init; } - stash = of_find_property(np, "bd-stash", NULL); - - if (stash) { + if (of_property_read_bool(np, "bd-stash")) { priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; priv->bd_stash_en = 1; } @@ -894,7 +891,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | - FSL_GIANFAR_DEV_HAS_TIMER; + FSL_GIANFAR_DEV_HAS_TIMER | + FSL_GIANFAR_DEV_HAS_RX_FILER; err = of_property_read_string(np, "phy-connection-type", &ctype); @@ -1347,12 +1345,12 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->poll_mode == GFAR_SQ_POLLING) { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, gfar_poll_rx_sq, GFAR_DEV_WEIGHT); - netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx_sq, 2); } else { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, gfar_poll_rx, GFAR_DEV_WEIGHT); - netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx, 2); } } @@ -1396,8 +1394,9 @@ static int gfar_probe(struct platform_device *ofdev) priv->rx_queue[i]->rxic = DEFAULT_RXIC; } - /* always enable rx filer */ - priv->rx_filer_enable = 1; + /* Always enable rx filer if available */ + priv->rx_filer_enable = + (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 
1 : 0; /* Enable most messages by default */ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; /* use pritority h/w tx queue scheduling for single queue devices */ @@ -1835,7 +1834,7 @@ static void gfar_configure_serdes(struct net_device *dev) * several seconds for it to come back. */ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); return; } @@ -1850,7 +1849,7 @@ static void gfar_configure_serdes(struct net_device *dev) BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } static int __gfar_is_rx_idle(struct gfar_private *priv) diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index f266b20f9ef5..cb77667971a7 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -923,6 +923,7 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 +#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000 #if (MAXGROUPS == 2) #define DEFAULT_MAPPING 0xAA diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 650f7888e32b..cbddbe2d0429 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1385,7 +1385,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) value &= ~0x1000; /* Turn off autonegotiation */ phy_write(tbiphy, ENET_TBI_MII_CR, value); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); @@ -1705,7 +1705,7 @@ static void uec_configure_serdes(struct net_device *dev) * several seconds for it to come back. */ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); return; } @@ -1716,7 +1716,7 @@ static void uec_configure_serdes(struct net_device *dev) phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } /* Configure the PHY for dev. diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index cec95ac8687d..6ca94dc3dda3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -35,7 +35,7 @@ #include <linux/phy.h> #include <linux/types.h> -#define HNAE_DRIVER_VERSION "1.3.0" +#define HNAE_DRIVER_VERSION "2.0" #define HNAE_DRIVER_NAME "hns" #define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation." #define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver" @@ -63,6 +63,7 @@ do { \ #define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0') #define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0') +#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1) #define AE_NAME_SIZE 16 /* some said the RX and TX RCB format should not be the same in the future. 
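/*
 * Illustrative sketch, not part of the patch: the HNS_RXD_*_S/_M and
 * HNSV2_TXD_* definitions in the hnae.h hunk that follows are the usual
 * shift/mask encoding of sub-fields within a 32-bit descriptor word.
 * Simplified stand-ins for the driver's own field accessors (hypothetical
 * helpers, not the hnae.h macros themselves):
 */
static inline u32 desc_get_field(u32 word, u32 mask, u32 shift)
{
	return (word & mask) >> shift;
}

static inline void desc_set_field(u32 *word, u32 mask, u32 shift, u32 val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

/* e.g. pulling the ASID out of rx.ipoff_bnum_pid_flag:
 * asid = desc_get_field(flag, HNS_RXD_ASID_M, HNS_RXD_ASID_S);
 */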
But @@ -144,23 +145,61 @@ enum hnae_led_state { #define HNS_RXD_ASID_S 24 #define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S) +#define HNSV2_TXD_BUFNUM_S 0 +#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S) +#define HNSV2_TXD_RI_B 1 +#define HNSV2_TXD_L4CS_B 2 +#define HNSV2_TXD_L3CS_B 3 +#define HNSV2_TXD_FE_B 4 +#define HNSV2_TXD_VLD_B 5 + +#define HNSV2_TXD_TSE_B 0 +#define HNSV2_TXD_VLAN_EN_B 1 +#define HNSV2_TXD_SNAP_B 2 +#define HNSV2_TXD_IPV6_B 3 +#define HNSV2_TXD_SCTP_B 4 + /* hardware spec ring buffer format */ struct __packed hnae_desc { __le64 addr; union { struct { - __le16 asid_bufnum_pid; + union { + __le16 asid_bufnum_pid; + __le16 asid; + }; __le16 send_size; - __le32 flag_ipoffset; - __le32 reserved_3[4]; + union { + __le32 flag_ipoffset; + struct { + __u8 bn_pid; + __u8 ra_ri_cs_fe_vld; + __u8 ip_offset; + __u8 tse_vlan_snap_v6_sctp_nth; + }; + }; + __le16 mss; + __u8 l4_len; + __u8 reserved1; + __le16 paylen; + __u8 vmid; + __u8 qid; + __le32 reserved2[2]; } tx; struct { __le32 ipoff_bnum_pid_flag; __le16 pkt_len; __le16 size; - __le32 vlan_pri_asid; - __le32 reserved_2[3]; + union { + __le32 vlan_pri_asid; + struct { + __le16 asid; + __le16 vlan_cfi_pri; + }; + }; + __le32 rss_hash; + __le32 reserved_1[2]; } rx; }; }; @@ -302,7 +341,8 @@ struct hnae_queue { void __iomem *io_base; phys_addr_t phy_base; struct hnae_ae_dev *dev; /* the device who use this queue */ - struct hnae_ring rx_ring, tx_ring; + struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; + struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp; struct hnae_handle *handle; }; @@ -435,6 +475,7 @@ struct hnae_ae_ops { int (*set_mac_addr)(struct hnae_handle *handle, void *p); int (*set_mc_addr)(struct hnae_handle *handle, void *addr); int (*set_mtu)(struct hnae_handle *handle, int new_mtu); + void (*set_tso_stats)(struct hnae_handle *handle, int enable); void (*update_stats)(struct hnae_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae_handle *handle, u64 *data); @@ -446,6 +487,12 @@ struct hnae_ae_ops { enum hnae_led_state status); void (*get_regs)(struct hnae_handle *handle, void *data); int (*get_regs_len)(struct hnae_handle *handle); + u32 (*get_rss_key_size)(struct hnae_handle *handle); + u32 (*get_rss_indir_size)(struct hnae_handle *handle); + int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); }; struct hnae_ae_dev { @@ -551,11 +598,9 @@ static inline void hnae_replace_buffer(struct hnae_ring *ring, int i, struct hnae_desc_cb *res_cb) { struct hnae_buf_ops *bops = ring->q->handle->bops; - struct hnae_desc_cb tmp_cb = ring->desc_cb[i]; bops->unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; - *res_cb = tmp_cb; ring->desc[i].addr = (__le64)ring->desc_cb[i].dma; ring->desc[i].rx.ipoff_bnum_pid_flag = 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 1a16c0307b47..522b264866b4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -252,7 +252,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) if (mac_cb->mac_type != HNAE_PORT_SERVICE) return 0; - ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE); + ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true); if (ret) { dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d 
fail, ret = %#x!\n", @@ -261,7 +261,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) } ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM, - mac_addr, ENABLE); + mac_addr, true); if (ret) dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", @@ -277,12 +277,19 @@ static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu) return hns_mac_set_mtu(mac_cb, new_mtu); } +static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + hns_ppe_set_tso_enable(ppe_cb, enable); +} + static int hns_ae_start(struct hnae_handle *handle) { int ret; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); - ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE); + ret = hns_mac_vm_config_bc_en(mac_cb, 0, true); if (ret) return ret; @@ -309,7 +316,7 @@ void hns_ae_stop(struct hnae_handle *handle) hns_ae_ring_enable_all(handle, 0); - (void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE); + (void)hns_mac_vm_config_bc_en(mac_cb, 0, false); } static void hns_ae_reset(struct hnae_handle *handle) @@ -334,12 +341,30 @@ void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) else flag = RCB_INT_FLAG_RX; - hns_rcb_int_clr_hw(ring->q, flag); hns_rcb_int_ctrl_hw(ring->q, flag, mask); } +static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask) +{ + u32 flag; + + if (is_tx_ring(ring)) + flag = RCB_INT_FLAG_TX; + else + flag = RCB_INT_FLAG_RX; + + hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); +} + static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val) { + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev); + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + else + hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + hns_rcb_start(queue, val); } @@ -730,6 +755,53 @@ int hns_ae_get_regs_len(struct hnae_handle *handle) return total_num; } +static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle) +{ + return HNS_PPEV2_RSS_KEY_SIZE; +} + +static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle) +{ + return HNS_PPEV2_RSS_IND_TBL_SIZE; +} + +static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* currently we support only one type of hash function i.e. 
Toep hash */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* get the RSS Key required by the user */ + if (key) + memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); + + /* update the current hash->queue mappings from the shadow RSS table */ + memcpy(indir, ppe_cb->rss_indir_table, HNS_PPEV2_RSS_IND_TBL_SIZE); + + return 0; +} + +static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* set the RSS Hash Key if specififed by the user */ + if (key) + hns_ppe_set_rss_key(ppe_cb, (int *)key); + + /* update the shadow RSS table with user specified qids */ + memcpy(ppe_cb->rss_indir_table, indir, HNS_PPEV2_RSS_IND_TBL_SIZE); + + /* now update the hardware */ + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + + return 0; +} + static struct hnae_ae_ops hns_dsaf_ops = { .get_handle = hns_ae_get_handle, .put_handle = hns_ae_put_handle, @@ -758,19 +830,34 @@ static struct hnae_ae_ops hns_dsaf_ops = { .set_mc_addr = hns_ae_set_multicast_one, .set_mtu = hns_ae_set_mtu, .update_stats = hns_ae_update_stats, + .set_tso_stats = hns_ae_set_tso_stats, .get_stats = hns_ae_get_stats, .get_strings = hns_ae_get_strings, .get_sset_count = hns_ae_get_sset_count, .update_led_status = hns_ae_update_led_status, .set_led_id = hns_ae_cpld_set_led_id, .get_regs = hns_ae_get_regs, - .get_regs_len = hns_ae_get_regs_len + .get_regs_len = hns_ae_get_regs_len, + .get_rss_key_size = hns_ae_get_rss_key_size, + .get_rss_indir_size = hns_ae_get_rss_indir_size, + .get_rss = hns_ae_get_rss, + .set_rss = hns_ae_set_rss }; int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) { struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; + switch (dsaf_dev->dsaf_ver) { + case AE_VERSION_1: + hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq; + break; + case AE_VERSION_2: + hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq; + break; + default: + break; + } ae_dev->ops = &hns_dsaf_ops; ae_dev->dev = dsaf_dev->dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 026b38676cba..5ef0e96e918a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -283,7 +283,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, } int hns_mac_set_multi(struct hns_mac_cb *mac_cb, - u32 port_num, char *addr, u8 en) + u32 port_num, char *addr, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -295,7 +295,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -368,7 +368,7 @@ static void hns_mac_param_get(struct mac_params *param, *retuen 0 - success , negative --fail */ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, - u32 port_num, u16 vlan_id, u8 en) + u32 port_num, u16 vlan_id, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -386,7 +386,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -403,7 +403,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, 
*@en:enable *retuen 0 - success , negative --fail */ -int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en) +int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -427,7 +427,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en) return ret; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -648,7 +648,7 @@ static int hns_mac_init_ex(struct hns_mac_cb *mac_cb) hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex); - ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE); + ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, true); if (ret) goto free_mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 7da95a7581f9..0b052191d751 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -425,8 +425,8 @@ void mac_adjust_link(struct net_device *net_dev); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); int hns_mac_set_multi(struct hns_mac_cb *mac_cb, - u32 port_num, char *addr, u8 en); -int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en); + u32 port_num, char *addr, bool enable); +int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable); void hns_mac_start(struct hns_mac_cb *mac_cb); void hns_mac_stop(struct hns_mac_cb *mac_cb); int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 2a98eba660c0..1c33bd06bd5c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -38,10 +38,10 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) const char *name, *mode_str; struct device_node *np = dsaf_dev->dev->of_node; - if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2")) - dsaf_dev->dsaf_ver = AE_VERSION_2; - else + if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) dsaf_dev->dsaf_ver = AE_VERSION_1; + else + dsaf_dev->dsaf_ver = AE_VERSION_2; ret = of_property_read_string(np, "dsa_name", &name); if (ret) { @@ -274,6 +274,8 @@ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev, } } +#define HNS_DSAF_SBM_NUM(dev) \ + (AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM) /** * hns_dsaf_sbm_cfg - config sbm * @dsaf_id: dsa fabric id @@ -283,7 +285,7 @@ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev) u32 o_sbm_cfg; u32 i; - for (i = 0; i < DSAF_SBM_NUM; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { o_sbm_cfg = dsaf_read_dev(dsaf_dev, DSAF_SBM_CFG_REG_0_REG + 0x80 * i); dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1); @@ -304,13 +306,19 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) u32 reg; u32 read_cnt; - for (i = 0; i < DSAF_SBM_NUM; i++) { + /* validate configure by setting SBM_CFG_MIB_EN bit from 0 to 1. 
*/ + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { + reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; + dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 0); + } + + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1); } /* waitint for all sbm enable finished */ - for (i = 0; i < DSAF_SBM_NUM; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { read_cnt = 0; reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; do { @@ -338,83 +346,156 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) */ static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) { - u32 o_sbm_bp_cfg0; - u32 o_sbm_bp_cfg1; - u32 o_sbm_bp_cfg2; - u32 o_sbm_bp_cfg3; + u32 o_sbm_bp_cfg; u32 reg; u32 i; /* XGE */ for (i = 0; i < DSAF_XGE_NUM; i++) { reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); - dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 104); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg3, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); - dsaf_set_field(o_sbm_bp_cfg3, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg3, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128); - 
dsaf_set_field(o_sbm_bp_cfg3, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 10); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* RoCEE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 2); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } +} + +static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) +{ + u32 o_sbm_bp_cfg; + u32 reg; + u32 i; + + /* XGE */ + for (i = 0; i < DSAFV2_SBM_XGE_CHN; i++) { + reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + /* for no enable pfc mode */ + reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 
192); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } + + /* PPE */ + reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + /* RoCEE */ + for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { + reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } @@ -985,11 +1066,38 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) else tc_cfg = HNS_DSAF_I8TC_CFG; + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + for (i = 0; i < DSAF_INODE_NUM; i++) { + reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; + dsaf_set_dev_field(dsaf_dev, reg, + DSAF_INODE_IN_PORT_NUM_M, + DSAF_INODE_IN_PORT_NUM_S, + i % DSAF_XGE_NUM); + } + } else { + for (i = 0; i < DSAF_PORT_TYPE_NUM; i++) { + reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; + dsaf_set_dev_field(dsaf_dev, reg, + DSAF_INODE_IN_PORT_NUM_M, + DSAF_INODE_IN_PORT_NUM_S, 0); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT1_NUM_M, + DSAFV2_INODE_IN_PORT1_NUM_S, 1); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT2_NUM_M, + DSAFV2_INODE_IN_PORT2_NUM_S, 2); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT3_NUM_M, + DSAFV2_INODE_IN_PORT3_NUM_S, 3); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT4_NUM_M, + DSAFV2_INODE_IN_PORT4_NUM_S, 4); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT5_NUM_M, + DSAFV2_INODE_IN_PORT5_NUM_S, 5); + } + } for (i = 0; i < DSAF_INODE_NUM; i++) { - reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; - dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M, - DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM); - reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i; dsaf_write_dev(dsaf_dev, reg, tc_cfg); } @@ -1002,10 +1110,17 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) { u32 flag; + u32 finish_msk; u32 cnt = 0; int ret; - hns_dsaf_sbm_bp_wl_cfg(dsaf_dev); + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + hns_dsaf_sbm_bp_wl_cfg(dsaf_dev); + finish_msk = DSAF_SRAM_INIT_OVER_M; + } else { + hns_dsafv2_sbm_bp_wl_cfg(dsaf_dev); + finish_msk = DSAFV2_SRAM_INIT_OVER_M; + } /* enable sbm chanel, disable sbm chanel shcut function*/ hns_dsaf_sbm_cfg(dsaf_dev); @@ -1024,11 +1139,13 @@ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) do { usleep_range(200, 210);/*udelay(200);*/ - flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG); + flag = dsaf_get_dev_field(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG, + finish_msk, DSAF_SRAM_INIT_OVER_S); cnt++; - } while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT); + } while (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S) && + cnt < DSAF_CFG_READ_CNT); - if (flag != DSAF_SRAM_INIT_FINISH_FLAG) { + if (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S)) { dev_err(dsaf_dev->dev, "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n", dsaf_dev->ae_dev.name, flag, cnt); @@ -1259,12 +1376,8 @@ int hns_dsaf_set_mac_uc_entry( if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr) || 
MAC_IS_MULTICAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", - dsaf_dev->ae_dev.name, mac_entry->addr[0], - mac_entry->addr[1], mac_entry->addr[2], - mac_entry->addr[3], mac_entry->addr[4], - mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n", + dsaf_dev->ae_dev.name, mac_entry->addr); return -EINVAL; } @@ -1331,12 +1444,8 @@ int hns_dsaf_set_mac_mc_entry( /* mac addr check */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", - dsaf_dev->ae_dev.name, mac_entry->addr[0], - mac_entry->addr[1], mac_entry->addr[2], - mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n", + dsaf_dev->ae_dev.name, mac_entry->addr); return -EINVAL; } @@ -1410,11 +1519,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, /*chechk mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n", + mac_entry->addr); return -EINVAL; } @@ -1497,9 +1603,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, /*check mac addr */ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { - dev_err(dsaf_dev->dev, - "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n", + addr); return -EINVAL; } @@ -1563,11 +1668,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /*check mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n", + mac_entry->addr); return -EINVAL; } @@ -1644,11 +1746,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev, /* check macaddr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", + mac_entry->addr); return -EINVAL; } @@ -1695,11 +1794,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev, /*check mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", + mac_entry->addr); return -EINVAL; } @@ -2032,7 +2128,7 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); /* dsaf inode registers */ - for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; p[232 + i] = dsaf_read_dev(ddev, DSAF_SBM_CFG_REG_0_REG + j * 0x80); diff --git 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index b2b93484995c..31c312f9826e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -19,24 +19,20 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" #define DSAF_MOD_VERSION "v1.0" -#define ENABLE (0x1) -#define DISABLE (0x0) +#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 -#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000) +#define DSAF_BASE_INNER_PORT_NUM 127/* mac tbl qid*/ -#define DSAF_BASE_INNER_PORT_NUM (127) /* mac tbl qid*/ +#define DSAF_MAX_CHIP_NUM 2 /*max 2 chips */ -#define DSAF_MAX_CHIP_NUM (2) /*max 2 chips */ +#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE 22 -#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22) +#define HNS_DSAF_MAX_DESC_CNT 1024 +#define HNS_DSAF_MIN_DESC_CNT 16 -#define HNS_DSAF_MAX_DESC_CNT (1024) -#define HNS_DSAF_MIN_DESC_CNT (16) +#define DSAF_INVALID_ENTRY_IDX 0xffff -#define DSAF_INVALID_ENTRY_IDX (0xffff) - -#define DSAF_CFG_READ_CNT (30) -#define DSAF_SRAM_INIT_FINISH_FLAG (0xff) +#define DSAF_CFG_READ_CNT 30 #define MAC_NUM_OCTETS_PER_ADDR 6 @@ -274,10 +270,6 @@ struct dsaf_device { struct device *dev; struct hnae_ae_dev ae_dev; - void *priv; - - int virq[DSAF_IRQ_NUM]; - u8 __iomem *sc_base; u8 __iomem *sds_base; u8 __iomem *ppe_base; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 523e9b83d304..607c3be42241 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -149,7 +149,11 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) if (port < DSAF_SERVICE_NW_NUM) { reg_val_1 = 0x1 << port; - reg_val_2 = 0x1041041 << port; + /* there is difference between V1 and V2 in register.*/ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + reg_val_2 = 0x1041041 << port; + else + reg_val_2 = 0x2082082 << port; if (val == 0) { dsaf_write_reg(dsaf_dev->sc_base, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 67f33f185a44..f302ef9073c6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -19,6 +19,48 @@ #include "hns_dsaf_ppe.h" +void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value) +{ + dsaf_set_dev_bit(ppe_cb, PPEV2_CFG_TSO_EN_REG, 0, !!value); +} + +void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, + const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]) +{ + int key_item = 0; + + for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++) + dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4, + rss_key[key_item]); +} + +void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, + const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]) +{ + int i; + int reg_value; + + for (i = 0; i < (HNS_PPEV2_RSS_IND_TBL_SIZE / 4); i++) { + reg_value = dsaf_read_dev(ppe_cb, + PPEV2_INDRECTION_TBL_REG + i * 0x4); + + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N0_M, + PPEV2_CFG_RSS_TBL_4N0_S, + rss_tab[i * 4 + 0] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N1_M, + PPEV2_CFG_RSS_TBL_4N1_S, + rss_tab[i * 4 + 1] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N2_M, + PPEV2_CFG_RSS_TBL_4N2_S, + rss_tab[i * 4 + 2] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N3_M, + PPEV2_CFG_RSS_TBL_4N3_S, + rss_tab[i * 4 + 3] & 0x1F); + dsaf_write_dev( + ppe_cb, PPEV2_INDRECTION_TBL_REG + i * 0x4, 
reg_value); + } +} + static void __iomem *hns_ppe_common_get_ioaddr( struct ppe_common_cb *ppe_common) { @@ -134,6 +176,11 @@ static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb) PPE_CNT_CLR_CE_B, 1); } +static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en) +{ + dsaf_write_dev(ppe_cb, PPEV2_VLAN_STRIP_EN_REG, en); +} + /** * hns_ppe_checksum_hw - set ppe checksum caculate * @ppe_device: ppe device @@ -266,13 +313,17 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) /** * ppe_init_hw - init ppe - * @ppe_device: ppe device + * @ppe_cb: ppe device */ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) { struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb; u32 port = ppe_cb->port; struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev; + int i; + + /* get default RSS key */ + netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); hns_ppe_srst_by_port(dsaf_dev, port, 0); mdelay(10); @@ -285,8 +336,21 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE); else hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE); + hns_ppe_checksum_hw(ppe_cb, 0xffffffff); hns_ppe_cnt_clr_ce(ppe_cb); + + if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + hns_ppe_set_vlan_strip(ppe_cb, 0); + + /* set default RSS key in h/w */ + hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key); + + /* Set default indrection table in h/w */ + for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE; i++) + ppe_cb->rss_indir_table[i] = i; + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + } } /** @@ -341,13 +405,13 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index) if (ret) return; + for (i = 0; i < ppe_common->ppe_num; i++) + hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]); if (ret) return; - for (i = 0; i < ppe_common->ppe_num; i++) - hns_ppe_init_hw(&ppe_common->ppe_cb[i]); - hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 4894f9a0d39f..0f5cb6962acf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -25,15 +25,24 @@ #define ETH_PPE_DUMP_NUM 576 #define ETH_PPE_STATIC_NUM 12 + +#define HNS_PPEV2_RSS_IND_TBL_SIZE 256 +#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */ +#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32)) + enum ppe_qid_mode { - PPE_QID_MODE0 = 0, /* fixed queue id mode */ - PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ - PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */ - PPE_QID_MODE3, /* switch:4TC/8TAG non switch:2Port/64VM */ - PPE_QID_MODE4, /* switch:8VM/16TAG non switch:2Port/16VM/4TC */ - PPE_QID_MODE5, /* non switch:6Port/16TAG */ - PPE_QID_MODE6, /* non switch:6Port/2VM/8TC */ - PPE_QID_MODE7, /* non switch:2Port/8VM/8TC */ + PPE_QID_MODE0 = 0, /* fixed queue id mode */ + PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ + PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */ + PPE_QID_MODE3, /* switch:4TC/8RSS non switch:2Port/64VM */ + PPE_QID_MODE4, /* switch:8VM/16RSS non switch:2Port/16VM/4TC */ + PPE_QID_MODE5, /* switch:16VM/8TC non switch:6Port/16RSS */ + PPE_QID_MODE6, /* switch:32VM/4RSS non switch:6Port/2VM/8TC */ + PPE_QID_MODE7, /* switch:32RSS non switch:2Port/8VM/8TC */ + PPE_QID_MODE8, /* switch:6VM/4TC/4RSS non switch:2Port/16VM/4RSS */ + 
PPE_QID_MODE9, /* non switch:2Port/32VM/2RSS */ + PPE_QID_MODE10, /* non switch:2Port/32RSS */ + PPE_QID_MODE11, /* non switch:2Port/4TC/16RSS */ }; enum ppe_port_mode { @@ -72,6 +81,8 @@ struct hns_ppe_cb { u8 port; /* port id in dsaf */ void __iomem *io_base; int virq; + u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ + u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ }; struct ppe_common_cb { @@ -102,4 +113,9 @@ void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data); void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data); void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data); +void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value); +void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, + const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]); +void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, + const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]); #endif /* _HNS_DSAF_PPE_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 4db32c62f062..d2263c72bd8a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -136,19 +136,37 @@ void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag) { - u32 clr = 1; - if (flag & RCB_INT_FLAG_TX) { - dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr); - dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr); + dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1); + dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1); } if (flag & RCB_INT_FLAG_RX) { - dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr); - dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr); + dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1); + dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1); } } +void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) +{ + u32 int_mask_en = !!mask; + + if (flag & RCB_INT_FLAG_TX) + dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en); + + if (flag & RCB_INT_FLAG_RX) + dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en); +} + +void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag) +{ + if (flag & RCB_INT_FLAG_TX) + dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1); + + if (flag & RCB_INT_FLAG_RX) + dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1); +} + /** *hns_rcb_ring_enable_hw - enable ring *@ring: rcb ring @@ -193,6 +211,7 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) (u32)dma); dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); + dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, bd_size_type); dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, @@ -204,6 +223,7 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) (u32)dma); dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); + dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, bd_size_type); dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, @@ -232,9 +252,6 @@ void hns_rcb_init_hw(struct ring_pair_cb *ring) static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, u32 port_idx, u32 desc_cnt) { - if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) - port_idx = 0; - dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4, desc_cnt); } @@ -249,8 +266,6 @@ static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames) { - if (port_idx >= 
HNS_RCB_SERVICE_NW_ENGINE_NUM) - port_idx = 0; if (coalesced_frames >= rcb_common->desc_num || coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES) return -EINVAL; @@ -354,6 +369,9 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, HNS_RCB_COMMON_ENDIAN); + dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0); + dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1); + return 0; } @@ -387,19 +405,23 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) struct rcb_common_cb *rcb_common; struct ring_pair_cb *ring_pair_cb; u32 buf_size; - u16 desc_num; - int irq_idx; + u16 desc_num, mdnum_ppkt; + bool irq_idx, is_ver1; ring_pair_cb = container_of(q, struct ring_pair_cb, q); + is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver); if (ring_type == RX_RING) { ring = &q->rx_ring; ring->io_base = ring_pair_cb->q.io_base; irq_idx = HNS_RCB_IRQ_IDX_RX; + mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; } else { ring = &q->tx_ring; ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + HNS_RCB_TX_REG_OFFSET; irq_idx = HNS_RCB_IRQ_IDX_TX; + mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : + HNS_RCBV2_RING_MAX_TXBD_PER_PKT; } rcb_common = ring_pair_cb->rcb_common; @@ -414,7 +436,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) ring->buf_size = buf_size; ring->desc_num = desc_num; - ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT; + ring->max_desc_num_per_pkt = mdnum_ppkt; ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE; ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE; ring->next_to_use = 0; @@ -445,14 +467,22 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) return port; } +#define SERVICE_RING_IRQ_IDX(v1) \ + ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX) +#define DEBUG_RING_IRQ_IDX(v1) \ + ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) +#define DEBUG_RING_IRQ_OFFSET(v1) \ + ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET) static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) { int comm_index = rcb_common->comm_index; + bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) - return HNS_SERVICE_RING_IRQ_IDX; + return SERVICE_RING_IRQ_IDX(is_ver1); else - return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2; + return DEBUG_RING_IRQ_IDX(is_ver1) + + (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1); } #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\ @@ -468,6 +498,9 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) u32 ring_num = rcb_common->ring_num; int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); struct device_node *np = rcb_common->dsaf_dev->dev->of_node; + struct platform_device *pdev = + to_platform_device(rcb_common->dsaf_dev->dev); + bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); for (i = 0; i < ring_num; i++) { ring_pair_cb = &rcb_common->ring_pair_cb[i]; @@ -477,10 +510,12 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) ring_pair_cb->q.io_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); - ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] - = irq_of_parse_and_map(np, base_irq_idx + i * 2); - ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] - = irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1); + ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = + is_ver1 ? 
irq_of_parse_and_map(np, base_irq_idx + i * 2) : + platform_get_irq(pdev, base_irq_idx + i * 3 + 1); + ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = + is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : + platform_get_irq(pdev, base_irq_idx + i * 3); ring_pair_cb->q.phy_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); hns_rcb_ring_pair_get_cfg(ring_pair_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 3a2afe2dd8bb..29041b18741a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -26,6 +26,8 @@ struct rcb_common_cb; #define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN #define HNS_RCB_DEBUG_NW_ENGINE_NUM 1 #define HNS_RCB_RING_MAX_BD_PER_PKT 3 +#define HNS_RCB_RING_MAX_TXBD_PER_PKT 3 +#define HNS_RCBV2_RING_MAX_TXBD_PER_PKT 8 #define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU #define HNS_RCB_RING_MAX_PENDING_BD 1024 @@ -106,13 +108,17 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); -void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, u16 *max_vfn, u16 *max_q_per_vf); +void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); + void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val); void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag); void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable); +void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask); +void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); + void hns_rcb_init_hw(struct ring_pair_cb *ring); void hns_rcb_reset_ring_hw(struct hnae_queue *q); void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b475e1bf2e6f..5d1b746e141d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -10,21 +10,12 @@ #ifndef _DSAF_REG_H_ #define _DSAF_REG_H_ -#define HNS_GE_FIFO_ERR_INTNUM 8 -#define HNS_XGE_ERR_INTNUM 6 -#define HNS_RCB_COMM_ERR_INTNUM 12 -#define HNS_PPE_TNL_ERR_INTNUM 8 -#define HNS_DSAF_EVENT_INTNUM 21 -#define HNS_DEBUG_RING_INTNUM 4 -#define HNS_SERVICE_RING_INTNUM 256 - -#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\ - HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\ - HNS_DSAF_EVENT_INTNUM) -#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\ - HNS_DEBUG_RING_INTNUM) - -#define DSAF_IRQ_NUM 18 +#define HNS_DEBUG_RING_IRQ_IDX 55 +#define HNS_SERVICE_RING_IRQ_IDX 59 +#define HNS_DEBUG_RING_IRQ_OFFSET 2 +#define HNSV2_DEBUG_RING_IRQ_IDX 409 +#define HNSV2_SERVICE_RING_IRQ_IDX 25 +#define HNSV2_DEBUG_RING_IRQ_OFFSET 9 #define DSAF_MAX_PORT_NUM_PER_CHIP 8 #define DSAF_SERVICE_PORT_NUM_PER_DSAF 6 @@ -39,9 +30,15 @@ #define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM +#define DSAF_PORT_TYPE_NUM 3 #define DSAF_NODE_NUM 18 #define DSAF_XOD_BIG_NUM DSAF_NODE_NUM #define DSAF_SBM_NUM DSAF_NODE_NUM +#define DSAFV2_SBM_NUM 8 +#define DSAFV2_SBM_XGE_CHN 6 +#define DSAFV2_SBM_PPE_CHN 1 +#define 
DASFV2_ROCEE_CRD_NUM 8 + #define DSAF_VOQ_NUM DSAF_NODE_NUM #define DSAF_INODE_NUM DSAF_NODE_NUM #define DSAF_XOD_NUM 8 @@ -52,56 +49,56 @@ #define DSAF_TCAM_SUM 512 #define DSAF_LINE_SUM (2048 * 14) -#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C -#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190 -#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194 -#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300 -#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304 -#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308 -#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C -#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310 -#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314 -#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318 -#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C -#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320 -#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354 -#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00 -#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04 -#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08 -#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C -#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10 -#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14 -#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18 -#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C -#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20 -#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24 -#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48 -#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C -#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 -#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 -#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 -#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304 -#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308 -#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C -#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310 -#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314 -#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328 -#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00 -#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04 -#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08 -#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C -#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10 -#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24 -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44 +#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C +#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190 +#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194 +#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300 +#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304 +#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308 +#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C +#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310 +#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314 +#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318 +#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C +#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320 +#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354 +#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00 +#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04 +#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08 +#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C +#define 
DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10 +#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14 +#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18 +#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C +#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20 +#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24 +#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48 +#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C +#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 +#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 +#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 +#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304 +#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308 +#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C +#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310 +#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314 +#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328 +#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00 +#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04 +#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08 +#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C +#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10 +#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24 +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44 /*serdes offset**/ #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG @@ -178,6 +175,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C +#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 #define DSAF_SBM_BP_CNT_0_0_REG 0x2018 @@ -319,6 +317,8 @@ #define PPE_CFG_TAG_GEN_REG 0x90 #define PPE_CFG_PARSE_TAG_REG 0x94 #define PPE_CFG_PRO_CHECK_EN_REG 0x98 +#define PPEV2_CFG_TSO_EN_REG 0xA0 +#define PPEV2_VLAN_STRIP_EN_REG 0xAC #define PPE_INTEN_REG 0x100 #define PPE_RINT_REG 0x104 #define PPE_INTSTS_REG 0x108 @@ -351,6 +351,8 @@ #define PPE_ECO0_REG 0x32C #define PPE_ECO1_REG 0x330 #define PPE_ECO2_REG 0x334 +#define PPEV2_INDRECTION_TBL_REG 0x800 +#define PPEV2_RSS_KEY_REG 0x900 #define RCB_COM_CFG_ENDIAN_REG 0x0 #define RCB_COM_CFG_SYS_FSH_REG 0xC @@ -431,8 +433,10 @@ #define RCB_RING_INTMSK_RXWL_REG 0x000A0 #define RCB_RING_INTSTS_RX_RING_REG 0x000A4 +#define RCBV2_RX_RING_INT_STS_REG 0x000A8 #define RCB_RING_INTMSK_TXWL_REG 0x000AC #define RCB_RING_INTSTS_TX_RING_REG 0x000B0 +#define RCBV2_TX_RING_INT_STS_REG 0x000B4 #define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8 #define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 @@ -678,6 +682,10 @@ #define XGMAC_TRX_CORE_SRST_M 0x2080 +#define DSAF_SRAM_INIT_OVER_M 0xff +#define DSAFV2_SRAM_INIT_OVER_M 0x3ff +#define DSAF_SRAM_INIT_OVER_S 0 + #define DSAF_CFG_EN_S 0 #define DSAF_CFG_TC_MODE_S 1 #define DSAF_CFG_CRC_EN_S 2 @@ -685,6 +693,7 @@ #define DSAF_CFG_MIX_MODE_S 4 #define DSAF_CFG_STP_MODE_S 5 #define DSAF_CFG_LOCA_ADDR_EN_S 6 +#define DSAFV2_CFG_VLAN_TAG_MODE_S 17 #define DSAF_CNT_CLR_CE_S 0 #define DSAF_SNAP_EN_S 1 @@ -707,6 +716,16 @@ #define DSAF_INODE_IN_PORT_NUM_M 7 #define DSAF_INODE_IN_PORT_NUM_S 0 +#define DSAFV2_INODE_IN_PORT1_NUM_M (7ULL << 3) +#define DSAFV2_INODE_IN_PORT1_NUM_S 3 +#define DSAFV2_INODE_IN_PORT2_NUM_M (7ULL << 6) +#define DSAFV2_INODE_IN_PORT2_NUM_S 6 +#define DSAFV2_INODE_IN_PORT3_NUM_M (7ULL << 9) +#define DSAFV2_INODE_IN_PORT3_NUM_S 9 +#define DSAFV2_INODE_IN_PORT4_NUM_M (7ULL << 12) +#define DSAFV2_INODE_IN_PORT4_NUM_S 12 +#define DSAFV2_INODE_IN_PORT5_NUM_M (7ULL << 15) 
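The DSAFV2_INODE_IN_PORT*_NUM definitions being added around this point follow the driver's usual _M/_S (mask/shift) register-field convention, which the dsaf_set_field()/dsaf_get_field() helpers defined later in this header consume (and which hns_dsaf_inode_init() uses via dsaf_set_dev_field() earlier in the patch). A minimal standalone sketch of that read-modify-write pattern, in plain C with hypothetical local names rather than the driver's actual macros:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field: a 3-bit port number at bit offset 3, mirroring the
 * DSAFV2_INODE_IN_PORT1_NUM_M / _S pair added in this hunk. */
#define FIELD_PORT1_S  3
#define FIELD_PORT1_M  (0x7u << FIELD_PORT1_S)

/* Pack 'val' into the field described by (mask, shift) without
 * disturbing the other bits of 'word'. */
static uint32_t field_set(uint32_t word, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

/* Extract the same field back out of the register word. */
static uint32_t field_get(uint32_t word, uint32_t mask, uint32_t shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0;

	reg = field_set(reg, FIELD_PORT1_M, FIELD_PORT1_S, 1);
	printf("reg=0x%08x port1=%u\n",
	       reg, field_get(reg, FIELD_PORT1_M, FIELD_PORT1_S));
	return 0;
}

In the driver itself the same idea is wrapped in macros that operate on a register read/modify/write against the device's io_base, but the mask-and-shift arithmetic is the part the _M/_S pairs exist to describe.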
+#define DSAFV2_INODE_IN_PORT5_NUM_S 15 #define HNS_DSAF_I4TC_CFG 0x18688688 #define HNS_DSAF_I8TC_CFG 0x18FAC688 @@ -738,6 +757,33 @@ #define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10 #define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10) +#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9) +#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S 18 +#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 18) + +#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG2_SET_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0 +#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 9 +#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S 0 +#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 +#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) + #define DSAF_TBL_TCAM_ADDR_S 0 #define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1) @@ -797,6 +843,18 @@ #define PPE_CFG_QID_MODE_CF_QID_MODE_S 8 #define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S) +#define PPEV2_CFG_RSS_TBL_4N0_S 0 +#define PPEV2_CFG_RSS_TBL_4N0_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N0_S) + +#define PPEV2_CFG_RSS_TBL_4N1_S 8 +#define PPEV2_CFG_RSS_TBL_4N1_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N1_S) + +#define PPEV2_CFG_RSS_TBL_4N2_S 16 +#define PPEV2_CFG_RSS_TBL_4N2_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N2_S) + +#define PPEV2_CFG_RSS_TBL_4N3_S 24 +#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) + #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 @@ -898,7 +956,7 @@ #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 -static inline void dsaf_write_reg(void *base, u32 reg, u32 value) +static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) { u8 __iomem *reg_addr = ACCESS_ONCE(base); @@ -908,7 +966,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value) #define dsaf_write_dev(a, reg, value) \ dsaf_write_reg((a)->io_base, (reg), (value)) -static inline u32 dsaf_read_reg(u8 *base, u32 reg) +static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) { u8 __iomem *reg_addr = ACCESS_ONCE(base); @@ -927,8 +985,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg) #define dsaf_set_bit(origin, shift, val) \ dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) -static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, - u32 val) +static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, + u32 shift, u32 val) { u32 origin = dsaf_read_reg(base, reg); @@ -947,7 +1005,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, #define dsaf_get_bit(origin, shift) \ dsaf_get_field((origin), (1ull << (shift)), (shift)) -static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 
shift) +static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, + u32 shift) { u32 origin; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 08cef0dfb5db..0e30846a24f8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -33,10 +33,105 @@ #define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 +#define HNS_BUFFER_SIZE_2048 2048 + +#define BD_MAX_SEND_SIZE 8191 +#define SKB_TMP_LEN(SKB) \ + (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) + +static void fill_v2_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + struct hnae_desc *desc = &ring->desc[ring->next_to_use]; + struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct iphdr *iphdr; + struct ipv6hdr *ipv6hdr; + struct sk_buff *skb; + int skb_tmp_len; + __be16 protocol; + u8 bn_pid = 0; + u8 rrcfv = 0; + u8 ip_offset = 0; + u8 tvsvsn = 0; + u16 mss = 0; + u8 l4_len = 0; + u16 paylen = 0; + + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + + /*config bd buffer end */ + hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); + hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + ip_offset = ETH_HLEN; + + if (protocol == htons(ETH_P_8021Q)) { + ip_offset += VLAN_HLEN; + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + + if (skb->protocol == htons(ETH_P_IP)) { + iphdr = ip_hdr(skb); + hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1); + hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); + + /* check for tcp/udp header */ + if (iphdr->protocol == IPPROTO_TCP) { + hnae_set_bit(tvsvsn, + HNSV2_TXD_TSE_B, 1); + skb_tmp_len = SKB_TMP_LEN(skb); + l4_len = tcp_hdrlen(skb); + mss = mtu - skb_tmp_len - ETH_FCS_LEN; + paylen = skb->len - skb_tmp_len; + } + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1); + ipv6hdr = ipv6_hdr(skb); + hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); + + /* check for tcp/udp header */ + if (ipv6hdr->nexthdr == IPPROTO_TCP) { + hnae_set_bit(tvsvsn, + HNSV2_TXD_TSE_B, 1); + skb_tmp_len = SKB_TMP_LEN(skb); + l4_len = tcp_hdrlen(skb); + mss = mtu - skb_tmp_len - ETH_FCS_LEN; + paylen = skb->len - skb_tmp_len; + } + } + desc->tx.ip_offset = ip_offset; + desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn; + desc->tx.mss = cpu_to_le16(mss); + desc->tx.l4_len = l4_len; + desc->tx.paylen = cpu_to_le16(paylen); + } + } + + hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end); + + desc->tx.bn_pid = bn_pid; + desc->tx.ra_ri_cs_fe_vld = rrcfv; + + ring_ptr_move_fw(ring, next_to_use); +} static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type) + int buf_num, enum hns_desc_type type, int mtu) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -100,47 +195,129 @@ static void unfill_desc(struct hnae_ring *ring) ring_ptr_move_bw(ring, next_to_use); } -int hns_nic_net_xmit_hw(struct net_device *ndev, - struct sk_buff *skb, - struct hns_nic_ring_data *ring_data) +static int hns_nic_maybe_stop_tx( + struct sk_buff 
**out_skb, int *bnum, struct hnae_ring *ring) { - struct hns_nic_priv *priv = netdev_priv(ndev); - struct device *dev = priv->dev; - struct hnae_ring *ring = ring_data->ring; - struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; int buf_num; - dma_addr_t dma; - int size, next_to_use; - int i, j; - struct sk_buff *new_skb; - - assert(ring->max_desc_num_per_pkt <= ring->desc_num); /* no. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { - if (ring_space(ring) < 1) { - ring->stats.tx_busy++; - goto out_net_tx_busy; - } + if (ring_space(ring) < 1) + return -EBUSY; new_skb = skb_copy(skb, GFP_ATOMIC); - if (!new_skb) { - ring->stats.sw_err_cnt++; - netdev_err(ndev, "no memory to xmit!\n"); - goto out_err_tx_ok; - } + if (!new_skb) + return -ENOMEM; dev_kfree_skb_any(skb); - skb = new_skb; + *out_skb = new_skb; buf_num = 1; - assert(skb_shinfo(skb)->nr_frags == 1); } else if (buf_num > ring_space(ring)) { + return -EBUSY; + } + + *bnum = buf_num; + return 0; +} + +static int hns_nic_maybe_stop_tso( + struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring) +{ + int i; + int size; + int buf_num; + int frag_num; + struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; + struct skb_frag_struct *frag; + + size = skb_headlen(skb); + buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + } + + if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { + buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + + } else if (ring_space(ring) < buf_num) { + return -EBUSY; + } + + *bnum = buf_num; + return 0; +} + +static void fill_tso_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + int frag_buf_num; + int sizeoflast; + int k; + + frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + sizeoflast = size % BD_MAX_SEND_SIZE; + sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE; + + /* when the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) + fill_v2_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : BD_MAX_SEND_SIZE, + dma + BD_MAX_SEND_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + buf_num, + (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE, + mtu); +} + +int hns_nic_net_xmit_hw(struct net_device *ndev, + struct sk_buff *skb, + struct hns_nic_ring_data *ring_data) +{ + struct hns_nic_priv *priv = netdev_priv(ndev); + struct device *dev = priv->dev; + struct hnae_ring *ring = ring_data->ring; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int buf_num; + int seg_num; + dma_addr_t dma; + int size, next_to_use; + int i; + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: ring->stats.tx_busy++; goto out_net_tx_busy; + case -ENOMEM: + ring->stats.sw_err_cnt++; + netdev_err(ndev, "no memory to xmit!\n"); + goto out_err_tx_ok; + default: + break; } + + /* no. 
of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; next_to_use = ring->next_to_use; /* fill the first part */ @@ -151,11 +328,11 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_err_tx_ok; } - fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num, - DESC_TYPE_SKB); + priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, + buf_num, DESC_TYPE_SKB, ndev->mtu); /* fill the fragments */ - for (i = 1; i < buf_num; i++) { + for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); @@ -164,8 +341,9 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_map_frag_fail; } - fill_desc(ring, skb_frag_page(frag), size, dma, - buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE); + priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 1 : 0, buf_num, + DESC_TYPE_PAGE, ndev->mtu); } /*complete translate all packets*/ @@ -182,19 +360,20 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, out_map_frag_fail: - for (j = i - 1; j > 0; j--) { + while (ring->next_to_use != next_to_use) { unfill_desc(ring); - next_to_use = ring->next_to_use; - dma_unmap_page(dev, ring->desc_cb[next_to_use].dma, - ring->desc_cb[next_to_use].length, - DMA_TO_DEVICE); + if (ring->next_to_use != next_to_use) + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_single(dev, + ring->desc_cb[next_to_use].dma, + ring->desc_cb[next_to_use].length, + DMA_TO_DEVICE); } - unfill_desc(ring); - next_to_use = ring->next_to_use; - dma_unmap_single(dev, ring->desc_cb[next_to_use].dma, - ring->desc_cb[next_to_use].length, DMA_TO_DEVICE); - out_err_tx_ok: dev_kfree_skb_any(skb); @@ -313,51 +492,110 @@ static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, return max_size; } -static void -hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset) +static void hns_nic_reuse_page(struct sk_buff *skb, int i, + struct hnae_ring *ring, int pull_len, + struct hnae_desc_cb *desc_cb) { + struct hnae_desc *desc; + int truesize, size; + int last_offset; + bool twobufs; + + twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); + + if (twobufs) { + truesize = hnae_buf_size(ring); + } else { + truesize = ALIGN(size, L1_CACHE_BYTES); + last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + } + + skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, + size - pull_len, truesize - pull_len); + /* avoid re-using remote pages,flag default unreuse */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) { - /* move offset up to the next cache line */ - desc_cb->page_offset += tsize; + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) + return; + + if (twobufs) { + /* if we are only owner of page we can reuse it */ + if (likely(page_count(desc_cb->priv) == 1)) { + /* flip page offset to other buffer */ + desc_cb->page_offset ^= truesize; - if (desc_cb->page_offset <= last_offset) { desc_cb->reuse_flag = 1; /* bump ref count on page before it is given*/ get_page(desc_cb->priv); } + return; + } + + /* move offset up to the next cache line */ + desc_cb->page_offset += truesize; + + if (desc_cb->page_offset <= last_offset) { + desc_cb->reuse_flag = 1; + /* bump ref count on page 
before it is given*/ + get_page(desc_cb->priv); } } +static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum) +{ + *out_bnum = hnae_get_field(bnum_flag, + HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1; +} + +static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum) +{ + *out_bnum = hnae_get_field(bnum_flag, + HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S); +} + static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, struct sk_buff **out_skb, int *out_bnum) { struct hnae_ring *ring = ring_data->ring; struct net_device *ndev = ring_data->napi.dev; + struct hns_nic_priv *priv = netdev_priv(ndev); struct sk_buff *skb; struct hnae_desc *desc; struct hnae_desc_cb *desc_cb; unsigned char *va; - int bnum, length, size, i, truesize, last_offset; + int bnum, length, i; int pull_len; u32 bnum_flag; - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; - length = le16_to_cpu(desc->rx.pkt_len); - bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag); - bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S); - *out_bnum = bnum; + + prefetch(desc); + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; - skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring_data->napi, + HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } + prefetchw(skb->data); + length = le16_to_cpu(desc->rx.pkt_len); + bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag); + priv->ops.get_rxd_bnum(bnum_flag, &bnum); + *out_bnum = bnum; + if (length <= HNS_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -380,13 +618,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - size = le16_to_cpu(desc->rx.size); - truesize = ALIGN(size, L1_CACHE_BYTES); - skb_add_rx_frag(skb, 0, desc_cb->priv, - desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); - - hns_nic_reuse_page(desc_cb, truesize, last_offset); + hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/ @@ -396,13 +628,8 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, for (i = 1; i < bnum; i++) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - truesize = ALIGN(size, L1_CACHE_BYTES); - skb_add_rx_frag(skb, i, desc_cb->priv, - desc_cb->page_offset, - size, truesize); - hns_nic_reuse_page(desc_cb, truesize, last_offset); + hns_nic_reuse_page(skb, i, ring, 0, desc_cb); ring_ptr_move_fw(ring, next_to_clean); } } @@ -540,20 +767,20 @@ recv: } /* make all data has been write before submit */ - if (clean_count > 0) { - hns_nic_alloc_rx_buffers(ring_data, clean_count); - clean_count = 0; - } - if (recv_pkts < budget) { ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - rmb(); /*complete read rx ring bd number*/ - if (ex_num > 0) { - num += ex_num; + + if (ex_num > clean_count) { + num += ex_num - clean_count; + rmb(); /*complete read rx ring bd number*/ goto recv; } } + /* make all data has been write before submit */ + if (clean_count > 0) + hns_nic_alloc_rx_buffers(ring_data, 
clean_count); + return recv_pkts; } @@ -642,14 +869,20 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, bytes = 0; pkts = 0; - while (head != ring->next_to_clean) + while (head != ring->next_to_clean) { hns_nic_reclaim_one_desc(ring, &bytes, &pkts); + /* issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ring->next_to_clean]); + } NETIF_TX_UNLOCK(ndev); dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); + if (unlikely(priv->link && !netif_carrier_ok(ndev))) + netif_carrier_on(ndev); + if (unlikely(pkts && netif_carrier_ok(ndev) && (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) { /* Make sure that anybody stopping the queue after this @@ -716,6 +949,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) ring_data->ring, 0); ring_data->fini_process(ring_data); + return 0; } return clean_complete; @@ -848,15 +1082,58 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx) napi_disable(&priv->ring_data[idx].napi); } -static int hns_nic_init_irq(struct hns_nic_priv *priv) +static void hns_set_irq_affinity(struct hns_nic_priv *priv) { struct hnae_handle *h = priv->ae_handle; struct hns_nic_ring_data *rd; int i; - int ret; int cpu; cpumask_t mask; + /*diffrent irq banlance for 16core and 32core*/ + if (h->q_num == num_possible_cpus()) { + for (i = 0; i < h->q_num * 2; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index)) { + cpumask_clear(&mask); + cpu = rd->queue_index; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + } else { + for (i = 0; i < h->q_num; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index * 2)) { + cpumask_clear(&mask); + cpu = rd->queue_index * 2; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + + for (i = h->q_num; i < h->q_num * 2; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index * 2 + 1)) { + cpumask_clear(&mask); + cpu = rd->queue_index * 2 + 1; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + } +} + +static int hns_nic_init_irq(struct hns_nic_priv *priv) +{ + struct hnae_handle *h = priv->ae_handle; + struct hns_nic_ring_data *rd; + int i; + int ret; + for (i = 0; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; @@ -878,16 +1155,11 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) } disable_irq(rd->ring->irq); rd->ring->irq_init_flag = RCB_IRQ_INITED; - - /*set cpu affinity*/ - if (cpu_online(rd->queue_index)) { - cpumask_clear(&mask); - cpu = rd->queue_index; - cpumask_set_cpu(cpu, &mask); - irq_set_affinity_hint(rd->ring->irq, &mask); - } } + /*set cpu affinity*/ + hns_set_irq_affinity(priv); + return 0; } @@ -1136,6 +1408,51 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu) return ret; } +static int hns_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_handle *h = priv->ae_handle; + + switch (priv->enet_ver) { + case AE_VERSION_1: + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + netdev_info(netdev, "enet v1 do not support tso!\n"); + break; + default: + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = fill_tso_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; + /* The chip only support 7*4096 */ + netif_set_gso_max_size(netdev, 7 * 4096); + h->dev->ops->set_tso_stats(h, 1); + } else { + 
priv->ops.fill_desc = fill_v2_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + h->dev->ops->set_tso_stats(h, 0); + } + break; + } + netdev->features = features; + return 0; +} + +static netdev_features_t hns_nic_fix_features( + struct net_device *netdev, netdev_features_t features) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + + switch (priv->enet_ver) { + case AE_VERSION_1: + features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_FILTER); + break; + default: + break; + } + return features; +} + /** * nic_set_multicast_list - set mutl mac address * @netdev: net device @@ -1231,6 +1548,8 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_set_mac_address = hns_nic_net_set_mac_address, .ndo_change_mtu = hns_nic_change_mtu, .ndo_do_ioctl = hns_nic_do_ioctl, + .ndo_set_features = hns_nic_set_features, + .ndo_fix_features = hns_nic_fix_features, .ndo_get_stats64 = hns_nic_get_stats64, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = hns_nic_poll_controller, @@ -1315,22 +1634,26 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) return; hns_nic_dump(priv); - netdev_info(priv->netdev, "Reset %s port\n", - (type == HNAE_PORT_DEBUG ? "debug" : "business")); + netdev_info(priv->netdev, "try to reset %s port!\n", + (type == HNAE_PORT_DEBUG ? "debug" : "service")); rtnl_lock(); /* put off any impending NetWatchDogTimeout */ priv->netdev->trans_start = jiffies; - if (type == HNAE_PORT_DEBUG) + if (type == HNAE_PORT_DEBUG) { hns_nic_net_reinit(priv->netdev); + } else { + netif_carrier_off(priv->netdev); + netif_tx_disable(priv->netdev); + } rtnl_unlock(); } /* for doing service complete*/ static void hns_nic_service_event_complete(struct hns_nic_priv *priv) { - assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); + WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); smp_mb__before_atomic(); clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state); @@ -1435,8 +1758,9 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) for (i = 0; i < h->q_num * 2; i++) { netif_napi_del(&priv->ring_data[i].napi); if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { - irq_set_affinity_hint(priv->ring_data[i].ring->irq, - NULL); + (void)irq_set_affinity_hint( + priv->ring_data[i].ring->irq, + NULL); free_irq(priv->ring_data[i].ring->irq, &priv->ring_data[i]); } @@ -1446,6 +1770,31 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) kfree(priv->ring_data); } +static void hns_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_handle *h = priv->ae_handle; + + if (AE_IS_VER1(priv->enet_ver)) { + priv->ops.fill_desc = fill_desc; + priv->ops.get_rxd_bnum = get_rx_desc_bnum; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + } else { + priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = fill_tso_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; + /* This chip only support 7*4096 */ + netif_set_gso_max_size(netdev, 7 * 4096); + h->dev->ops->set_tso_stats(h, 1); + } else { + priv->ops.fill_desc = fill_v2_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + } + } +} + static int hns_nic_try_get_ae(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); @@ -1473,6 +1822,8 @@ static int hns_nic_try_get_ae(struct net_device *ndev) goto out_init_ring_data; } + hns_nic_set_priv_ops(ndev); + ret = register_netdev(ndev); if (ret) { 
dev_err(priv->dev, "probe register netdev fail!\n"); @@ -1524,10 +1875,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - if (of_device_is_compatible(node, "hisilicon,hns-nic-v2")) - priv->enet_ver = AE_VERSION_2; - else + if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) priv->enet_ver = AE_VERSION_1; + else + priv->enet_ver = AE_VERSION_2; ret = of_property_read_string(node, "ae-name", &priv->ae_name); if (ret) @@ -1543,6 +1894,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) ndev->priv_flags |= IFF_UNICAST_FLT; ndev->netdev_ops = &hns_nic_netdev_ops; hns_ethtool_set_ops(ndev); + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; @@ -1550,6 +1902,17 @@ static int hns_nic_dev_probe(struct platform_device *pdev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; + switch (priv->enet_ver) { + case AE_VERSION_2: + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; + break; + default: + break; + } + SET_NETDEV_DEV(ndev, dev); if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index dae0ed19ac6d..4b75270f014e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -40,6 +40,16 @@ struct hns_nic_ring_data { void (*fini_process)(struct hns_nic_ring_data *); }; +/* compatible the difference between two versions */ +struct hns_nic_ops { + void (*fill_desc)(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hnae_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + struct hns_nic_priv { const char *ae_name; u32 enet_ver; @@ -51,6 +61,8 @@ struct hns_nic_priv { struct device *dev; struct hnae_handle *ae_handle; + struct hns_nic_ops ops; + /* the cb for nic to manage the ring buffer, the first half of the * array is for tx_ring and vice versa for the second half */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a0332129970b..3df22840fcd1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -11,7 +11,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> - #include "hns_enet.h" #define HNS_PHY_PAGE_MDIX 0 @@ -72,24 +71,22 @@ static void hns_get_mdix_mode(struct net_device *net_dev, struct hns_nic_priv *priv = netdev_priv(net_dev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev || !phy_dev->bus) { + if (!phy_dev || !phy_dev->mdio.bus) { cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; return; } - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_MDIX); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_MDIX); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG); + retval = phy_read(phy_dev, HNS_PHY_CSC_REG); mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG); + retval = 
phy_read(phy_dev, HNS_PHY_CSS_REG); mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B); is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); switch (mdix_ctrl) { case 0x0: @@ -254,53 +251,36 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) if (en) { /* speed : 1000M */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, 2); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 21, 0x1046); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); + phy_write(phy_dev, 21, 0x1046); /* Force Master */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 9, 0x1F00); + phy_write(phy_dev, 9, 0x1F00); /* Soft-reset */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); + phy_write(phy_dev, 0, 0x9140); /* If autoneg disabled,two soft-reset operations */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); + phy_write(phy_dev, 0, 0x9140); + phy_write(phy_dev, 22, 0xFA); /* Default is 0x0400 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x418); + phy_write(phy_dev, 1, 0x418); /* Force 1000M Link, Default is 0x0200 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x20C); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); + phy_write(phy_dev, 7, 0x20C); + phy_write(phy_dev, 22, 0); /* Enable MAC loop-back */ - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } else { - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x400); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x200); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); - - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + phy_write(phy_dev, 22, 0xFA); + phy_write(phy_dev, 1, 0x400); + phy_write(phy_dev, 7, 0x200); + phy_write(phy_dev, 22, 0); + + val = phy_read(phy_dev, COPPER_CONTROL_REG); val &= ~PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } return 0; } @@ -667,6 +647,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN); + drvinfo->eedump_len = 0; } /** @@ -1018,16 +999,9 @@ int hns_phy_led_set(struct net_device *netdev, int value) struct hns_nic_priv *priv = netdev_priv(netdev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev->bus) { - netdev_err(netdev, "phy_dev->bus is null!\n"); - return -EINVAL; - } - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG, - value); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); + retval = phy_write(phy_dev, HNS_LED_FC_REG, value); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); if (retval) { netdev_err(netdev, "mdiobus_write fail !\n"); return retval; @@ -1052,19 +1026,15 @@ int hns_set_phys_id(struct 
net_device *netdev, enum ethtool_phys_id_state state) if (phy_dev) switch (state) { case ETHTOOL_ID_ACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus, - phy_dev->addr, - HNS_LED_FC_REG); + priv->phy_led_val = phy_read(phy_dev, HNS_LED_FC_REG); - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; return 2; @@ -1079,20 +1049,18 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) return ret; break; case ETHTOOL_ID_INACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_LED_FC_REG, priv->phy_led_val); + ret = phy_write(phy_dev, HNS_LED_FC_REG, + priv->phy_led_val); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; break; @@ -1187,6 +1155,95 @@ static int hns_nic_nway_reset(struct net_device *netdev) return ret; } +static u32 +hns_get_rss_key_size(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + u32 ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + ret = ops->get_rss_key_size(priv->ae_handle); + + return ret; +} + +static u32 +hns_get_rss_indir_size(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + u32 ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + ret = ops->get_rss_indir_size(priv->ae_handle); + + return ret; +} + +static int +hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + int ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + + if (!indir) + return 0; + + ret = ops->get_rss(priv->ae_handle, indir, key, hfunc); + + return 0; +} + +static int +hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + int ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + + /* currently hfunc can only be Toeplitz hash */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + ret = ops->set_rss(priv->ae_handle, indir, key, hfunc); + + return 0; +} + static struct ethtool_ops hns_ethtool_ops = { .get_drvinfo = hns_nic_get_drvinfo, .get_link = hns_nic_get_link, @@ -1206,6 +1263,10 @@ static struct ethtool_ops hns_ethtool_ops = { .get_regs_len = hns_get_regs_len, .get_regs = hns_get_regs, 
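/* Editorial aside, not part of the original patch: the RSS callbacks wired
 * up just below (.get_rxfh_key_size, .get_rxfh_indir_size, .get_rxfh and
 * .set_rxfh) forward to the AE handle's get_rss/set_rss ops, return
 * -EOPNOTSUPP on AE_VERSION_1 hardware, and hns_set_rss() only accepts the
 * default Toeplitz hash with no key change.
 */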
.nway_reset = hns_nic_nway_reset, + .get_rxfh_key_size = hns_get_rss_key_size, + .get_rxfh_indir_size = hns_get_rss_indir_size, + .get_rxfh = hns_get_rss, + .set_rxfh = hns_set_rss, }; void hns_ethtool_set_ops(struct net_device *ndev) diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 37491c85bc42..58c96c412fe8 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -463,11 +463,6 @@ static int hns_mdio_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); mdio_dev->subctrl_vbase = NULL; } - new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR, - sizeof(int), GFP_KERNEL); - if (!new_bus->irq) - return -ENOMEM; - new_bus->parent = &pdev->dev; platform_set_drvdata(pdev, new_bus); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index ae6e30d39f0f..1d5c3e16d8f4 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d) } #ifdef CONFIG_EISA -static int __init hp100_eisa_probe (struct device *gendev) +static int hp100_eisa_probe(struct device *gendev) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); struct eisa_device *edev = to_eisa_device(gendev); diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig index 99c1cebd002d..37dceabf8861 100644 --- a/drivers/net/ethernet/ibm/Kconfig +++ b/drivers/net/ethernet/ibm/Kconfig @@ -37,4 +37,14 @@ config EHEA To compile the driver as a module, choose M here. The module will be called ehea. +config IBMVNIC + tristate "IBM Virtual NIC support" + depends on PPC_PSERIES + ---help--- + This driver supports Virtual NIC adapters on IBM i and IBM System p + systems. + + To compile this driver as a module, choose M here. The module will + be called ibmvnic. 
+ endif # NET_VENDOR_IBM diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile index 2f04e71a5926..447865c8b632 100644 --- a/drivers/net/ethernet/ibm/Makefile +++ b/drivers/net/ethernet/ibm/Makefile @@ -3,5 +3,6 @@ # obj-$(CONFIG_IBMVETH) += ibmveth.o +obj-$(CONFIG_IBMVNIC) += ibmvnic.o obj-$(CONFIG_IBM_EMAC) += emac/ obj-$(CONFIG_EHEA) += ehea/ diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 7af870a3c549..335417b4756b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -169,7 +169,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) if (!pool->free_map) return -1; - pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); + pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL); if (!pool->dma_addr) { kfree(pool->free_map); pool->free_map = NULL; @@ -187,8 +187,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) return -1; } - memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); - for (i = 0; i < pool->size; ++i) pool->free_map[i] = i; @@ -763,7 +761,7 @@ static netdev_features_t ibmveth_fix_features(struct net_device *dev, */ if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_CSUM_MASK; return features; } @@ -928,7 +926,8 @@ static int ibmveth_set_features(struct net_device *dev, rc1 = ibmveth_set_csum_offload(dev, rx_csum); if (rc1 && !adapter->rx_csum) dev->features = - features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + features & ~(NETIF_F_CSUM_MASK | + NETIF_F_RXCSUM); } if (large_send != adapter->large_send) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c new file mode 100644 index 000000000000..7d6570843723 --- /dev/null +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -0,0 +1,3585 @@ +/**************************************************************************/ +/* */ +/* IBM System i and System p Virtual NIC Device Driver */ +/* Copyright (C) 2014 IBM Corp. */ +/* Santiago Leon (santi_leon@yahoo.com) */ +/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ +/* John Allen (jallen@linux.vnet.ibm.com) */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program. */ +/* */ +/* This module contains the implementation of a virtual ethernet device */ +/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */ +/* option of the RS/6000 Platform Architecture to interface with virtual */ +/* ethernet NICs that are presented to the partition by the hypervisor. */ +/* */ +/* Messages are passed between the VNIC driver and the VNIC server using */ +/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */ +/* issue and receive commands that initiate communication with the server */ +/* on driver initialization. 
Sub CRQs (sCRQs) are similar to CRQs, but */ +/* are used by the driver to notify the server that a packet is */ +/* ready for transmission or that a buffer has been added to receive a */ +/* packet. Subsequently, sCRQs are used by the server to notify the */ +/* driver that a packet transmission has been completed or that a packet */ +/* has been received and placed in a waiting buffer. */ +/* */ +/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */ +/* which skbs are DMA mapped and immediately unmapped when the transmit */ +/* or receive has been completed, the VNIC driver is required to use */ +/* "long term mapping". This entails that large, continuous DMA mapped */ +/* buffers are allocated on driver initialization and these buffers are */ +/* then continuously reused to pass skbs to and from the VNIC server. */ +/* */ +/**************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <linux/ethtool.h> +#include <linux/proc_fs.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/irq.h> +#include <linux/kthread.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <net/net_namespace.h> +#include <asm/hvcall.h> +#include <linux/atomic.h> +#include <asm/vio.h> +#include <asm/iommu.h> +#include <linux/uaccess.h> +#include <asm/firmware.h> +#include <linux/seq_file.h> + +#include "ibmvnic.h" + +static const char ibmvnic_driver_name[] = "ibmvnic"; +static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; + +MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); +MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVNIC_DRIVER_VERSION); + +static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; +static int ibmvnic_remove(struct vio_dev *); +static void release_sub_crqs(struct ibmvnic_adapter *); +static int ibmvnic_reset_crq(struct ibmvnic_adapter *); +static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); +static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); +static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); +static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, + union sub_crq *sub_crq); +static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); +static int enable_scrq_irq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static int disable_scrq_irq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static int pending_scrq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static int ibmvnic_poll(struct napi_struct *napi, int data); +static void send_map_query(struct ibmvnic_adapter *adapter); +static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); +static void send_request_unmap(struct ibmvnic_adapter *, u8); + +struct ibmvnic_stat { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ + offsetof(struct ibmvnic_statistics, stat)) 
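An editorial aside, not part of the original patch: the "long term mapping" strategy described in the file header above boils down to allocating one large DMA-coherent region per pool at init time and then copying each skb into a fixed-size slot of that region, so the hot path never DMA-maps and unmaps individual skbs. The sketch below is illustrative only, with hypothetical names (lt_buff, lt_buff_alloc, lt_buff_fill); the driver's actual implementation is alloc_long_term_buff() and the copy in ibmvnic_xmit() further down in this file.

/* Illustrative sketch (assumed helper names), not part of the patch. */
struct lt_buff {
	void *buff;		/* kernel virtual address of the big buffer */
	dma_addr_t addr;	/* bus address from dma_alloc_coherent() */
	int size;
};

static int lt_buff_alloc(struct device *dev, struct lt_buff *ltb, int size)
{
	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr, GFP_KERNEL);
	return ltb->buff ? 0 : -ENOMEM;
}

/* TX fast path: copy the skb into slot 'index' and describe it by offset
 * into the long-term buffer instead of DMA-mapping the skb itself.
 */
static dma_addr_t lt_buff_fill(struct lt_buff *ltb, int index, int slot_size,
			       const struct sk_buff *skb)
{
	unsigned int offset = index * slot_size;

	skb_copy_from_linear_data(skb, ltb->buff + offset, skb->len);
	return ltb->addr + offset;	/* address placed in the sub-CRQ descriptor */
}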
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off))) + +static const struct ibmvnic_stat ibmvnic_stats[] = { + {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, + {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)}, + {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)}, + {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)}, + {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)}, + {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)}, + {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)}, + {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)}, + {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)}, + {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)}, + {"align_errors", IBMVNIC_STAT_OFF(align_errors)}, + {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)}, + {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)}, + {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)}, + {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)}, + {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)}, + {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)}, + {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)}, + {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)}, + {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)}, + {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)}, + {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)}, +}; + +static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, + unsigned long length, unsigned long *number, + unsigned long *irq) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length); + *number = retbuf[0]; + *irq = retbuf[1]; + + return rc; +} + +/* net_device_ops functions */ + +static void init_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *rx_pool, int num, int index, + int buff_size, int active) +{ + netdev_dbg(adapter->netdev, + "Initializing rx_pool %d, %d buffs, %d bytes each\n", + index, num, buff_size); + rx_pool->size = num; + rx_pool->index = index; + rx_pool->buff_size = buff_size; + rx_pool->active = active; +} + +static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb, int size) +{ + struct device *dev = &adapter->vdev->dev; + + ltb->size = size; + ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr, + GFP_KERNEL); + + if (!ltb->buff) { + dev_err(dev, "Couldn't alloc long term buffer\n"); + return -ENOMEM; + } + ltb->map_id = adapter->map_id; + adapter->map_id++; + send_request_map(adapter, ltb->addr, + ltb->size, ltb->map_id); + init_completion(&adapter->fw_done); + wait_for_completion(&adapter->fw_done); + return 0; +} + +static void free_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb) +{ + struct device *dev = &adapter->vdev->dev; + + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + send_request_unmap(adapter, ltb->map_id); +} + +static int alloc_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *pool) +{ + struct device *dev = &adapter->vdev->dev; + int i; + + pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL); + if (!pool->free_map) + return -ENOMEM; + + pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff), + GFP_KERNEL); + + if (!pool->rx_buff) { + dev_err(dev, "Couldn't alloc rx buffers\n"); + kfree(pool->free_map); + return -ENOMEM; + } + + if (alloc_long_term_buff(adapter, &pool->long_term_buff, + pool->size * pool->buff_size)) { +
kfree(pool->free_map); + kfree(pool->rx_buff); + return -ENOMEM; + } + + for (i = 0; i < pool->size; ++i) + pool->free_map[i] = i; + + atomic_set(&pool->available, 0); + pool->next_alloc = 0; + pool->next_free = 0; + + return 0; +} + +static void replenish_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *pool) +{ + int count = pool->size - atomic_read(&pool->available); + struct device *dev = &adapter->vdev->dev; + int buffers_added = 0; + unsigned long lpar_rc; + union sub_crq sub_crq; + struct sk_buff *skb; + unsigned int offset; + dma_addr_t dma_addr; + unsigned char *dst; + u64 *handle_array; + int shift = 0; + int index; + int i; + + handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf-> + off_rxadd_subcrqs)); + + for (i = 0; i < count; ++i) { + skb = alloc_skb(pool->buff_size, GFP_ATOMIC); + if (!skb) { + dev_err(dev, "Couldn't replenish rx buff\n"); + adapter->replenish_no_mem++; + break; + } + + index = pool->free_map[pool->next_free]; + + if (pool->rx_buff[index].skb) + dev_err(dev, "Inconsistent free_map!\n"); + + /* Copy the skb to the long term mapped DMA buffer */ + offset = index * pool->buff_size; + dst = pool->long_term_buff.buff + offset; + memset(dst, 0, pool->buff_size); + dma_addr = pool->long_term_buff.addr + offset; + pool->rx_buff[index].data = dst; + + pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + pool->rx_buff[index].dma = dma_addr; + pool->rx_buff[index].skb = skb; + pool->rx_buff[index].pool_index = pool->index; + pool->rx_buff[index].size = pool->buff_size; + + memset(&sub_crq, 0, sizeof(sub_crq)); + sub_crq.rx_add.first = IBMVNIC_CRQ_CMD; + sub_crq.rx_add.correlator = + cpu_to_be64((u64)&pool->rx_buff[index]); + sub_crq.rx_add.ioba = cpu_to_be32(dma_addr); + sub_crq.rx_add.map_id = pool->long_term_buff.map_id; + + /* The length field of the sCRQ is defined to be 24 bits so the + * buffer size needs to be left shifted by a byte before it is + * converted to big endian to prevent the last byte from being + * truncated. 
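+ *
+ * (Editor's illustrative aside, not part of the original patch: with a
+ * 16 KB buffer, pool->buff_size == 0x4000 and cpu_to_be32(0x4000) lays the
+ * bytes out as 00 00 40 00, so dropping the last byte of the 24-bit field
+ * would leave 0x000040. Shifting by 8 first gives 0x400000, whose
+ * big-endian layout 00 40 00 00 still reads as the intended 0x004000 after
+ * the final byte is truncated.)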
+ */ +#ifdef __LITTLE_ENDIAN__ + shift = 8; +#endif + sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift); + + lpar_rc = send_subcrq(adapter, handle_array[pool->index], + &sub_crq); + if (lpar_rc != H_SUCCESS) + goto failure; + + buffers_added++; + adapter->replenish_add_buff_success++; + pool->next_free = (pool->next_free + 1) % pool->size; + } + atomic_add(buffers_added, &pool->available); + return; + +failure: + dev_info(dev, "replenish pools failure\n"); + pool->free_map[pool->next_free] = index; + pool->rx_buff[index].skb = NULL; + if (!dma_mapping_error(dev, dma_addr)) + dma_unmap_single(dev, dma_addr, pool->buff_size, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + adapter->replenish_add_buff_failure++; + atomic_add(buffers_added, &pool->available); +} + +static void replenish_pools(struct ibmvnic_adapter *adapter) +{ + int i; + + if (adapter->migrated) + return; + + adapter->replenish_task_cycles++; + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) { + if (adapter->rx_pool[i].active) + replenish_rx_pool(adapter, &adapter->rx_pool[i]); + } +} + +static void free_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *pool) +{ + int i; + + kfree(pool->free_map); + pool->free_map = NULL; + + if (!pool->rx_buff) + return; + + for (i = 0; i < pool->size; i++) { + if (pool->rx_buff[i].skb) { + dev_kfree_skb_any(pool->rx_buff[i].skb); + pool->rx_buff[i].skb = NULL; + } + } + kfree(pool->rx_buff); + pool->rx_buff = NULL; +} + +static int ibmvnic_open(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_pool *tx_pool; + union ibmvnic_crq crq; + int rxadd_subcrqs; + u64 *size_array; + int tx_subcrqs; + int i, j; + + rxadd_subcrqs = + be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + tx_subcrqs = + be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf-> + off_rxadd_buff_size)); + adapter->map_id = 1; + adapter->napi = kcalloc(adapter->req_rx_queues, + sizeof(struct napi_struct), GFP_KERNEL); + if (!adapter->napi) + goto alloc_napi_failed; + for (i = 0; i < adapter->req_rx_queues; i++) { + netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, + NAPI_POLL_WEIGHT); + napi_enable(&adapter->napi[i]); + } + adapter->rx_pool = + kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL); + + if (!adapter->rx_pool) + goto rx_pool_arr_alloc_failed; + send_map_query(adapter); + for (i = 0; i < rxadd_subcrqs; i++) { + init_rx_pool(adapter, &adapter->rx_pool[i], + IBMVNIC_BUFFS_PER_POOL, i, + be64_to_cpu(size_array[i]), 1); + if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) { + dev_err(dev, "Couldn't alloc rx pool\n"); + goto rx_pool_alloc_failed; + } + } + adapter->tx_pool = + kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + + if (!adapter->tx_pool) + goto tx_pool_arr_alloc_failed; + for (i = 0; i < tx_subcrqs; i++) { + tx_pool = &adapter->tx_pool[i]; + tx_pool->tx_buff = + kcalloc(adapter->max_tx_entries_per_subcrq, + sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); + if (!tx_pool->tx_buff) + goto tx_pool_alloc_failed; + + if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, + adapter->max_tx_entries_per_subcrq * + adapter->req_mtu)) + goto tx_ltb_alloc_failed; + + tx_pool->free_map = + kcalloc(adapter->max_tx_entries_per_subcrq, + sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) + goto tx_fm_alloc_failed; + + for (j = 
0; j < adapter->max_tx_entries_per_subcrq; j++) + tx_pool->free_map[j] = j; + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + } + adapter->bounce_buffer_size = + (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1; + adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size, + GFP_KERNEL); + if (!adapter->bounce_buffer) + goto bounce_alloc_failed; + + adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer, + adapter->bounce_buffer_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + dev_err(dev, "Couldn't map tx bounce buffer\n"); + goto bounce_map_failed; + } + replenish_pools(adapter); + + /* We're ready to receive frames, enable the sub-crq interrupts and + * set the logical link state to up + */ + for (i = 0; i < adapter->req_rx_queues; i++) + enable_scrq_irq(adapter, adapter->rx_scrq[i]); + + for (i = 0; i < adapter->req_tx_queues; i++) + enable_scrq_irq(adapter, adapter->tx_scrq[i]); + + memset(&crq, 0, sizeof(crq)); + crq.logical_link_state.first = IBMVNIC_CRQ_CMD; + crq.logical_link_state.cmd = LOGICAL_LINK_STATE; + crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP; + ibmvnic_send_crq(adapter, &crq); + + netif_start_queue(netdev); + return 0; + +bounce_map_failed: + kfree(adapter->bounce_buffer); +bounce_alloc_failed: + i = tx_subcrqs - 1; + kfree(adapter->tx_pool[i].free_map); +tx_fm_alloc_failed: + free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff); +tx_ltb_alloc_failed: + kfree(adapter->tx_pool[i].tx_buff); +tx_pool_alloc_failed: + for (j = 0; j < i; j++) { + kfree(adapter->tx_pool[j].tx_buff); + free_long_term_buff(adapter, + &adapter->tx_pool[j].long_term_buff); + kfree(adapter->tx_pool[j].free_map); + } + kfree(adapter->tx_pool); + adapter->tx_pool = NULL; +tx_pool_arr_alloc_failed: + i = rxadd_subcrqs; +rx_pool_alloc_failed: + for (j = 0; j < i; j++) { + free_rx_pool(adapter, &adapter->rx_pool[j]); + free_long_term_buff(adapter, + &adapter->rx_pool[j].long_term_buff); + } + kfree(adapter->rx_pool); + adapter->rx_pool = NULL; +rx_pool_arr_alloc_failed: + for (i = 0; i < adapter->req_rx_queues; i++) + napi_enable(&adapter->napi[i]); +alloc_napi_failed: + return -ENOMEM; +} + +static int ibmvnic_close(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + int i; + + adapter->closing = true; + + for (i = 0; i < adapter->req_rx_queues; i++) + napi_disable(&adapter->napi[i]); + + netif_stop_queue(netdev); + + if (adapter->bounce_buffer) { + if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + dma_unmap_single(&adapter->vdev->dev, + adapter->bounce_buffer_dma, + adapter->bounce_buffer_size, + DMA_BIDIRECTIONAL); + adapter->bounce_buffer_dma = DMA_ERROR_CODE; + } + kfree(adapter->bounce_buffer); + adapter->bounce_buffer = NULL; + } + + memset(&crq, 0, sizeof(crq)); + crq.logical_link_state.first = IBMVNIC_CRQ_CMD; + crq.logical_link_state.cmd = LOGICAL_LINK_STATE; + crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN; + ibmvnic_send_crq(adapter, &crq); + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + i++) { + kfree(adapter->tx_pool[i].tx_buff); + free_long_term_buff(adapter, + &adapter->tx_pool[i].long_term_buff); + kfree(adapter->tx_pool[i].free_map); + } + kfree(adapter->tx_pool); + adapter->tx_pool = NULL; + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) { + free_rx_pool(adapter, &adapter->rx_pool[i]); + 
free_long_term_buff(adapter, + &adapter->rx_pool[i].long_term_buff); + } + kfree(adapter->rx_pool); + adapter->rx_pool = NULL; + + adapter->closing = false; + + return 0; +} + +static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int queue_num = skb_get_queue_mapping(skb); + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_buff *tx_buff = NULL; + struct ibmvnic_tx_pool *tx_pool; + unsigned int tx_send_failed = 0; + unsigned int tx_map_failed = 0; + unsigned int tx_dropped = 0; + unsigned int tx_packets = 0; + unsigned int tx_bytes = 0; + dma_addr_t data_dma_addr; + struct netdev_queue *txq; + bool used_bounce = false; + unsigned long lpar_rc; + union sub_crq tx_crq; + unsigned int offset; + unsigned char *dst; + u64 *handle_array; + int index = 0; + int ret = 0; + + tx_pool = &adapter->tx_pool[queue_num]; + txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); + handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf-> + off_txsubm_subcrqs)); + if (adapter->migrated) { + tx_send_failed++; + tx_dropped++; + ret = NETDEV_TX_BUSY; + goto out; + } + + index = tx_pool->free_map[tx_pool->consumer_index]; + offset = index * adapter->req_mtu; + dst = tx_pool->long_term_buff.buff + offset; + memset(dst, 0, adapter->req_mtu); + skb_copy_from_linear_data(skb, dst, skb->len); + data_dma_addr = tx_pool->long_term_buff.addr + offset; + + tx_pool->consumer_index = + (tx_pool->consumer_index + 1) % + adapter->max_tx_entries_per_subcrq; + + tx_buff = &tx_pool->tx_buff[index]; + tx_buff->skb = skb; + tx_buff->data_dma[0] = data_dma_addr; + tx_buff->data_len[0] = skb->len; + tx_buff->index = index; + tx_buff->pool_index = queue_num; + tx_buff->last_frag = true; + tx_buff->used_bounce = used_bounce; + + memset(&tx_crq, 0, sizeof(tx_crq)); + tx_crq.v1.first = IBMVNIC_CRQ_CMD; + tx_crq.v1.type = IBMVNIC_TX_DESC; + tx_crq.v1.n_crq_elem = 1; + tx_crq.v1.n_sge = 1; + tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; + tx_crq.v1.correlator = cpu_to_be32(index); + tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + tx_crq.v1.sge_len = cpu_to_be32(skb->len); + tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); + + if (adapter->vlan_header_insertion) { + tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; + tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); + } + + if (skb->protocol == htons(ETH_P_IP)) { + if (ip_hdr(skb)->version == 4) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; + else if (ip_hdr(skb)->version == 6) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; + + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; + else if (ip_hdr(skb)->protocol != IPPROTO_TCP) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; + + lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); + + if (lpar_rc != H_SUCCESS) { + dev_err(dev, "tx failed with code %ld\n", lpar_rc); + + if (tx_pool->consumer_index == 0) + tx_pool->consumer_index = + adapter->max_tx_entries_per_subcrq - 1; + else + tx_pool->consumer_index--; + + tx_send_failed++; + tx_dropped++; + ret = NETDEV_TX_BUSY; + goto out; + } + tx_packets++; + tx_bytes += skb->len; + txq->trans_start = jiffies; + ret = NETDEV_TX_OK; + +out: + netdev->stats.tx_dropped += tx_dropped; + netdev->stats.tx_bytes += tx_bytes; + netdev->stats.tx_packets += tx_packets; + adapter->tx_send_failed += tx_send_failed; + adapter->tx_map_failed += 
tx_map_failed; + + return ret; +} + +static void ibmvnic_set_multi(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_capability.first = IBMVNIC_CRQ_CMD; + crq.request_capability.cmd = REQUEST_CAPABILITY; + + if (netdev->flags & IFF_PROMISC) { + if (!adapter->promisc_supported) + return; + } else { + if (netdev->flags & IFF_ALLMULTI) { + /* Accept all multicast */ + memset(&crq, 0, sizeof(crq)); + crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; + crq.multicast_ctrl.cmd = MULTICAST_CTRL; + crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; + ibmvnic_send_crq(adapter, &crq); + } else if (netdev_mc_empty(netdev)) { + /* Reject all multicast */ + memset(&crq, 0, sizeof(crq)); + crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; + crq.multicast_ctrl.cmd = MULTICAST_CTRL; + crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; + ibmvnic_send_crq(adapter, &crq); + } else { + /* Accept one or more multicast(s) */ + netdev_for_each_mc_addr(ha, netdev) { + memset(&crq, 0, sizeof(crq)); + crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; + crq.multicast_ctrl.cmd = MULTICAST_CTRL; + crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; + ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], + ha->addr); + ibmvnic_send_crq(adapter, &crq); + } + } + } +} + +static int ibmvnic_set_mac(struct net_device *netdev, void *p) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + union ibmvnic_crq crq; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memset(&crq, 0, sizeof(crq)); + crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; + crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; + ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); + ibmvnic_send_crq(adapter, &crq); + /* netdev->dev_addr is changed in handle_change_mac_rsp function */ + return 0; +} + +static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu) + return -EINVAL; + + netdev->mtu = new_mtu; + return 0; +} + +static void ibmvnic_tx_timeout(struct net_device *dev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(dev); + int rc; + + /* Adapter timed out, resetting it */ + release_sub_crqs(adapter); + rc = ibmvnic_reset_crq(adapter); + if (rc) + dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n"); + else + ibmvnic_send_crq_init(adapter); +} + +static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_buff *rx_buff) +{ + struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; + + rx_buff->skb = NULL; + + pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); + pool->next_alloc = (pool->next_alloc + 1) % pool->size; + + atomic_dec(&pool->available); +} + +static int ibmvnic_poll(struct napi_struct *napi, int budget) +{ + struct net_device *netdev = napi->dev; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int scrq_num = (int)(napi - adapter->napi); + int frames_processed = 0; +restart_poll: + while (frames_processed < budget) { + struct sk_buff *skb; + struct ibmvnic_rx_buff *rx_buff; + union sub_crq *next; + u32 length; + u16 offset; + u8 flags = 0; + + if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) + break; + next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); + rx_buff = + (struct ibmvnic_rx_buff *)be64_to_cpu(next-> + rx_comp.correlator); + /* do error 
checking */ + if (next->rx_comp.rc) { + netdev_err(netdev, "rx error %x\n", next->rx_comp.rc); + /* free the entry */ + next->rx_comp.first = 0; + remove_buff_from_pool(adapter, rx_buff); + break; + } + + length = be32_to_cpu(next->rx_comp.len); + offset = be16_to_cpu(next->rx_comp.off_frame_data); + flags = next->rx_comp.flags; + skb = rx_buff->skb; + skb_copy_to_linear_data(skb, rx_buff->data + offset, + length); + skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci); + /* free the entry */ + next->rx_comp.first = 0; + remove_buff_from_pool(adapter, rx_buff); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); + + if (flags & IBMVNIC_IP_CHKSUM_GOOD && + flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + length = skb->len; + napi_gro_receive(napi, skb); /* send it up */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += length; + frames_processed++; + } + replenish_pools(adapter); + + if (frames_processed < budget) { + enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); + napi_complete(napi); + if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && + napi_reschedule(napi)) { + disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); + goto restart_poll; + } + } + return frames_processed; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ibmvnic_netpoll_controller(struct net_device *dev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(dev); + int i; + + replenish_pools(netdev_priv(dev)); + for (i = 0; i < adapter->req_rx_queues; i++) + ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, + adapter->rx_scrq[i]); +} +#endif + +static const struct net_device_ops ibmvnic_netdev_ops = { + .ndo_open = ibmvnic_open, + .ndo_stop = ibmvnic_close, + .ndo_start_xmit = ibmvnic_xmit, + .ndo_set_rx_mode = ibmvnic_set_multi, + .ndo_set_mac_address = ibmvnic_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ibmvnic_change_mtu, + .ndo_tx_timeout = ibmvnic_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ibmvnic_netpoll_controller, +#endif +}; + +/* ethtool functions */ + +static int ibmvnic_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + ADVERTISED_FIBRE); + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_FIBRE; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 1; + return 0; +} + +static void ibmvnic_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); + strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); +} + +static u32 ibmvnic_get_msglevel(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static u32 ibmvnic_get_link(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + /* Don't need to send a query because we request a logical link up at + * init and then we wait for link state indications + */ + return adapter->logical_link_state; +} + +static void ibmvnic_get_ringparam(struct net_device *netdev, + struct 
ethtool_ringparam *ring) +{ + ring->rx_max_pending = 0; + ring->tx_max_pending = 0; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = 0; + ring->tx_pending = 0; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); +} + +static int ibmvnic_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ibmvnic_stats); + default: + return -EOPNOTSUPP; + } +} + +static void ibmvnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct ibmvnic_adapter *adapter = netdev_priv(dev); + union ibmvnic_crq crq; + int i; + + memset(&crq, 0, sizeof(crq)); + crq.request_statistics.first = IBMVNIC_CRQ_CMD; + crq.request_statistics.cmd = REQUEST_STATISTICS; + crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); + crq.request_statistics.len = + cpu_to_be32(sizeof(struct ibmvnic_statistics)); + ibmvnic_send_crq(adapter, &crq); + + /* Wait for data to be written */ + init_completion(&adapter->stats_done); + wait_for_completion(&adapter->stats_done); + + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) + data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset); +} + +static const struct ethtool_ops ibmvnic_ethtool_ops = { + .get_settings = ibmvnic_get_settings, + .get_drvinfo = ibmvnic_get_drvinfo, + .get_msglevel = ibmvnic_get_msglevel, + .set_msglevel = ibmvnic_set_msglevel, + .get_link = ibmvnic_get_link, + .get_ringparam = ibmvnic_get_ringparam, + .get_strings = ibmvnic_get_strings, + .get_sset_count = ibmvnic_get_sset_count, + .get_ethtool_stats = ibmvnic_get_ethtool_stats, +}; + +/* Routines for managing CRQs/sCRQs */ + +static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + long rc; + + netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); + + /* Close the sub-crqs */ + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)scrq->msgs, 2); + kfree(scrq); +} + +static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter + *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_sub_crq_queue *scrq; + int rc; + + scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC); + if (!scrq) + return NULL; + + scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2); + memset(scrq->msgs, 0, 4 * PAGE_SIZE); + if (!scrq->msgs) { + dev_warn(dev, "Couldn't allocate crq queue messages page\n"); + goto zero_page_failed; + } + + scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, scrq->msg_token)) { + dev_warn(dev, "Couldn't map crq queue messages page\n"); + goto map_failed; + } + + rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, + 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); + + if (rc == H_RESOURCE) + rc = ibmvnic_reset_crq(adapter); + + if (rc == H_CLOSED) { + dev_warn(dev, "Partner adapter not ready, waiting.\n"); + } else if (rc) { + dev_warn(dev, "Error %d registering sub-crq\n", rc); + goto 
reg_failed; + } + + scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); + if (scrq->irq == NO_IRQ) { + dev_err(dev, "Error mapping irq\n"); + goto map_irq_failed; + } + + scrq->adapter = adapter; + scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); + scrq->cur = 0; + scrq->rx_skb_top = NULL; + spin_lock_init(&scrq->lock); + + netdev_dbg(adapter->netdev, + "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", + scrq->crq_num, scrq->hw_irq, scrq->irq); + + return scrq; + +map_irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_failed: + dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, + DMA_BIDIRECTIONAL); +map_failed: + free_pages((unsigned long)scrq->msgs, 2); +zero_page_failed: + kfree(scrq); + + return NULL; +} + +static void release_sub_crqs(struct ibmvnic_adapter *adapter) +{ + int i; + + if (adapter->tx_scrq) { + for (i = 0; i < adapter->req_tx_queues; i++) + if (adapter->tx_scrq[i]) { + free_irq(adapter->tx_scrq[i]->irq, + adapter->tx_scrq[i]); + release_sub_crq_queue(adapter, + adapter->tx_scrq[i]); + } + adapter->tx_scrq = NULL; + } + + if (adapter->rx_scrq) { + for (i = 0; i < adapter->req_rx_queues; i++) + if (adapter->rx_scrq[i]) { + free_irq(adapter->rx_scrq[i]->irq, + adapter->rx_scrq[i]); + release_sub_crq_queue(adapter, + adapter->rx_scrq[i]); + } + adapter->rx_scrq = NULL; + } + + adapter->requested_caps = 0; +} + +static int disable_scrq_irq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + unsigned long rc; + + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, + H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); + if (rc) + dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", + scrq->hw_irq, rc); + return rc; +} + +static int enable_scrq_irq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + unsigned long rc; + + if (scrq->hw_irq > 0x100000000ULL) { + dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); + return 1; + } + + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, + H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); + if (rc) + dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", + scrq->hw_irq, rc); + return rc; +} + +static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_buff *txbuff; + union sub_crq *next; + int index; + int i, j; + +restart_loop: + while (pending_scrq(adapter, scrq)) { + unsigned int pool = scrq->pool_index; + + next = ibmvnic_next_scrq(adapter, scrq); + for (i = 0; i < next->tx_comp.num_comps; i++) { + if (next->tx_comp.rcs[i]) { + dev_err(dev, "tx error %x\n", + next->tx_comp.rcs[i]); + continue; + } + index = be32_to_cpu(next->tx_comp.correlators[i]); + txbuff = &adapter->tx_pool[pool].tx_buff[index]; + + for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { + if (!txbuff->data_dma[j]) + continue; + + txbuff->data_dma[j] = 0; + txbuff->used_bounce = false; + } + + if (txbuff->last_frag) + dev_kfree_skb_any(txbuff->skb); + + adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. 
+ producer_index] = index; + adapter->tx_pool[pool].producer_index = + (adapter->tx_pool[pool].producer_index + 1) % + adapter->max_tx_entries_per_subcrq; + } + /* remove tx_comp scrq*/ + next->tx_comp.first = 0; + } + + enable_scrq_irq(adapter, scrq); + + if (pending_scrq(adapter, scrq)) { + disable_scrq_irq(adapter, scrq); + goto restart_loop; + } + + return 0; +} + +static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) +{ + struct ibmvnic_sub_crq_queue *scrq = instance; + struct ibmvnic_adapter *adapter = scrq->adapter; + + disable_scrq_irq(adapter, scrq); + ibmvnic_complete_tx(adapter, scrq); + + return IRQ_HANDLED; +} + +static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) +{ + struct ibmvnic_sub_crq_queue *scrq = instance; + struct ibmvnic_adapter *adapter = scrq->adapter; + + if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { + disable_scrq_irq(adapter, scrq); + __napi_schedule(&adapter->napi[scrq->scrq_num]); + } + + return IRQ_HANDLED; +} + +static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_sub_crq_queue **allqueues; + int registered_queues = 0; + union ibmvnic_crq crq; + int total_queues; + int more = 0; + int i, j; + int rc; + + if (!retry) { + /* Sub-CRQ entries are 32 byte long */ + int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); + + if (adapter->min_tx_entries_per_subcrq > entries_page || + adapter->min_rx_add_entries_per_subcrq > entries_page) { + dev_err(dev, "Fatal, invalid entries per sub-crq\n"); + goto allqueues_failed; + } + + /* Get the minimum between the queried max and the entries + * that fit in our PAGE_SIZE + */ + adapter->req_tx_entries_per_subcrq = + adapter->max_tx_entries_per_subcrq > entries_page ? + entries_page : adapter->max_tx_entries_per_subcrq; + adapter->req_rx_add_entries_per_subcrq = + adapter->max_rx_add_entries_per_subcrq > entries_page ? 
+ entries_page : adapter->max_rx_add_entries_per_subcrq; + + /* Choosing the maximum number of queues supported by firmware*/ + adapter->req_tx_queues = adapter->min_tx_queues; + adapter->req_rx_queues = adapter->min_rx_queues; + adapter->req_rx_add_queues = adapter->min_rx_add_queues; + + adapter->req_mtu = adapter->max_mtu; + } + + total_queues = adapter->req_tx_queues + adapter->req_rx_queues; + + allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC); + if (!allqueues) + goto allqueues_failed; + + for (i = 0; i < total_queues; i++) { + allqueues[i] = init_sub_crq_queue(adapter); + if (!allqueues[i]) { + dev_warn(dev, "Couldn't allocate all sub-crqs\n"); + break; + } + registered_queues++; + } + + /* Make sure we were able to register the minimum number of queues */ + if (registered_queues < + adapter->min_tx_queues + adapter->min_rx_queues) { + dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); + goto tx_failed; + } + + /* Distribute the failed allocated queues*/ + for (i = 0; i < total_queues - registered_queues + more ; i++) { + netdev_dbg(adapter->netdev, "Reducing number of queues\n"); + switch (i % 3) { + case 0: + if (adapter->req_rx_queues > adapter->min_rx_queues) + adapter->req_rx_queues--; + else + more++; + break; + case 1: + if (adapter->req_tx_queues > adapter->min_tx_queues) + adapter->req_tx_queues--; + else + more++; + break; + } + } + + adapter->tx_scrq = kcalloc(adapter->req_tx_queues, + sizeof(*adapter->tx_scrq), GFP_ATOMIC); + if (!adapter->tx_scrq) + goto tx_failed; + + for (i = 0; i < adapter->req_tx_queues; i++) { + adapter->tx_scrq[i] = allqueues[i]; + adapter->tx_scrq[i]->pool_index = i; + rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx, + 0, "ibmvnic_tx", adapter->tx_scrq[i]); + if (rc) { + dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", + adapter->tx_scrq[i]->irq, rc); + goto req_tx_irq_failed; + } + } + + adapter->rx_scrq = kcalloc(adapter->req_rx_queues, + sizeof(*adapter->rx_scrq), GFP_ATOMIC); + if (!adapter->rx_scrq) + goto rx_failed; + + for (i = 0; i < adapter->req_rx_queues; i++) { + adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; + adapter->rx_scrq[i]->scrq_num = i; + rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx, + 0, "ibmvnic_rx", adapter->rx_scrq[i]); + if (rc) { + dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", + adapter->rx_scrq[i]->irq, rc); + goto req_rx_irq_failed; + } + } + + memset(&crq, 0, sizeof(crq)); + crq.request_capability.first = IBMVNIC_CRQ_CMD; + crq.request_capability.cmd = REQUEST_CAPABILITY; + + crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); + crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); + crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); + crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = + cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); + crq.request_capability.number = + cpu_to_be32(adapter->req_tx_entries_per_subcrq); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = + cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); + crq.request_capability.number = + cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_MTU); + crq.request_capability.number = cpu_to_be32(adapter->req_mtu); + ibmvnic_send_crq(adapter, &crq); + + if (adapter->netdev->flags & IFF_PROMISC) { + if (adapter->promisc_supported) { + crq.request_capability.capability = + cpu_to_be16(PROMISC_REQUESTED); + crq.request_capability.number = cpu_to_be32(1); + ibmvnic_send_crq(adapter, &crq); + } + } else { + crq.request_capability.capability = + cpu_to_be16(PROMISC_REQUESTED); + crq.request_capability.number = cpu_to_be32(0); + ibmvnic_send_crq(adapter, &crq); + } + + kfree(allqueues); + + return; + +req_rx_irq_failed: + for (j = 0; j < i; j++) + free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); + i = adapter->req_tx_queues; +req_tx_irq_failed: + for (j = 0; j < i; j++) + free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); + kfree(adapter->rx_scrq); + adapter->rx_scrq = NULL; +rx_failed: + kfree(adapter->tx_scrq); + adapter->tx_scrq = NULL; +tx_failed: + for (i = 0; i < registered_queues; i++) + release_sub_crq_queue(adapter, allqueues[i]); + kfree(allqueues); +allqueues_failed: + ibmvnic_remove(adapter->vdev); +} + +static int pending_scrq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + union sub_crq *entry = &scrq->msgs[scrq->cur]; + + if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing) + return 1; + else + return 0; +} + +static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + union sub_crq *entry; + unsigned long flags; + + spin_lock_irqsave(&scrq->lock, flags); + entry = &scrq->msgs[scrq->cur]; + if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { + if (++scrq->cur == scrq->size) + scrq->cur = 0; + } else { + entry = NULL; + } + spin_unlock_irqrestore(&scrq->lock, flags); + + return entry; +} + +static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *queue = &adapter->crq; + union ibmvnic_crq *crq; + + crq = &queue->msgs[queue->cur]; + if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { + if (++queue->cur == queue->size) + queue->cur = 0; + } else { + crq = NULL; + } + + return crq; +} + +static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, + union sub_crq *sub_crq) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + u64 
*u64_crq = (u64 *)sub_crq; + int rc; + + netdev_dbg(adapter->netdev, + "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", + (unsigned long int)cpu_to_be64(remote_handle), + (unsigned long int)cpu_to_be64(u64_crq[0]), + (unsigned long int)cpu_to_be64(u64_crq[1]), + (unsigned long int)cpu_to_be64(u64_crq[2]), + (unsigned long int)cpu_to_be64(u64_crq[3])); + + /* Make sure the hypervisor sees the complete request */ + mb(); + + rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, + cpu_to_be64(remote_handle), + cpu_to_be64(u64_crq[0]), + cpu_to_be64(u64_crq[1]), + cpu_to_be64(u64_crq[2]), + cpu_to_be64(u64_crq[3])); + + if (rc) { + if (rc == H_CLOSED) + dev_warn(dev, "CRQ Queue closed\n"); + dev_err(dev, "Send error (rc=%d)\n", rc); + } + + return rc; +} + +static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, + union ibmvnic_crq *crq) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + u64 *u64_crq = (u64 *)crq; + int rc; + + netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", + (unsigned long int)cpu_to_be64(u64_crq[0]), + (unsigned long int)cpu_to_be64(u64_crq[1])); + + /* Make sure the hypervisor sees the complete request */ + mb(); + + rc = plpar_hcall_norets(H_SEND_CRQ, ua, + cpu_to_be64(u64_crq[0]), + cpu_to_be64(u64_crq[1])); + + if (rc) { + if (rc == H_CLOSED) + dev_warn(dev, "CRQ Queue closed\n"); + dev_warn(dev, "Send error (rc=%d)\n", rc); + } + + return rc; +} + +static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.generic.first = IBMVNIC_CRQ_INIT_CMD; + crq.generic.cmd = IBMVNIC_CRQ_INIT; + netdev_dbg(adapter->netdev, "Sending CRQ init\n"); + + return ibmvnic_send_crq(adapter, &crq); +} + +static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.generic.first = IBMVNIC_CRQ_INIT_CMD; + crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; + netdev_dbg(adapter->netdev, "Sending CRQ init complete\n"); + + return ibmvnic_send_crq(adapter, &crq); +} + +static int send_version_xchg(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.version_exchange.first = IBMVNIC_CRQ_CMD; + crq.version_exchange.cmd = VERSION_EXCHANGE; + crq.version_exchange.version = cpu_to_be16(ibmvnic_version); + + return ibmvnic_send_crq(adapter, &crq); +} + +static void send_login(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_login_rsp_buffer *login_rsp_buffer; + struct ibmvnic_login_buffer *login_buffer; + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + dma_addr_t rsp_buffer_token; + dma_addr_t buffer_token; + size_t rsp_buffer_size; + union ibmvnic_crq crq; + unsigned long flags; + size_t buffer_size; + __be64 *tx_list_p; + __be64 *rx_list_p; + int i; + + buffer_size = + sizeof(struct ibmvnic_login_buffer) + + sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues); + + login_buffer = kmalloc(buffer_size, GFP_ATOMIC); + if (!login_buffer) + goto buf_alloc_failed; + + buffer_token = dma_map_single(dev, login_buffer, buffer_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, buffer_token)) { + dev_err(dev, "Couldn't map login buffer\n"); + goto buf_map_failed; + } + + rsp_buffer_size = + sizeof(struct ibmvnic_login_rsp_buffer) + + sizeof(u64) * (adapter->req_tx_queues + + adapter->req_rx_queues * + adapter->req_rx_add_queues + adapter-> + req_rx_add_queues) + + sizeof(u8) * 
(IBMVNIC_TX_DESC_VERSIONS); + + login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); + if (!login_rsp_buffer) + goto buf_rsp_alloc_failed; + + rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, + rsp_buffer_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, rsp_buffer_token)) { + dev_err(dev, "Couldn't map login rsp buffer\n"); + goto buf_rsp_map_failed; + } + inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); + if (!inflight_cmd) { + dev_err(dev, "Couldn't allocate inflight_cmd\n"); + goto inflight_alloc_failed; + } + adapter->login_buf = login_buffer; + adapter->login_buf_token = buffer_token; + adapter->login_buf_sz = buffer_size; + adapter->login_rsp_buf = login_rsp_buffer; + adapter->login_rsp_buf_token = rsp_buffer_token; + adapter->login_rsp_buf_sz = rsp_buffer_size; + + login_buffer->len = cpu_to_be32(buffer_size); + login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); + login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); + login_buffer->off_txcomp_subcrqs = + cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); + login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); + login_buffer->off_rxcomp_subcrqs = + cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + + sizeof(u64) * adapter->req_tx_queues); + login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); + login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); + + tx_list_p = (__be64 *)((char *)login_buffer + + sizeof(struct ibmvnic_login_buffer)); + rx_list_p = (__be64 *)((char *)login_buffer + + sizeof(struct ibmvnic_login_buffer) + + sizeof(u64) * adapter->req_tx_queues); + + for (i = 0; i < adapter->req_tx_queues; i++) { + if (adapter->tx_scrq[i]) { + tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]-> + crq_num); + } + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + if (adapter->rx_scrq[i]) { + rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]-> + crq_num); + } + } + + netdev_dbg(adapter->netdev, "Login Buffer:\n"); + for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { + netdev_dbg(adapter->netdev, "%016lx\n", + ((unsigned long int *)(adapter->login_buf))[i]); + } + + memset(&crq, 0, sizeof(crq)); + crq.login.first = IBMVNIC_CRQ_CMD; + crq.login.cmd = LOGIN; + crq.login.ioba = cpu_to_be32(buffer_token); + crq.login.len = cpu_to_be32(buffer_size); + + memcpy(&inflight_cmd->crq, &crq, sizeof(crq)); + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_add_tail(&inflight_cmd->list, &adapter->inflight); + spin_unlock_irqrestore(&adapter->inflight_lock, flags); + + ibmvnic_send_crq(adapter, &crq); + + return; + +inflight_alloc_failed: + dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, + DMA_FROM_DEVICE); +buf_rsp_map_failed: + kfree(login_rsp_buffer); +buf_rsp_alloc_failed: + dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); +buf_map_failed: + kfree(login_buffer); +buf_alloc_failed: + return; +} + +static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, + u32 len, u8 map_id) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_map.first = IBMVNIC_CRQ_CMD; + crq.request_map.cmd = REQUEST_MAP; + crq.request_map.map_id = map_id; + crq.request_map.ioba = cpu_to_be32(addr); + crq.request_map.len = cpu_to_be32(len); + ibmvnic_send_crq(adapter, &crq); +} + +static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_unmap.first = IBMVNIC_CRQ_CMD; + crq.request_unmap.cmd = REQUEST_UNMAP; + 
crq.request_unmap.map_id = map_id; + ibmvnic_send_crq(adapter, &crq); +} + +static void send_map_query(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.query_map.first = IBMVNIC_CRQ_CMD; + crq.query_map.cmd = QUERY_MAP; + ibmvnic_send_crq(adapter, &crq); +} + +/* Send a series of CRQs requesting various capabilities of the VNIC server */ +static void send_cap_queries(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + atomic_set(&adapter->running_cap_queries, 0); + memset(&crq, 0, sizeof(crq)); + crq.query_capability.first = IBMVNIC_CRQ_CMD; + crq.query_capability.cmd = QUERY_CAPABILITY; + + crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MIN_MTU); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_MTU); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + 
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); +} + +static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; + union ibmvnic_crq crq; + int i; + + dma_unmap_single(dev, adapter->ip_offload_tok, + sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); + + netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); + for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) + netdev_dbg(adapter->netdev, "%016lx\n", + ((unsigned long int *)(buf))[i]); + + netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); + netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); + netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", + buf->tcp_ipv4_chksum); + netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", + buf->tcp_ipv6_chksum); + netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", + buf->udp_ipv4_chksum); + netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", + buf->udp_ipv6_chksum); + netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", + buf->large_tx_ipv4); + netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", + buf->large_tx_ipv6); + netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", + buf->large_rx_ipv4); + netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", + buf->large_rx_ipv6); + netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", + buf->max_ipv4_header_size); + netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", + buf->max_ipv6_header_size); + netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", + buf->max_tcp_header_size); + netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", + buf->max_udp_header_size); + netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", + buf->max_large_tx_size); + netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", + buf->max_large_rx_size); + netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", + buf->ipv6_extension_header); + netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", + buf->tcp_pseudosum_req); + netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", + buf->num_ipv6_ext_headers); + netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", + buf->off_ipv6_ext_headers); + + adapter->ip_offload_ctrl_tok = + dma_map_single(dev, &adapter->ip_offload_ctrl, + sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); + + if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { + dev_err(dev, "Couldn't map ip offload control buffer\n"); + return; + } + + adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); + adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; + adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; + adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; + adapter->ip_offload_ctrl.udp_ipv6_chksum = 
buf->udp_ipv6_chksum; + + /* large_tx/rx disabled for now, additional features needed */ + adapter->ip_offload_ctrl.large_tx_ipv4 = 0; + adapter->ip_offload_ctrl.large_tx_ipv6 = 0; + adapter->ip_offload_ctrl.large_rx_ipv4 = 0; + adapter->ip_offload_ctrl.large_rx_ipv6 = 0; + + adapter->netdev->features = NETIF_F_GSO; + + if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) + adapter->netdev->features |= NETIF_F_IP_CSUM; + + if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) + adapter->netdev->features |= NETIF_F_IPV6_CSUM; + + memset(&crq, 0, sizeof(crq)); + crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; + crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; + crq.control_ip_offload.len = + cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); + crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); + ibmvnic_send_crq(adapter, &crq); +} + +static void handle_error_info_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff; + unsigned long flags; + bool found = false; + int i; + + if (!crq->request_error_rsp.rc.code) { + dev_info(dev, "Request Error Rsp returned with rc=%x\n", + crq->request_error_rsp.rc.code); + return; + } + + spin_lock_irqsave(&adapter->error_list_lock, flags); + list_for_each_entry(error_buff, &adapter->errors, list) + if (error_buff->error_id == crq->request_error_rsp.error_id) { + found = true; + list_del(&error_buff->list); + break; + } + spin_unlock_irqrestore(&adapter->error_list_lock, flags); + + if (!found) { + dev_err(dev, "Couldn't find error id %x\n", + crq->request_error_rsp.error_id); + return; + } + + dev_err(dev, "Detailed info for error id %x:", + crq->request_error_rsp.error_id); + + for (i = 0; i < error_buff->len; i++) { + pr_cont("%02x", (int)error_buff->buff[i]); + if (i % 8 == 7) + pr_cont(" "); + } + pr_cont("\n"); + + dma_unmap_single(dev, error_buff->dma, error_buff->len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + kfree(error_buff); +} + +static void handle_dump_size_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + int len = be32_to_cpu(crq->request_dump_size_rsp.len); + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq newcrq; + unsigned long flags; + + /* allocate and map buffer */ + adapter->dump_data = kmalloc(len, GFP_KERNEL); + if (!adapter->dump_data) { + complete(&adapter->fw_done); + return; + } + + adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, adapter->dump_data_token)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't map dump data\n"); + kfree(adapter->dump_data); + complete(&adapter->fw_done); + return; + } + + inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); + if (!inflight_cmd) { + dma_unmap_single(dev, adapter->dump_data_token, len, + DMA_FROM_DEVICE); + kfree(adapter->dump_data); + complete(&adapter->fw_done); + return; + } + + memset(&newcrq, 0, sizeof(newcrq)); + newcrq.request_dump.first = IBMVNIC_CRQ_CMD; + newcrq.request_dump.cmd = REQUEST_DUMP; + newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token); + newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size); + + memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq)); + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_add_tail(&inflight_cmd->list, &adapter->inflight); + spin_unlock_irqrestore(&adapter->inflight_lock, flags); + + ibmvnic_send_crq(adapter, &newcrq); 
+} + +static void handle_error_indication(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz); + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff; + union ibmvnic_crq new_crq; + unsigned long flags; + + dev_err(dev, "Firmware reports %serror id %x, cause %d\n", + crq->error_indication. + flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", + crq->error_indication.error_id, + crq->error_indication.error_cause); + + error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); + if (!error_buff) + return; + + error_buff->buff = kmalloc(detail_len, GFP_ATOMIC); + if (!error_buff->buff) { + kfree(error_buff); + return; + } + + error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, error_buff->dma)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't map error buffer\n"); + kfree(error_buff->buff); + kfree(error_buff); + return; + } + + inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); + if (!inflight_cmd) { + dma_unmap_single(dev, error_buff->dma, detail_len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + kfree(error_buff); + return; + } + + error_buff->len = detail_len; + error_buff->error_id = crq->error_indication.error_id; + + spin_lock_irqsave(&adapter->error_list_lock, flags); + list_add_tail(&error_buff->list, &adapter->errors); + spin_unlock_irqrestore(&adapter->error_list_lock, flags); + + memset(&new_crq, 0, sizeof(new_crq)); + new_crq.request_error_info.first = IBMVNIC_CRQ_CMD; + new_crq.request_error_info.cmd = REQUEST_ERROR_INFO; + new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); + new_crq.request_error_info.len = cpu_to_be32(detail_len); + new_crq.request_error_info.error_id = crq->error_indication.error_id; + + memcpy(&inflight_cmd->crq, &crq, sizeof(crq)); + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_add_tail(&inflight_cmd->list, &adapter->inflight); + spin_unlock_irqrestore(&adapter->inflight_lock, flags); + + ibmvnic_send_crq(adapter, &new_crq); +} + +static void handle_change_mac_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + rc = crq->change_mac_addr_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); + return; + } + memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], + ETH_ALEN); +} + +static void handle_request_cap_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + u64 *req_value; + char *name; + + switch (be16_to_cpu(crq->request_capability_rsp.capability)) { + case REQ_TX_QUEUES: + req_value = &adapter->req_tx_queues; + name = "tx"; + break; + case REQ_RX_QUEUES: + req_value = &adapter->req_rx_queues; + name = "rx"; + break; + case REQ_RX_ADD_QUEUES: + req_value = &adapter->req_rx_add_queues; + name = "rx_add"; + break; + case REQ_TX_ENTRIES_PER_SUBCRQ: + req_value = &adapter->req_tx_entries_per_subcrq; + name = "tx_entries_per_subcrq"; + break; + case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: + req_value = &adapter->req_rx_add_entries_per_subcrq; + name = "rx_add_entries_per_subcrq"; + break; + case REQ_MTU: + req_value = &adapter->req_mtu; + name = "mtu"; + break; + case PROMISC_REQUESTED: + req_value = &adapter->promisc; + name = "promisc"; + break; + default: + dev_err(dev, "Got invalid 
cap request rsp %d\n", + crq->request_capability.capability); + return; + } + + switch (crq->request_capability_rsp.rc.code) { + case SUCCESS: + break; + case PARTIALSUCCESS: + dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", + *req_value, + (long int)be32_to_cpu(crq->request_capability_rsp. + number), name); + release_sub_crqs(adapter); + *req_value = be32_to_cpu(crq->request_capability_rsp.number); + complete(&adapter->init_done); + return; + default: + dev_err(dev, "Error %d in request cap rsp\n", + crq->request_capability_rsp.rc.code); + return; + } + + /* Done receiving requested capabilities, query IP offload support */ + if (++adapter->requested_caps == 7) { + union ibmvnic_crq newcrq; + int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); + struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = + &adapter->ip_offload_buf; + + adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, + buf_sz, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, adapter->ip_offload_tok)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't map offload buffer\n"); + return; + } + + memset(&newcrq, 0, sizeof(newcrq)); + newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD; + newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; + newcrq.query_ip_offload.len = cpu_to_be32(buf_sz); + newcrq.query_ip_offload.ioba = + cpu_to_be32(adapter->ip_offload_tok); + + ibmvnic_send_crq(adapter, &newcrq); + } +} + +static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; + struct ibmvnic_login_buffer *login = adapter->login_buf; + union ibmvnic_crq crq; + int i; + + dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, + DMA_BIDIRECTIONAL); + dma_unmap_single(dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); + + netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); + for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { + netdev_dbg(adapter->netdev, "%016lx\n", + ((unsigned long int *)(adapter->login_rsp_buf))[i]); + } + + /* Sanity checks */ + if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || + (be32_to_cpu(login->num_rxcomp_subcrqs) * + adapter->req_rx_add_queues != + be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { + dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); + ibmvnic_remove(adapter->vdev); + return -EIO; + } + complete(&adapter->init_done); + + memset(&crq, 0, sizeof(crq)); + crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD; + crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM; + ibmvnic_send_crq(adapter, &crq); + + return 0; +} + +static void handle_request_map_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + u8 map_id = crq->request_map_rsp.map_id; + int tx_subcrqs; + int rx_subcrqs; + long rc; + int i; + + tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + + rc = crq->request_map_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc); + adapter->map_id--; + /* need to find and zero tx/rx_pool map_id */ + for (i = 0; i < tx_subcrqs; i++) { + if (adapter->tx_pool[i].long_term_buff.map_id == map_id) + adapter->tx_pool[i].long_term_buff.map_id = 0; + } + for (i = 0; i < rx_subcrqs; i++) { + if (adapter->rx_pool[i].long_term_buff.map_id == map_id) + 
adapter->rx_pool[i].long_term_buff.map_id = 0; + } + } + complete(&adapter->fw_done); +} + +static void handle_request_unmap_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + long rc; + + rc = crq->request_unmap_rsp.rc.code; + if (rc) + dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); +} + +static void handle_query_map_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + rc = crq->query_map_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); + return; + } + netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", + crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, + crq->query_map_rsp.free_pages); +} + +static void handle_query_cap_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + atomic_dec(&adapter->running_cap_queries); + netdev_dbg(netdev, "Outstanding queries: %d\n", + atomic_read(&adapter->running_cap_queries)); + rc = crq->query_capability.rc.code; + if (rc) { + dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); + goto out; + } + + switch (be16_to_cpu(crq->query_capability.capability)) { + case MIN_TX_QUEUES: + adapter->min_tx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_tx_queues = %lld\n", + adapter->min_tx_queues); + break; + case MIN_RX_QUEUES: + adapter->min_rx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_queues = %lld\n", + adapter->min_rx_queues); + break; + case MIN_RX_ADD_QUEUES: + adapter->min_rx_add_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_add_queues = %lld\n", + adapter->min_rx_add_queues); + break; + case MAX_TX_QUEUES: + adapter->max_tx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_queues = %lld\n", + adapter->max_tx_queues); + break; + case MAX_RX_QUEUES: + adapter->max_rx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_queues = %lld\n", + adapter->max_rx_queues); + break; + case MAX_RX_ADD_QUEUES: + adapter->max_rx_add_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_add_queues = %lld\n", + adapter->max_rx_add_queues); + break; + case MIN_TX_ENTRIES_PER_SUBCRQ: + adapter->min_tx_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", + adapter->min_tx_entries_per_subcrq); + break; + case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: + adapter->min_rx_add_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", + adapter->min_rx_add_entries_per_subcrq); + break; + case MAX_TX_ENTRIES_PER_SUBCRQ: + adapter->max_tx_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", + adapter->max_tx_entries_per_subcrq); + break; + case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: + adapter->max_rx_add_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", + adapter->max_rx_add_entries_per_subcrq); + break; + case TCP_IP_OFFLOAD: + adapter->tcp_ip_offload = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "tcp_ip_offload = %lld\n", + 
adapter->tcp_ip_offload); + break; + case PROMISC_SUPPORTED: + adapter->promisc_supported = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "promisc_supported = %lld\n", + adapter->promisc_supported); + break; + case MIN_MTU: + adapter->min_mtu = be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); + break; + case MAX_MTU: + adapter->max_mtu = be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); + break; + case MAX_MULTICAST_FILTERS: + adapter->max_multicast_filters = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_multicast_filters = %lld\n", + adapter->max_multicast_filters); + break; + case VLAN_HEADER_INSERTION: + adapter->vlan_header_insertion = + be32_to_cpu(crq->query_capability.number); + if (adapter->vlan_header_insertion) + netdev->features |= NETIF_F_HW_VLAN_STAG_TX; + netdev_dbg(netdev, "vlan_header_insertion = %lld\n", + adapter->vlan_header_insertion); + break; + case MAX_TX_SG_ENTRIES: + adapter->max_tx_sg_entries = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", + adapter->max_tx_sg_entries); + break; + case RX_SG_SUPPORTED: + adapter->rx_sg_supported = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "rx_sg_supported = %lld\n", + adapter->rx_sg_supported); + break; + case OPT_TX_COMP_SUB_QUEUES: + adapter->opt_tx_comp_sub_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", + adapter->opt_tx_comp_sub_queues); + break; + case OPT_RX_COMP_QUEUES: + adapter->opt_rx_comp_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", + adapter->opt_rx_comp_queues); + break; + case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: + adapter->opt_rx_bufadd_q_per_rx_comp_q = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", + adapter->opt_rx_bufadd_q_per_rx_comp_q); + break; + case OPT_TX_ENTRIES_PER_SUBCRQ: + adapter->opt_tx_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", + adapter->opt_tx_entries_per_subcrq); + break; + case OPT_RXBA_ENTRIES_PER_SUBCRQ: + adapter->opt_rxba_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", + adapter->opt_rxba_entries_per_subcrq); + break; + case TX_RX_DESC_REQ: + adapter->tx_rx_desc_req = crq->query_capability.number; + netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", + adapter->tx_rx_desc_req); + break; + + default: + netdev_err(netdev, "Got invalid cap rsp %d\n", + crq->query_capability.capability); + } + +out: + if (atomic_read(&adapter->running_cap_queries) == 0) + complete(&adapter->init_done); + /* We're done querying the capabilities, initialize sub-crqs */ +} + +static void handle_control_ras_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + u8 correlator = crq->control_ras_rsp.correlator; + struct device *dev = &adapter->vdev->dev; + bool found = false; + int i; + + if (crq->control_ras_rsp.rc.code) { + dev_warn(dev, "Control ras failed rc=%d\n", + crq->control_ras_rsp.rc.code); + return; + } + + for (i = 0; i < adapter->ras_comp_num; i++) { + if (adapter->ras_comps[i].correlator == correlator) { + found = true; + break; + } + } + + if (!found) { + dev_warn(dev, "Correlator not found on control_ras_rsp\n"); + return; + } + + switch 
(crq->control_ras_rsp.op) { + case IBMVNIC_TRACE_LEVEL: + adapter->ras_comps[i].trace_level = crq->control_ras.level; + break; + case IBMVNIC_ERROR_LEVEL: + adapter->ras_comps[i].error_check_level = + crq->control_ras.level; + break; + case IBMVNIC_TRACE_PAUSE: + adapter->ras_comp_int[i].paused = 1; + break; + case IBMVNIC_TRACE_RESUME: + adapter->ras_comp_int[i].paused = 0; + break; + case IBMVNIC_TRACE_ON: + adapter->ras_comps[i].trace_on = 1; + break; + case IBMVNIC_TRACE_OFF: + adapter->ras_comps[i].trace_on = 0; + break; + case IBMVNIC_CHG_TRACE_BUFF_SZ: + /* trace_buff_sz is 3 bytes, stuff it into an int */ + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0; + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] = + crq->control_ras_rsp.trace_buff_sz[0]; + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] = + crq->control_ras_rsp.trace_buff_sz[1]; + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] = + crq->control_ras_rsp.trace_buff_sz[2]; + break; + default: + dev_err(dev, "invalid op %d on control_ras_rsp", + crq->control_ras_rsp.op); + } +} + +static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, + loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_fw_trace_entry *trace; + int num = ras_comp_int->num; + union ibmvnic_crq crq; + dma_addr_t trace_tok; + + if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) + return 0; + + trace = + dma_alloc_coherent(dev, + be32_to_cpu(adapter->ras_comps[num]. + trace_buff_size), &trace_tok, + GFP_KERNEL); + if (!trace) { + dev_err(dev, "Couldn't alloc trace buffer\n"); + return 0; + } + + memset(&crq, 0, sizeof(crq)); + crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD; + crq.collect_fw_trace.cmd = COLLECT_FW_TRACE; + crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; + crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); + crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; + ibmvnic_send_crq(adapter, &crq); + + init_completion(&adapter->fw_done); + wait_for_completion(&adapter->fw_done); + + if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) + len = + be32_to_cpu(adapter->ras_comps[num].trace_buff_size) - + *ppos; + + copy_to_user(user_buf, &((u8 *)trace)[*ppos], len); + + dma_free_coherent(dev, + be32_to_cpu(adapter->ras_comps[num].trace_buff_size), + trace, trace_tok); + *ppos += len; + return len; +} + +static const struct file_operations trace_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = trace_read, +}; + +static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len, + loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* 1 or 0 plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused); + + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t paused_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + 
int num = ras_comp_int->num; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + adapter->ras_comp_int[num].paused = val ? 1 : 0; + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = adapter->ras_comps[num].correlator; + crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations paused_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = paused_read, + .write = paused_write, +}; + +static ssize_t tracing_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* 1 or 0 plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on); + + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t tracing_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = adapter->ras_comps[num].correlator; + crq.control_ras.op = val ? 
IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF; + + return len; +} + +static const struct file_operations tracing_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = tracing_read, + .write = tracing_write, +}; + +static ssize_t error_level_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* decimal max char plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level); + + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t error_level_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + if (val > 9) + val = 9; + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = adapter->ras_comps[num].correlator; + crq.control_ras.op = IBMVNIC_ERROR_LEVEL; + crq.control_ras.level = val; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations error_level_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = error_level_read, + .write = error_level_write, +}; + +static ssize_t trace_level_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* decimal max char plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level); + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t trace_level_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + if (val > 9) + val = 9; + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = + adapter->ras_comps[ras_comp_int->num].correlator; + crq.control_ras.op = IBMVNIC_TRACE_LEVEL; + crq.control_ras.level = val; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations trace_level_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = trace_level_read, + .write = trace_level_write, +}; + +static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[9]; /* decimal max int plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", 
adapter->ras_comps[num].trace_buff_size); + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t trace_buff_size_write(struct file *file, + const char __user *user_buf, size_t len, + loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = + adapter->ras_comps[ras_comp_int->num].correlator; + crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ; + /* trace_buff_sz is 3 bytes, stuff an int into it */ + crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5]; + crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6]; + crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7]; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations trace_size_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = trace_buff_size_read, + .write = trace_buff_size_write, +}; + +static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct dentry *dir_ent; + struct dentry *ent; + int i; + + debugfs_remove_recursive(adapter->ras_comps_ent); + + adapter->ras_comps_ent = debugfs_create_dir("ras_comps", + adapter->debugfs_dir); + if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) { + dev_info(dev, "debugfs create ras_comps dir failed\n"); + return; + } + + for (i = 0; i < adapter->ras_comp_num; i++) { + dir_ent = debugfs_create_dir(adapter->ras_comps[i].name, + adapter->ras_comps_ent); + if (!dir_ent || IS_ERR(dir_ent)) { + dev_info(dev, "debugfs create %s dir failed\n", + adapter->ras_comps[i].name); + continue; + } + + adapter->ras_comp_int[i].adapter = adapter; + adapter->ras_comp_int[i].num = i; + adapter->ras_comp_int[i].desc_blob.data = + &adapter->ras_comps[i].description; + adapter->ras_comp_int[i].desc_blob.size = + sizeof(adapter->ras_comps[i].description); + + /* Don't need to remember the dentry's because the debugfs dir + * gets removed recursively + */ + ent = debugfs_create_blob("description", S_IRUGO, dir_ent, + &adapter->ras_comp_int[i].desc_blob); + ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR, + dir_ent, &adapter->ras_comp_int[i], + &trace_size_ops); + ent = debugfs_create_file("trace_level", + S_IRUGO | + (adapter->ras_comps[i].trace_level != + 0xFF ? S_IWUSR : 0), + dir_ent, &adapter->ras_comp_int[i], + &trace_level_ops); + ent = debugfs_create_file("error_level", + S_IRUGO | + (adapter-> + ras_comps[i].error_check_level != + 0xFF ? 
S_IWUSR : 0), + dir_ent, &adapter->ras_comp_int[i], + &trace_level_ops); + ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR, + dir_ent, &adapter->ras_comp_int[i], + &tracing_ops); + ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR, + dir_ent, &adapter->ras_comp_int[i], + &paused_ops); + ent = debugfs_create_file("trace", S_IRUGO, dir_ent, + &adapter->ras_comp_int[i], + &trace_ops); + } +} + +static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component); + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq newcrq; + + adapter->ras_comps = dma_alloc_coherent(dev, len, + &adapter->ras_comps_tok, + GFP_KERNEL); + if (!adapter->ras_comps) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't alloc fw comps buffer\n"); + return; + } + + adapter->ras_comp_int = kmalloc(adapter->ras_comp_num * + sizeof(struct ibmvnic_fw_comp_internal), + GFP_KERNEL); + if (!adapter->ras_comp_int) + dma_free_coherent(dev, len, adapter->ras_comps, + adapter->ras_comps_tok); + + memset(&newcrq, 0, sizeof(newcrq)); + newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD; + newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS; + newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok); + newcrq.request_ras_comps.len = cpu_to_be32(len); + ibmvnic_send_crq(adapter, &newcrq); +} + +static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff; + unsigned long flags; + unsigned long flags2; + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_for_each_entry(inflight_cmd, &adapter->inflight, list) { + switch (inflight_cmd->crq.generic.cmd) { + case LOGIN: + dma_unmap_single(dev, adapter->login_buf_token, + adapter->login_buf_sz, + DMA_BIDIRECTIONAL); + dma_unmap_single(dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, + DMA_BIDIRECTIONAL); + kfree(adapter->login_rsp_buf); + kfree(adapter->login_buf); + break; + case REQUEST_DUMP: + complete(&adapter->fw_done); + break; + case REQUEST_ERROR_INFO: + spin_lock_irqsave(&adapter->error_list_lock, flags2); + list_for_each_entry(error_buff, &adapter->errors, + list) { + dma_unmap_single(dev, error_buff->dma, + error_buff->len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + list_del(&error_buff->list); + kfree(error_buff); + } + spin_unlock_irqrestore(&adapter->error_list_lock, + flags2); + break; + } + list_del(&inflight_cmd->list); + kfree(inflight_cmd); + } + spin_unlock_irqrestore(&adapter->inflight_lock, flags); +} + +static void ibmvnic_handle_crq(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_generic_crq *gen_crq = &crq->generic; + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", + ((unsigned long int *)crq)[0], + ((unsigned long int *)crq)[1]); + switch (gen_crq->first) { + case IBMVNIC_CRQ_INIT_RSP: + switch (gen_crq->cmd) { + case IBMVNIC_CRQ_INIT: + dev_info(dev, "Partner initialized\n"); + /* Send back a response */ + rc = ibmvnic_send_crq_init_complete(adapter); + if (rc == 0) + send_version_xchg(adapter); + else + dev_err(dev, "Can't send initrsp rc=%ld\n", rc); + break; + case IBMVNIC_CRQ_INIT_COMPLETE: + dev_info(dev, "Partner initialization complete\n"); + send_version_xchg(adapter); + break; + default: + 
dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); + } + return; + case IBMVNIC_CRQ_XPORT_EVENT: + if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { + dev_info(dev, "Re-enabling adapter\n"); + adapter->migrated = true; + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + rc = ibmvnic_reenable_crq_queue(adapter); + if (rc) + dev_err(dev, "Error after enable rc=%ld\n", rc); + adapter->migrated = false; + rc = ibmvnic_send_crq_init(adapter); + if (rc) + dev_err(dev, "Error sending init rc=%ld\n", rc); + } else { + /* The adapter lost the connection */ + dev_err(dev, "Virtual Adapter failed (rc=%d)\n", + gen_crq->cmd); + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + } + return; + case IBMVNIC_CRQ_CMD_RSP: + break; + default: + dev_err(dev, "Got an invalid msg type 0x%02x\n", + gen_crq->first); + return; + } + + switch (gen_crq->cmd) { + case VERSION_EXCHANGE_RSP: + rc = crq->version_exchange_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); + break; + } + dev_info(dev, "Partner protocol version is %d\n", + crq->version_exchange_rsp.version); + if (be16_to_cpu(crq->version_exchange_rsp.version) < + ibmvnic_version) + ibmvnic_version = + be16_to_cpu(crq->version_exchange_rsp.version); + send_cap_queries(adapter); + break; + case QUERY_CAPABILITY_RSP: + handle_query_cap_rsp(crq, adapter); + break; + case QUERY_MAP_RSP: + handle_query_map_rsp(crq, adapter); + break; + case REQUEST_MAP_RSP: + handle_request_map_rsp(crq, adapter); + break; + case REQUEST_UNMAP_RSP: + handle_request_unmap_rsp(crq, adapter); + break; + case REQUEST_CAPABILITY_RSP: + handle_request_cap_rsp(crq, adapter); + break; + case LOGIN_RSP: + netdev_dbg(netdev, "Got Login Response\n"); + handle_login_rsp(crq, adapter); + break; + case LOGICAL_LINK_STATE_RSP: + netdev_dbg(netdev, "Got Logical Link State Response\n"); + adapter->logical_link_state = + crq->logical_link_state_rsp.link_state; + break; + case LINK_STATE_INDICATION: + netdev_dbg(netdev, "Got Logical Link State Indication\n"); + adapter->phys_link_state = + crq->link_state_indication.phys_link_state; + adapter->logical_link_state = + crq->link_state_indication.logical_link_state; + break; + case CHANGE_MAC_ADDR_RSP: + netdev_dbg(netdev, "Got MAC address change Response\n"); + handle_change_mac_rsp(crq, adapter); + break; + case ERROR_INDICATION: + netdev_dbg(netdev, "Got Error Indication\n"); + handle_error_indication(crq, adapter); + break; + case REQUEST_ERROR_RSP: + netdev_dbg(netdev, "Got Error Detail Response\n"); + handle_error_info_rsp(crq, adapter); + break; + case REQUEST_STATISTICS_RSP: + netdev_dbg(netdev, "Got Statistics Response\n"); + complete(&adapter->stats_done); + break; + case REQUEST_DUMP_SIZE_RSP: + netdev_dbg(netdev, "Got Request Dump Size Response\n"); + handle_dump_size_rsp(crq, adapter); + break; + case REQUEST_DUMP_RSP: + netdev_dbg(netdev, "Got Request Dump Response\n"); + complete(&adapter->fw_done); + break; + case QUERY_IP_OFFLOAD_RSP: + netdev_dbg(netdev, "Got Query IP offload Response\n"); + handle_query_ip_offload_rsp(adapter); + break; + case MULTICAST_CTRL_RSP: + netdev_dbg(netdev, "Got multicast control Response\n"); + break; + case CONTROL_IP_OFFLOAD_RSP: + netdev_dbg(netdev, "Got Control IP offload Response\n"); + dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, + sizeof(adapter->ip_offload_ctrl), + DMA_TO_DEVICE); + /* We're done with the queries, perform the login */ + send_login(adapter); + break; + case REQUEST_RAS_COMP_NUM_RSP: + netdev_dbg(netdev, "Got Request 
RAS Comp Num Response\n"); + if (crq->request_ras_comp_num_rsp.rc.code == 10) { + netdev_dbg(netdev, "Request RAS Comp Num not supported\n"); + break; + } + adapter->ras_comp_num = + be32_to_cpu(crq->request_ras_comp_num_rsp.num_components); + handle_request_ras_comp_num_rsp(crq, adapter); + break; + case REQUEST_RAS_COMPS_RSP: + netdev_dbg(netdev, "Got Request RAS Comps Response\n"); + handle_request_ras_comps_rsp(crq, adapter); + break; + case CONTROL_RAS_RSP: + netdev_dbg(netdev, "Got Control RAS Response\n"); + handle_control_ras_rsp(crq, adapter); + break; + case COLLECT_FW_TRACE_RSP: + netdev_dbg(netdev, "Got Collect firmware trace Response\n"); + complete(&adapter->fw_done); + break; + default: + netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", + gen_crq->cmd); + } +} + +static irqreturn_t ibmvnic_interrupt(int irq, void *instance) +{ + struct ibmvnic_adapter *adapter = instance; + struct ibmvnic_crq_queue *queue = &adapter->crq; + struct vio_dev *vdev = adapter->vdev; + union ibmvnic_crq *crq; + unsigned long flags; + bool done = false; + + spin_lock_irqsave(&queue->lock, flags); + vio_disable_interrupts(vdev); + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvnic_next_crq(adapter)) != NULL) { + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } + vio_enable_interrupts(vdev); + crq = ibmvnic_next_crq(adapter); + if (crq) { + vio_disable_interrupts(vdev); + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } else { + done = true; + } + } + spin_unlock_irqrestore(&queue->lock, flags); + return IRQ_HANDLED; +} + +static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) +{ + struct vio_dev *vdev = adapter->vdev; + int rc; + + do { + rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) + dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); + + return rc; +} + +static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *crq = &adapter->crq; + struct device *dev = &adapter->vdev->dev; + struct vio_dev *vdev = adapter->vdev; + int rc; + + /* Close the CRQ */ + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + /* Clean out the queue */ + memset(crq->msgs, 0, PAGE_SIZE); + crq->cur = 0; + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_CLOSED) + /* Adapter is good, but other end is not ready */ + dev_warn(dev, "Partner adapter not ready\n"); + else if (rc != 0) + dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); + + return rc; +} + +static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *crq = &adapter->crq; + struct vio_dev *vdev = adapter->vdev; + long rc; + + netdev_dbg(adapter->netdev, "Releasing CRQ\n"); + free_irq(vdev->irq, adapter); + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_page((unsigned long)crq->msgs); +} + +static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *crq = &adapter->crq; + struct device *dev = &adapter->vdev->dev; + struct vio_dev *vdev = adapter->vdev; + int rc, retrc = -ENOMEM; + + crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); + /* Should we 
allocate more than one page? */ + + if (!crq->msgs) + return -ENOMEM; + + crq->size = PAGE_SIZE / sizeof(*crq->msgs); + crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, crq->msg_token)) + goto map_failed; + + rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_RESOURCE) + /* maybe kexecing and resource is busy. try a reset */ + rc = ibmvnic_reset_crq(adapter); + retrc = rc; + + if (rc == H_CLOSED) { + dev_warn(dev, "Partner adapter not ready\n"); + } else if (rc) { + dev_warn(dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + retrc = 0; + + netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); + rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, + adapter); + if (rc) { + dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", + vdev->irq, rc); + goto req_irq_failed; + } + + rc = vio_enable_interrupts(vdev); + if (rc) { + dev_err(dev, "Error %d enabling interrupts\n", rc); + goto req_irq_failed; + } + + crq->cur = 0; + spin_lock_init(&crq->lock); + + return retrc; + +req_irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_crq_failed: + dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); +map_failed: + free_page((unsigned long)crq->msgs); + return retrc; +} + +/* debugfs for dump */ +static int ibmvnic_dump_show(struct seq_file *seq, void *v) +{ + struct net_device *netdev = seq->private; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_dump_size.first = IBMVNIC_CRQ_CMD; + crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; + ibmvnic_send_crq(adapter, &crq); + + init_completion(&adapter->fw_done); + wait_for_completion(&adapter->fw_done); + + seq_write(seq, adapter->dump_data, adapter->dump_data_size); + + dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size, + DMA_BIDIRECTIONAL); + + kfree(adapter->dump_data); + + return 0; +} + +static int ibmvnic_dump_open(struct inode *inode, struct file *file) +{ + return single_open(file, ibmvnic_dump_show, inode->i_private); +} + +static const struct file_operations ibmvnic_dump_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) +{ + struct ibmvnic_adapter *adapter; + struct net_device *netdev; + unsigned char *mac_addr_p; + struct dentry *ent; + char buf[16]; /* debugfs name buf */ + int rc; + + dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", + dev->unit_address); + + mac_addr_p = (unsigned char *)vio_get_attribute(dev, + VETH_MAC_ADDR, NULL); + if (!mac_addr_p) { + dev_err(&dev->dev, + "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", + __FILE__, __LINE__); + return 0; + } + + netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), + IBMVNIC_MAX_TX_QUEUES); + if (!netdev) + return -ENOMEM; + + adapter = netdev_priv(netdev); + dev_set_drvdata(&dev->dev, netdev); + adapter->vdev = dev; + adapter->netdev = netdev; + + ether_addr_copy(adapter->mac_addr, mac_addr_p); + ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + netdev->irq = dev->irq; + netdev->netdev_ops = &ibmvnic_netdev_ops; + netdev->ethtool_ops = &ibmvnic_ethtool_ops; + SET_NETDEV_DEV(netdev, &dev->dev); + + 
spin_lock_init(&adapter->stats_lock); + + rc = ibmvnic_init_crq_queue(adapter); + if (rc) { + dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc); + goto free_netdev; + } + + INIT_LIST_HEAD(&adapter->errors); + INIT_LIST_HEAD(&adapter->inflight); + spin_lock_init(&adapter->error_list_lock); + spin_lock_init(&adapter->inflight_lock); + + adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats, + sizeof(struct ibmvnic_statistics), + DMA_FROM_DEVICE); + if (dma_mapping_error(&dev->dev, adapter->stats_token)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(&dev->dev, "Couldn't map stats buffer\n"); + goto free_crq; + } + + snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address); + ent = debugfs_create_dir(buf, NULL); + if (!ent || IS_ERR(ent)) { + dev_info(&dev->dev, "debugfs create directory failed\n"); + adapter->debugfs_dir = NULL; + } else { + adapter->debugfs_dir = ent; + ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir, + netdev, &ibmvnic_dump_ops); + if (!ent || IS_ERR(ent)) { + dev_info(&dev->dev, + "debugfs create dump file failed\n"); + adapter->debugfs_dump = NULL; + } else { + adapter->debugfs_dump = ent; + } + } + ibmvnic_send_crq_init(adapter); + + init_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + + /* needed to pull init_sub_crqs outside of an interrupt context + * because it creates IRQ mappings for the subCRQ queues, causing + * a kernel warning + */ + init_sub_crqs(adapter, 0); + + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + + /* if init_sub_crqs is partially successful, retry */ + while (!adapter->tx_scrq || !adapter->rx_scrq) { + init_sub_crqs(adapter, 1); + + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + } + + netdev->real_num_tx_queues = adapter->req_tx_queues; + + rc = register_netdev(netdev); + if (rc) { + dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); + goto free_debugfs; + } + dev_info(&dev->dev, "ibmvnic registered\n"); + + return 0; + +free_debugfs: + if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) + debugfs_remove_recursive(adapter->debugfs_dir); +free_crq: + ibmvnic_release_crq_queue(adapter); +free_netdev: + free_netdev(netdev); + return rc; +} + +static int ibmvnic_remove(struct vio_dev *dev) +{ + struct net_device *netdev = dev_get_drvdata(&dev->dev); + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + + release_sub_crqs(adapter); + + ibmvnic_release_crq_queue(adapter); + + if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) + debugfs_remove_recursive(adapter->debugfs_dir); + + if (adapter->ras_comps) + dma_free_coherent(&dev->dev, + adapter->ras_comp_num * + sizeof(struct ibmvnic_fw_component), + adapter->ras_comps, adapter->ras_comps_tok); + + kfree(adapter->ras_comp_int); + + free_netdev(netdev); + dev_set_drvdata(&dev->dev, NULL); + + return 0; +} + +static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) +{ + struct net_device *netdev = dev_get_drvdata(&vdev->dev); + struct ibmvnic_adapter *adapter; + struct iommu_table *tbl; + unsigned long ret = 0; + int i; + + tbl = get_iommu_table_base(&vdev->dev); + + /* netdev inits at probe time along with the structures we need below*/ + if (!netdev) + return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); + + adapter = netdev_priv(netdev); + + ret += PAGE_SIZE; /* the crq message queue */ + ret += adapter->bounce_buffer_size; + ret += IOMMU_PAGE_ALIGN(sizeof(struct 
ibmvnic_statistics), tbl); + + for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) + ret += 4 * PAGE_SIZE; /* the scrq message queue */ + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) + ret += adapter->rx_pool[i].size * + IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); + + return ret; +} + +static int ibmvnic_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int i; + + /* kick the interrupt handlers just in case we lost an interrupt */ + for (i = 0; i < adapter->req_rx_queues; i++) + ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, + adapter->rx_scrq[i]); + + return 0; +} + +static struct vio_device_id ibmvnic_device_table[] = { + {"network", "IBM,vnic"}, + {"", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); + +static const struct dev_pm_ops ibmvnic_pm_ops = { + .resume = ibmvnic_resume +}; + +static struct vio_driver ibmvnic_driver = { + .id_table = ibmvnic_device_table, + .probe = ibmvnic_probe, + .remove = ibmvnic_remove, + .get_desired_dma = ibmvnic_get_desired_dma, + .name = ibmvnic_driver_name, + .pm = &ibmvnic_pm_ops, +}; + +/* module functions */ +static int __init ibmvnic_module_init(void) +{ + pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, + IBMVNIC_DRIVER_VERSION); + + return vio_register_driver(&ibmvnic_driver); +} + +static void __exit ibmvnic_module_exit(void) +{ + vio_unregister_driver(&ibmvnic_driver); +} + +module_init(ibmvnic_module_init); +module_exit(ibmvnic_module_exit); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h new file mode 100644 index 000000000000..1242925ad34c --- /dev/null +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -0,0 +1,1046 @@ +/**************************************************************************/ +/* */ +/* IBM System i and System p Virtual NIC Device Driver */ +/* Copyright (C) 2014 IBM Corp. */ +/* Santiago Leon (santi_leon@yahoo.com) */ +/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ +/* John Allen (jallen@linux.vnet.ibm.com) */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program. */ +/* */ +/* This module contains the implementation of a virtual ethernet device */ +/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */ +/* option of the RS/6000 Platform Architecture to interface with virtual */ +/* ethernet NICs that are presented to the partition by the hypervisor. 
*/ +/* */ +/**************************************************************************/ + +#define IBMVNIC_NAME "ibmvnic" +#define IBMVNIC_DRIVER_VERSION "1.0" +#define IBMVNIC_INVALID_MAP -1 +#define IBMVNIC_STATS_TIMEOUT 1 +/* basic structures plus 100 2k buffers */ +#define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305 + +/* Initial module_parameters */ +#define IBMVNIC_RX_WEIGHT 16 +/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */ +#define IBMVNIC_BUFFS_PER_POOL 100 +#define IBMVNIC_MAX_TX_QUEUES 5 + +struct ibmvnic_login_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_LB 1 + __be32 num_txcomp_subcrqs; + __be32 off_txcomp_subcrqs; + __be32 num_rxcomp_subcrqs; + __be32 off_rxcomp_subcrqs; + __be32 login_rsp_ioba; + __be32 login_rsp_len; +} __packed __aligned(8); + +struct ibmvnic_login_rsp_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_LRB 1 + __be32 num_txsubm_subcrqs; + __be32 off_txsubm_subcrqs; + __be32 num_rxadd_subcrqs; + __be32 off_rxadd_subcrqs; + __be32 off_rxadd_buff_size; + __be32 num_supp_tx_desc; + __be32 off_supp_tx_desc; +} __packed __aligned(8); + +struct ibmvnic_query_ip_offload_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_IOB 1 + u8 ipv4_chksum; + u8 ipv6_chksum; + u8 tcp_ipv4_chksum; + u8 tcp_ipv6_chksum; + u8 udp_ipv4_chksum; + u8 udp_ipv6_chksum; + u8 large_tx_ipv4; + u8 large_tx_ipv6; + u8 large_rx_ipv4; + u8 large_rx_ipv6; + u8 reserved1[14]; + __be16 max_ipv4_header_size; + __be16 max_ipv6_header_size; + __be16 max_tcp_header_size; + __be16 max_udp_header_size; + __be32 max_large_tx_size; + __be32 max_large_rx_size; + u8 reserved2[16]; + u8 ipv6_extension_header; +#define IPV6_EH_NOT_SUPPORTED 0x00 +#define IPV6_EH_SUPPORTED_LIM 0x01 +#define IPV6_EH_SUPPORTED 0xFF + u8 tcp_pseudosum_req; +#define TCP_PS_NOT_REQUIRED 0x00 +#define TCP_PS_REQUIRED 0x01 + u8 reserved3[30]; + __be16 num_ipv6_ext_headers; + __be32 off_ipv6_ext_headers; + u8 reserved4[154]; +} __packed __aligned(8); + +struct ibmvnic_control_ip_offload_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_IOB 1 + u8 ipv4_chksum; + u8 ipv6_chksum; + u8 tcp_ipv4_chksum; + u8 tcp_ipv6_chksum; + u8 udp_ipv4_chksum; + u8 udp_ipv6_chksum; + u8 large_tx_ipv4; + u8 large_tx_ipv6; + u8 bad_packet_rx; + u8 large_rx_ipv4; + u8 large_rx_ipv6; + u8 reserved4[111]; +} __packed __aligned(8); + +struct ibmvnic_fw_component { + u8 name[48]; + __be32 trace_buff_size; + u8 correlator; + u8 trace_level; + u8 parent_correlator; + u8 error_check_level; + u8 trace_on; + u8 reserved[7]; + u8 description[192]; +} __packed __aligned(8); + +struct ibmvnic_fw_trace_entry { + __be32 trace_id; + u8 num_valid_data; + u8 reserved[3]; + __be64 pmc_registers; + __be64 timebase; + __be64 trace_data[5]; +} __packed __aligned(8); + +struct ibmvnic_statistics { + __be32 version; + __be32 promiscuous; + __be64 rx_packets; + __be64 rx_bytes; + __be64 tx_packets; + __be64 tx_bytes; + __be64 ucast_tx_packets; + __be64 ucast_rx_packets; + __be64 mcast_tx_packets; + __be64 mcast_rx_packets; + __be64 bcast_tx_packets; + __be64 bcast_rx_packets; + __be64 align_errors; + __be64 fcs_errors; + __be64 single_collision_frames; + __be64 multi_collision_frames; + __be64 sqe_test_errors; + __be64 deferred_tx; + __be64 late_collisions; + __be64 excess_collisions; + __be64 internal_mac_tx_errors; + __be64 carrier_sense; + __be64 too_long_frames; + __be64 internal_mac_rx_errors; + u8 reserved[72]; +} __packed __aligned(8); + +struct ibmvnic_acl_buffer { + __be32 len; + __be32 
version; +#define INITIAL_VERSION_IOB 1 + u8 mac_acls_restrict; + u8 vlan_acls_restrict; + u8 reserved1[22]; + __be32 num_mac_addrs; + __be32 offset_mac_addrs; + __be32 num_vlan_ids; + __be32 offset_vlan_ids; + u8 reserved2[80]; +} __packed __aligned(8); + +/* descriptors have been changed, how should this be defined? 1? 4? */ + +#define IBMVNIC_TX_DESC_VERSIONS 3 + +/* is this still needed? */ +struct ibmvnic_tx_comp_desc { + u8 first; + u8 num_comps; + __be16 rcs[5]; + __be32 correlators[5]; +} __packed __aligned(8); + +/* some flags that included in v0 descriptor, which is gone + * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM + * and only in some offload_flags variable that doesn't seem + * to be used anywhere, can probably be removed? + */ + +#define IBMVNIC_TCP_CHKSUM 0x20 +#define IBMVNIC_UDP_CHKSUM 0x08 + +#define IBMVNIC_MAX_FRAGS_PER_CRQ 3 + +struct ibmvnic_tx_desc { + u8 first; + u8 type; + +#define IBMVNIC_TX_DESC 0x10 + u8 n_crq_elem; + u8 n_sge; + u8 flags1; +#define IBMVNIC_TX_COMP_NEEDED 0x80 +#define IBMVNIC_TX_CHKSUM_OFFLOAD 0x40 +#define IBMVNIC_TX_LSO 0x20 +#define IBMVNIC_TX_PROT_TCP 0x10 +#define IBMVNIC_TX_PROT_UDP 0x08 +#define IBMVNIC_TX_PROT_IPV4 0x04 +#define IBMVNIC_TX_PROT_IPV6 0x02 +#define IBMVNIC_TX_VLAN_PRESENT 0x01 + u8 flags2; +#define IBMVNIC_TX_VLAN_INSERT 0x80 + __be16 mss; + u8 reserved[4]; + __be32 correlator; + __be16 vlan_id; + __be16 dma_reg; + __be32 sge_len; + __be64 ioba; +} __packed __aligned(8); + +struct ibmvnic_hdr_desc { + u8 first; + u8 type; +#define IBMVNIC_HDR_DESC 0x11 + u8 len; + u8 l2_len; + __be16 l3_len; + u8 l4_len; + u8 flag; + u8 data[24]; +} __packed __aligned(8); + +struct ibmvnic_hdr_ext_desc { + u8 first; + u8 type; +#define IBMVNIC_HDR_EXT_DESC 0x12 + u8 len; + u8 data[29]; +} __packed __aligned(8); + +struct ibmvnic_sge_desc { + u8 first; + u8 type; +#define IBMVNIC_SGE_DESC 0x30 + __be16 sge1_dma_reg; + __be32 sge1_len; + __be64 sge1_ioba; + __be16 reserved; + __be16 sge2_dma_reg; + __be32 sge2_len; + __be64 sge2_ioba; +} __packed __aligned(8); + +struct ibmvnic_rx_comp_desc { + u8 first; + u8 flags; +#define IBMVNIC_IP_CHKSUM_GOOD 0x80 +#define IBMVNIC_TCP_UDP_CHKSUM_GOOD 0x40 +#define IBMVNIC_END_FRAME 0x20 +#define IBMVNIC_EXACT_MC 0x10 +#define IBMVNIC_VLAN_STRIPPED 0x08 + __be16 off_frame_data; + __be32 len; + __be64 correlator; + __be16 vlan_tci; + __be16 rc; + u8 reserved[12]; +} __packed __aligned(8); + +struct ibmvnic_generic_scrq { + u8 first; + u8 reserved[31]; +} __packed __aligned(8); + +struct ibmvnic_rx_buff_add_desc { + u8 first; + u8 reserved[7]; + __be64 correlator; + __be32 ioba; + u8 map_id; + __be32 len:24; + u8 reserved2[8]; +} __packed __aligned(8); + +struct ibmvnic_rc { + u8 code; /* one of enum ibmvnic_rc_codes */ + u8 detailed_data[3]; +} __packed __aligned(4); + +struct ibmvnic_generic_crq { + u8 first; + u8 cmd; + u8 params[10]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_version_exchange { + u8 first; + u8 cmd; + __be16 version; +#define IBMVNIC_INITIAL_VERSION 1 + u8 reserved[8]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_capability { + u8 first; + u8 cmd; + __be16 capability; /* one of ibmvnic_capabilities */ + struct ibmvnic_rc rc; + __be32 number; /*FIX: should be __be64, but I'm getting the least + * significant word first + */ +} __packed __aligned(8); + +struct ibmvnic_login { + u8 first; + u8 cmd; + u8 reserved[6]; + __be32 ioba; + __be32 len; +} __packed __aligned(8); + +struct ibmvnic_phys_parms { + u8 first; + u8 cmd; + u8 
flags1; +#define IBMVNIC_EXTERNAL_LOOPBACK 0x80 +#define IBMVNIC_INTERNAL_LOOPBACK 0x40 +#define IBMVNIC_PROMISC 0x20 +#define IBMVNIC_PHYS_LINK_ACTIVE 0x10 +#define IBMVNIC_AUTONEG_DUPLEX 0x08 +#define IBMVNIC_FULL_DUPLEX 0x04 +#define IBMVNIC_HALF_DUPLEX 0x02 +#define IBMVNIC_CAN_CHG_PHYS_PARMS 0x01 + u8 flags2; +#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80 + __be32 speed; +#define IBMVNIC_AUTONEG 0x80 +#define IBMVNIC_10MBPS 0x40 +#define IBMVNIC_100MBPS 0x20 +#define IBMVNIC_1GBPS 0x10 +#define IBMVNIC_10GBPS 0x08 + __be32 mtu; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_logical_link_state { + u8 first; + u8 cmd; + u8 link_state; +#define IBMVNIC_LOGICAL_LNK_DN 0x00 +#define IBMVNIC_LOGICAL_LNK_UP 0x01 +#define IBMVNIC_LOGICAL_LNK_QUERY 0xff + u8 reserved[9]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_query_ip_offload { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 len; + __be32 ioba; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_control_ip_offload { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_dump_size { + u8 first; + u8 cmd; + u8 reserved[6]; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_dump { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_request_dump_rsp { + u8 first; + u8 cmd; + u8 reserved[6]; + __be32 dumped_len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_ras_comp_num { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 num_components; + u8 reserved2[4]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_ras_comps { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_control_ras { + u8 first; + u8 cmd; + u8 correlator; + u8 level; + u8 op; +#define IBMVNIC_TRACE_LEVEL 1 +#define IBMVNIC_ERROR_LEVEL 2 +#define IBMVNIC_TRACE_PAUSE 3 +#define IBMVNIC_TRACE_RESUME 4 +#define IBMVNIC_TRACE_ON 5 +#define IBMVNIC_TRACE_OFF 6 +#define IBMVNIC_CHG_TRACE_BUFF_SZ 7 + u8 trace_buff_sz[3]; + u8 reserved[4]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_collect_fw_trace { + u8 first; + u8 cmd; + u8 correlator; + u8 reserved; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_statistics { + u8 first; + u8 cmd; + u8 flags; +#define IBMVNIC_PHYSICAL_PORT 0x80 + u8 reserved1; + __be32 ioba; + __be32 len; + u8 reserved[4]; +} __packed __aligned(8); + +struct ibmvnic_request_debug_stats { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_error_indication { + u8 first; + u8 cmd; + u8 flags; +#define IBMVNIC_FATAL_ERROR 0x80 + u8 reserved1; + __be32 error_id; + __be32 detail_error_sz; + __be16 error_cause; + u8 reserved2[2]; +} __packed __aligned(8); + +struct ibmvnic_request_error_info { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + __be32 error_id; +} __packed __aligned(8); + +struct ibmvnic_request_error_rsp { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 error_id; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_link_state_indication { + u8 first; + u8 cmd; + u8 reserved1[2]; + u8 phys_link_state; + u8 logical_link_state; + u8 reserved2[10]; 
+} __packed __aligned(8); + +struct ibmvnic_change_mac_addr { + u8 first; + u8 cmd; + u8 mac_addr[6]; + struct ibmvnic_rc rc; + u8 reserved[4]; +} __packed __aligned(8); + +struct ibmvnic_multicast_ctrl { + u8 first; + u8 cmd; + u8 mac_addr[6]; + u8 flags; +#define IBMVNIC_ENABLE_MC 0x80 +#define IBMVNIC_DISABLE_MC 0x40 +#define IBMVNIC_ENABLE_ALL 0x20 +#define IBMVNIC_DISABLE_ALL 0x10 + u8 reserved1; + __be16 reserved2; /* was num_enabled_mc_addr; */ + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_get_vpd_size_rsp { + u8 first; + u8 cmd; + u8 reserved[2]; + __be64 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_get_vpd { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved[4]; +} __packed __aligned(8); + +struct ibmvnic_acl_change_indication { + u8 first; + u8 cmd; + __be16 change_type; +#define IBMVNIC_MAC_ACL 0 +#define IBMVNIC_VLAN_ACL 1 + u8 reserved[12]; +} __packed __aligned(8); + +struct ibmvnic_acl_query { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_tune { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_request_map { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_request_map_rsp { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + u8 reserved2[4]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_unmap { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + u8 reserved2[12]; +} __packed __aligned(8); + +struct ibmvnic_request_unmap_rsp { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + u8 reserved2[8]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_query_map { + u8 first; + u8 cmd; + u8 reserved[14]; +} __packed __aligned(8); + +struct ibmvnic_query_map_rsp { + u8 first; + u8 cmd; + u8 reserved; + u8 page_size; + __be32 tot_pages; + __be32 free_pages; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +union ibmvnic_crq { + struct ibmvnic_generic_crq generic; + struct ibmvnic_version_exchange version_exchange; + struct ibmvnic_version_exchange version_exchange_rsp; + struct ibmvnic_capability query_capability; + struct ibmvnic_capability query_capability_rsp; + struct ibmvnic_capability request_capability; + struct ibmvnic_capability request_capability_rsp; + struct ibmvnic_login login; + struct ibmvnic_generic_crq login_rsp; + struct ibmvnic_phys_parms query_phys_parms; + struct ibmvnic_phys_parms query_phys_parms_rsp; + struct ibmvnic_phys_parms query_phys_capabilities; + struct ibmvnic_phys_parms query_phys_capabilities_rsp; + struct ibmvnic_phys_parms set_phys_parms; + struct ibmvnic_phys_parms set_phys_parms_rsp; + struct ibmvnic_logical_link_state logical_link_state; + struct ibmvnic_logical_link_state logical_link_state_rsp; + struct ibmvnic_query_ip_offload query_ip_offload; + struct ibmvnic_query_ip_offload query_ip_offload_rsp; + struct ibmvnic_control_ip_offload control_ip_offload; + struct ibmvnic_control_ip_offload control_ip_offload_rsp; + struct ibmvnic_request_dump_size request_dump_size; + struct ibmvnic_request_dump_size request_dump_size_rsp; + struct ibmvnic_request_dump request_dump; + struct ibmvnic_request_dump_rsp request_dump_rsp; + struct ibmvnic_request_ras_comp_num request_ras_comp_num; + struct ibmvnic_request_ras_comp_num request_ras_comp_num_rsp; + struct 
ibmvnic_request_ras_comps request_ras_comps; + struct ibmvnic_request_ras_comps request_ras_comps_rsp; + struct ibmvnic_control_ras control_ras; + struct ibmvnic_control_ras control_ras_rsp; + struct ibmvnic_collect_fw_trace collect_fw_trace; + struct ibmvnic_collect_fw_trace collect_fw_trace_rsp; + struct ibmvnic_request_statistics request_statistics; + struct ibmvnic_generic_crq request_statistics_rsp; + struct ibmvnic_request_debug_stats request_debug_stats; + struct ibmvnic_request_debug_stats request_debug_stats_rsp; + struct ibmvnic_error_indication error_indication; + struct ibmvnic_request_error_info request_error_info; + struct ibmvnic_request_error_rsp request_error_rsp; + struct ibmvnic_link_state_indication link_state_indication; + struct ibmvnic_change_mac_addr change_mac_addr; + struct ibmvnic_change_mac_addr change_mac_addr_rsp; + struct ibmvnic_multicast_ctrl multicast_ctrl; + struct ibmvnic_multicast_ctrl multicast_ctrl_rsp; + struct ibmvnic_generic_crq get_vpd_size; + struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp; + struct ibmvnic_get_vpd get_vpd; + struct ibmvnic_generic_crq get_vpd_rsp; + struct ibmvnic_acl_change_indication acl_change_indication; + struct ibmvnic_acl_query acl_query; + struct ibmvnic_generic_crq acl_query_rsp; + struct ibmvnic_tune tune; + struct ibmvnic_generic_crq tune_rsp; + struct ibmvnic_request_map request_map; + struct ibmvnic_request_map_rsp request_map_rsp; + struct ibmvnic_request_unmap request_unmap; + struct ibmvnic_request_unmap_rsp request_unmap_rsp; + struct ibmvnic_query_map query_map; + struct ibmvnic_query_map_rsp query_map_rsp; +}; + +enum ibmvnic_rc_codes { + SUCCESS = 0, + PARTIALSUCCESS = 1, + PERMISSION = 2, + NOMEMORY = 3, + PARAMETER = 4, + UNKNOWNCOMMAND = 5, + ABORTED = 6, + INVALIDSTATE = 7, + INVALIDIOBA = 8, + INVALIDLENGTH = 9, + UNSUPPORTEDOPTION = 10, +}; + +enum ibmvnic_capabilities { + MIN_TX_QUEUES = 1, + MIN_RX_QUEUES = 2, + MIN_RX_ADD_QUEUES = 3, + MAX_TX_QUEUES = 4, + MAX_RX_QUEUES = 5, + MAX_RX_ADD_QUEUES = 6, + REQ_TX_QUEUES = 7, + REQ_RX_QUEUES = 8, + REQ_RX_ADD_QUEUES = 9, + MIN_TX_ENTRIES_PER_SUBCRQ = 10, + MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11, + MAX_TX_ENTRIES_PER_SUBCRQ = 12, + MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13, + REQ_TX_ENTRIES_PER_SUBCRQ = 14, + REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15, + TCP_IP_OFFLOAD = 16, + PROMISC_REQUESTED = 17, + PROMISC_SUPPORTED = 18, + MIN_MTU = 19, + MAX_MTU = 20, + REQ_MTU = 21, + MAX_MULTICAST_FILTERS = 22, + VLAN_HEADER_INSERTION = 23, + MAX_TX_SG_ENTRIES = 25, + RX_SG_SUPPORTED = 26, + RX_SG_REQUESTED = 27, + OPT_TX_COMP_SUB_QUEUES = 28, + OPT_RX_COMP_QUEUES = 29, + OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30, + OPT_TX_ENTRIES_PER_SUBCRQ = 31, + OPT_RXBA_ENTRIES_PER_SUBCRQ = 32, + TX_RX_DESC_REQ = 33, +}; + +enum ibmvnic_error_cause { + ADAPTER_PROBLEM = 0, + BUS_PROBLEM = 1, + FW_PROBLEM = 2, + DD_PROBLEM = 3, + EEH_RECOVERY = 4, + FW_UPDATED = 5, + LOW_MEMORY = 6, +}; + +enum ibmvnic_commands { + VERSION_EXCHANGE = 0x01, + VERSION_EXCHANGE_RSP = 0x81, + QUERY_CAPABILITY = 0x02, + QUERY_CAPABILITY_RSP = 0x82, + REQUEST_CAPABILITY = 0x03, + REQUEST_CAPABILITY_RSP = 0x83, + LOGIN = 0x04, + LOGIN_RSP = 0x84, + QUERY_PHYS_PARMS = 0x05, + QUERY_PHYS_PARMS_RSP = 0x85, + QUERY_PHYS_CAPABILITIES = 0x06, + QUERY_PHYS_CAPABILITIES_RSP = 0x86, + SET_PHYS_PARMS = 0x07, + SET_PHYS_PARMS_RSP = 0x87, + ERROR_INDICATION = 0x08, + REQUEST_ERROR_INFO = 0x09, + REQUEST_ERROR_RSP = 0x89, + REQUEST_DUMP_SIZE = 0x0A, + REQUEST_DUMP_SIZE_RSP = 0x8A, + REQUEST_DUMP = 0x0B, + REQUEST_DUMP_RSP = 0x8B, + 
LOGICAL_LINK_STATE = 0x0C, + LOGICAL_LINK_STATE_RSP = 0x8C, + REQUEST_STATISTICS = 0x0D, + REQUEST_STATISTICS_RSP = 0x8D, + REQUEST_RAS_COMP_NUM = 0x0E, + REQUEST_RAS_COMP_NUM_RSP = 0x8E, + REQUEST_RAS_COMPS = 0x0F, + REQUEST_RAS_COMPS_RSP = 0x8F, + CONTROL_RAS = 0x10, + CONTROL_RAS_RSP = 0x90, + COLLECT_FW_TRACE = 0x11, + COLLECT_FW_TRACE_RSP = 0x91, + LINK_STATE_INDICATION = 0x12, + CHANGE_MAC_ADDR = 0x13, + CHANGE_MAC_ADDR_RSP = 0x93, + MULTICAST_CTRL = 0x14, + MULTICAST_CTRL_RSP = 0x94, + GET_VPD_SIZE = 0x15, + GET_VPD_SIZE_RSP = 0x95, + GET_VPD = 0x16, + GET_VPD_RSP = 0x96, + TUNE = 0x17, + TUNE_RSP = 0x97, + QUERY_IP_OFFLOAD = 0x18, + QUERY_IP_OFFLOAD_RSP = 0x98, + CONTROL_IP_OFFLOAD = 0x19, + CONTROL_IP_OFFLOAD_RSP = 0x99, + ACL_CHANGE_INDICATION = 0x1A, + ACL_QUERY = 0x1B, + ACL_QUERY_RSP = 0x9B, + REQUEST_DEBUG_STATS = 0x1C, + REQUEST_DEBUG_STATS_RSP = 0x9C, + QUERY_MAP = 0x1D, + QUERY_MAP_RSP = 0x9D, + REQUEST_MAP = 0x1E, + REQUEST_MAP_RSP = 0x9E, + REQUEST_UNMAP = 0x1F, + REQUEST_UNMAP_RSP = 0x9F, + VLAN_CTRL = 0x20, + VLAN_CTRL_RSP = 0xA0, +}; + +enum ibmvnic_crq_type { + IBMVNIC_CRQ_CMD = 0x80, + IBMVNIC_CRQ_CMD_RSP = 0x80, + IBMVNIC_CRQ_INIT_CMD = 0xC0, + IBMVNIC_CRQ_INIT_RSP = 0xC0, + IBMVNIC_CRQ_XPORT_EVENT = 0xFF, +}; + +enum ibmvfc_crq_format { + IBMVNIC_CRQ_INIT = 0x01, + IBMVNIC_CRQ_INIT_COMPLETE = 0x02, + IBMVNIC_PARTITION_MIGRATED = 0x06, +}; + +struct ibmvnic_crq_queue { + union ibmvnic_crq *msgs; + int size, cur; + dma_addr_t msg_token; + spinlock_t lock; +}; + +union sub_crq { + struct ibmvnic_generic_scrq generic; + struct ibmvnic_tx_comp_desc tx_comp; + struct ibmvnic_tx_desc v1; + struct ibmvnic_hdr_desc hdr; + struct ibmvnic_hdr_ext_desc hdr_ext; + struct ibmvnic_sge_desc sge; + struct ibmvnic_rx_comp_desc rx_comp; + struct ibmvnic_rx_buff_add_desc rx_add; +}; + +struct ibmvnic_sub_crq_queue { + union sub_crq *msgs; + int size, cur; + dma_addr_t msg_token; + unsigned long crq_num; + unsigned long hw_irq; + unsigned int irq; + unsigned int pool_index; + int scrq_num; + spinlock_t lock; + struct sk_buff *rx_skb_top; + struct ibmvnic_adapter *adapter; +}; + +struct ibmvnic_long_term_buff { + unsigned char *buff; + dma_addr_t addr; + u64 size; + u8 map_id; +}; + +struct ibmvnic_tx_buff { + struct sk_buff *skb; + dma_addr_t data_dma[IBMVNIC_MAX_FRAGS_PER_CRQ]; + unsigned int data_len[IBMVNIC_MAX_FRAGS_PER_CRQ]; + int index; + int pool_index; + bool last_frag; + bool used_bounce; +}; + +struct ibmvnic_tx_pool { + struct ibmvnic_tx_buff *tx_buff; + int *free_map; + int consumer_index; + int producer_index; + wait_queue_head_t ibmvnic_tx_comp_q; + struct task_struct *work_thread; + struct ibmvnic_long_term_buff long_term_buff; +}; + +struct ibmvnic_rx_buff { + struct sk_buff *skb; + dma_addr_t dma; + unsigned char *data; + int size; + int pool_index; +}; + +struct ibmvnic_rx_pool { + struct ibmvnic_rx_buff *rx_buff; + int size; + int index; + int buff_size; + atomic_t available; + int *free_map; + int next_free; + int next_alloc; + int active; + struct ibmvnic_long_term_buff long_term_buff; +}; + +struct ibmvnic_error_buff { + char *buff; + dma_addr_t dma; + int len; + struct list_head list; + __be32 error_id; +}; + +struct ibmvnic_fw_comp_internal { + struct ibmvnic_adapter *adapter; + int num; + struct debugfs_blob_wrapper desc_blob; + int paused; +}; + +struct ibmvnic_inflight_cmd { + union ibmvnic_crq crq; + struct list_head list; +}; + +struct ibmvnic_adapter { + struct vio_dev *vdev; + struct net_device *netdev; + struct ibmvnic_crq_queue crq; + u8 
mac_addr[ETH_ALEN]; + struct ibmvnic_query_ip_offload_buffer ip_offload_buf; + dma_addr_t ip_offload_tok; + struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; + dma_addr_t ip_offload_ctrl_tok; + bool migrated; + u32 msg_enable; + void *bounce_buffer; + int bounce_buffer_size; + dma_addr_t bounce_buffer_dma; + + /* Statistics */ + struct net_device_stats net_stats; + struct ibmvnic_statistics stats; + dma_addr_t stats_token; + struct completion stats_done; + spinlock_t stats_lock; + int replenish_no_mem; + int replenish_add_buff_success; + int replenish_add_buff_failure; + int replenish_task_cycles; + int tx_send_failed; + int tx_map_failed; + + int phys_link_state; + int logical_link_state; + + /* login data */ + struct ibmvnic_login_buffer *login_buf; + dma_addr_t login_buf_token; + int login_buf_sz; + + struct ibmvnic_login_rsp_buffer *login_rsp_buf; + dma_addr_t login_rsp_buf_token; + int login_rsp_buf_sz; + + atomic_t running_cap_queries; + + struct ibmvnic_sub_crq_queue **tx_scrq; + struct ibmvnic_sub_crq_queue **rx_scrq; + int requested_caps; + + /* rx structs */ + struct napi_struct *napi; + struct ibmvnic_rx_pool *rx_pool; + u64 promisc; + + struct ibmvnic_tx_pool *tx_pool; + bool closing; + struct completion init_done; + + struct list_head errors; + spinlock_t error_list_lock; + + /* debugfs */ + struct dentry *debugfs_dir; + struct dentry *debugfs_dump; + struct completion fw_done; + char *dump_data; + dma_addr_t dump_data_token; + int dump_data_size; + int ras_comp_num; + struct ibmvnic_fw_component *ras_comps; + struct ibmvnic_fw_comp_internal *ras_comp_int; + dma_addr_t ras_comps_tok; + struct dentry *ras_comps_ent; + + /* in-flight commands that allocate and/or map memory*/ + struct list_head inflight; + spinlock_t inflight_lock; + + /* partner capabilities */ + u64 min_tx_queues; + u64 min_rx_queues; + u64 min_rx_add_queues; + u64 max_tx_queues; + u64 max_rx_queues; + u64 max_rx_add_queues; + u64 req_tx_queues; + u64 req_rx_queues; + u64 req_rx_add_queues; + u64 min_tx_entries_per_subcrq; + u64 min_rx_add_entries_per_subcrq; + u64 max_tx_entries_per_subcrq; + u64 max_rx_add_entries_per_subcrq; + u64 req_tx_entries_per_subcrq; + u64 req_rx_add_entries_per_subcrq; + u64 tcp_ip_offload; + u64 promisc_requested; + u64 promisc_supported; + u64 min_mtu; + u64 max_mtu; + u64 req_mtu; + u64 max_multicast_filters; + u64 vlan_header_insertion; + u64 max_tx_sg_entries; + u64 rx_sg_supported; + u64 rx_sg_requested; + u64 opt_tx_comp_sub_queues; + u64 opt_rx_comp_queues; + u64 opt_rx_bufadd_q_per_rx_comp_q; + u64 opt_tx_entries_per_subcrq; + u64 opt_rxba_entries_per_subcrq; + __be64 tx_rx_desc_req; + u8 map_id; +}; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 4163b16489b3..fa593dd3efe1 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -280,6 +280,16 @@ config I40E_VXLAN Say Y here if you want to use Virtual eXtensible Local Area Network (VXLAN) in the driver. +config I40E_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) Support" + depends on I40E && GENEVE && !(I40E=y && GENEVE=m) + default n + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use GENEVE in the driver. 
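The dependency expression just above, "depends on I40E && GENEVE && !(I40E=y && GENEVE=m)", keeps a built-in i40e from referencing GENEVE helpers that would only exist in geneve.ko. A rough C sketch of the failure mode it rules out follows; the wrapper name, guard symbol, and call site are assumptions for illustration and are not taken from this patch.

/* Illustration only: if i40e were built in (=y) while GENEVE was a module (=m),
 * the reference below could not be resolved at kernel link time, because
 * geneve_get_rx_port() is exported by geneve.ko.  Names here are assumed.
 */
static void i40e_sync_geneve_ports(struct net_device *netdev)
{
#if IS_ENABLED(CONFIG_I40E_GENEVE)
	geneve_get_rx_port(netdev);	/* ask the GENEVE core to replay its known UDP ports */
#endif
}
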
+ config I40E_DCB bool "Data Center Bridging (DCB) Support" default n diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 69707108d23c..98fe5a2cd6e3 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -213,8 +213,11 @@ struct e1000_rx_ring { }; #define E1000_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) \ - ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) #define E1000_RX_DESC_EXT(R, i) \ (&(((union e1000_rx_desc_extended *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index b1af0d613caa..8172cf08cc33 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1,5 +1,5 @@ /******************************************************************************* - +* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2006 Intel Corporation. @@ -106,7 +106,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { 120, 120 }; -static DEFINE_SPINLOCK(e1000_eeprom_lock); +static DEFINE_MUTEX(e1000_eeprom_lock); static DEFINE_SPINLOCK(e1000_phy_lock); /** @@ -624,8 +624,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) /* Workaround for PCI-X problem when BIOS sets MMRBC * incorrectly. */ - if (hw->bus_type == e1000_bus_type_pcix - && e1000_pcix_get_mmrbc(hw) > 2048) + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) e1000_pcix_set_mmrbc(hw, 2048); break; } @@ -683,10 +683,9 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) } ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, - &eeprom_data); - if (ret_val) { + &eeprom_data); + if (ret_val) return ret_val; - } if (eeprom_data != EEPROM_RESERVED_WORD) { /* Adjust SERDES output amplitude only. */ @@ -1074,8 +1073,8 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) if (hw->mac_type <= e1000_82543 || hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) hw->phy_reset_disable = false; return E1000_SUCCESS; @@ -1652,7 +1651,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_1000t_ctrl_reg = 0; } else { ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, - mii_1000t_ctrl_reg); + mii_1000t_ctrl_reg); if (ret_val) return ret_val; } @@ -1881,10 +1880,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) if (ret_val) return ret_val; - if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) - && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ret_val = e1000_polarity_reversal_workaround(hw); if (ret_val) return ret_val; @@ -2084,11 +2084,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. 
*/ - if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_internal_serdes) - && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_copper) - && (!hw->autoneg))) { + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { ret_val = e1000_force_mac_fc(hw); if (ret_val) { e_dbg("Error forcing flow control settings\n"); @@ -2193,8 +2194,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = E1000_FC_TX_PAUSE; e_dbg ("Flow Control = TX PAUSE frames only.\n"); @@ -2210,8 +2210,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = E1000_FC_RX_PAUSE; e_dbg ("Flow Control = RX PAUSE frames only.\n"); @@ -2460,10 +2459,11 @@ s32 e1000_check_for_link(struct e1000_hw *hw) * happen due to the execution of this workaround. */ - if ((hw->mac_type == e1000_82544 - || hw->mac_type == e1000_82543) && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ew32(IMC, 0xffffffff); ret_val = e1000_polarity_reversal_workaround(hw); @@ -2528,8 +2528,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) */ if (hw->tbi_compatibility_en) { u16 speed, duplex; + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { e_dbg ("Error getting link speed and duplex\n"); @@ -2628,10 +2630,10 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); if (ret_val) return ret_val; - if ((*speed == SPEED_100 - && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) - || (*speed == SPEED_10 - && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) *duplex = HALF_DUPLEX; } } @@ -2664,9 +2666,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); if (ret_val) return ret_val; - if (phy_data & MII_SR_AUTONEG_COMPLETE) { + if (phy_data & MII_SR_AUTONEG_COMPLETE) return E1000_SUCCESS; - } + msleep(100); } return E1000_SUCCESS; @@ -2803,11 +2805,11 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) return data; } - /** * e1000_read_phy_reg - read a phy register * @hw: Struct containing variables accessed by shared code * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register * * Reads the value from a PHY register, if the value is on a specific non zero * page, sets the page first. 
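The flow-control branches reflowed in the hunks above resolve the local and link-partner autonegotiation pause bits into one of the E1000_FC_* modes. A minimal standalone sketch of that resolution is shown below; the helper name is hypothetical, the asymmetric TX/RX branches mirror the conditions in the hunks above, and the symmetric and fallback cases are assumptions based on the standard 802.3 pause table.

/* Hypothetical helper, not part of the driver: 802.3 pause resolution from the
 * local advertisement word and the link partner ability word.  The NWAY_* masks
 * and E1000_FC_* values are the driver's existing definitions.
 */
static u32 resolve_fc(u16 local_adv, u16 lp_adv)
{
	if ((local_adv & NWAY_AR_PAUSE) && (lp_adv & NWAY_LPAR_PAUSE))
		return E1000_FC_FULL;		/* both ends advertise symmetric pause */
	if (!(local_adv & NWAY_AR_PAUSE) && (local_adv & NWAY_AR_ASM_DIR) &&
	    (lp_adv & NWAY_LPAR_PAUSE) && (lp_adv & NWAY_LPAR_ASM_DIR))
		return E1000_FC_TX_PAUSE;	/* we may send pause frames only */
	if ((local_adv & NWAY_AR_PAUSE) && (local_adv & NWAY_AR_ASM_DIR) &&
	    !(lp_adv & NWAY_LPAR_PAUSE) && (lp_adv & NWAY_LPAR_ASM_DIR))
		return E1000_FC_RX_PAUSE;	/* we only honour received pause frames */
	return E1000_FC_NONE;			/* no common pause capability */
}
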
@@ -2823,14 +2825,13 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (u16) reg_addr); - if (ret_val) { - spin_unlock_irqrestore(&e1000_phy_lock, flags); - return ret_val; - } + if (ret_val) + goto out; } ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, phy_data); +out: spin_unlock_irqrestore(&e1000_phy_lock, flags); return ret_val; @@ -2881,7 +2882,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, e_dbg("MDI Read Error\n"); return -E1000_ERR_PHY; } - *phy_data = (u16) mdic; + *phy_data = (u16)mdic; } else { mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | @@ -2906,7 +2907,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } - *phy_data = (u16) mdic; + *phy_data = (u16)mdic; } } else { /* We must first send a preamble through the MDIO pin to signal @@ -2960,7 +2961,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) if ((hw->phy_type == e1000_phy_igp) && (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, - (u16) reg_addr); + (u16)reg_addr); if (ret_val) { spin_unlock_irqrestore(&e1000_phy_lock, flags); return ret_val; @@ -2993,7 +2994,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, * the desired data. */ if (hw->mac_type == e1000_ce4100) { - mdic = (((u32) phy_data) | + mdic = (((u32)phy_data) | (reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | (INTEL_CE_GBE_MDIC_OP_WRITE) | @@ -3015,7 +3016,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, return -E1000_ERR_PHY; } } else { - mdic = (((u32) phy_data) | + mdic = (((u32)phy_data) | (reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_WRITE)); @@ -3053,7 +3054,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); mdic <<= 16; - mdic |= (u32) phy_data; + mdic |= (u32)phy_data; e1000_shift_out_mdi_bits(hw, mdic, 32); } @@ -3176,14 +3177,14 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) if (ret_val) return ret_val; - hw->phy_id = (u32) (phy_id_high << 16); + hw->phy_id = (u32)(phy_id_high << 16); udelay(20); ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); if (ret_val) return ret_val; - hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); - hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; switch (hw->mac_type) { case e1000_82543: @@ -3401,7 +3402,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - } return E1000_SUCCESS; @@ -3449,7 +3449,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) if (hw->phy_type == e1000_phy_igp) return e1000_phy_igp_get_info(hw, phy_info); else if ((hw->phy_type == e1000_phy_8211) || - (hw->phy_type == e1000_phy_8201)) + (hw->phy_type == e1000_phy_8201)) return E1000_SUCCESS; else return e1000_phy_m88_get_info(hw, phy_info); @@ -3611,11 +3611,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) */ mask = 0x01 << (count - 1); eecd = er32(EECD); - if (eeprom->type == e1000_eeprom_microwire) { + if (eeprom->type == e1000_eeprom_microwire) eecd &= ~E1000_EECD_DO; - } else if (eeprom->type == e1000_eeprom_spi) { + else if (eeprom->type == e1000_eeprom_spi) eecd |= E1000_EECD_DO; - } + do { /* A "1" is shifted out to the EEPROM by setting bit "DI" to a * "1", and then raising and then lowering the clock (the SK bit @@ -3851,7 +3851,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) do { e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, hw->eeprom.opcode_bits); - spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) break; @@ -3882,9 +3882,10 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret; - spin_lock(&e1000_eeprom_lock); + + mutex_lock(&e1000_eeprom_lock); ret = e1000_do_read_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); + mutex_unlock(&e1000_eeprom_lock); return ret; } @@ -3896,15 +3897,16 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (hw->mac_type == e1000_ce4100) { GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, - data); + data); return E1000_SUCCESS; } /* A check for invalid values: offset too large, too many words, and * not enough words. */ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { e_dbg("\"words\" parameter out of bounds. Words = %d," "size = %d\n", offset, eeprom->word_size); return -E1000_ERR_EEPROM; @@ -3940,7 +3942,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, /* Send the READ command (opcode + addr) */ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset * 2), + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), eeprom->address_bits); /* Read the data. The address of the eeprom internally @@ -3960,7 +3962,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + i), + e1000_shift_out_ee_bits(hw, (u16)(offset + i), eeprom->address_bits); /* Read the data. 
For microwire, each word requires the @@ -3968,6 +3970,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, */ data[i] = e1000_shift_in_ee_bits(hw, 16); e1000_standby_eeprom(hw); + cond_resched(); } } @@ -4004,7 +4007,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) return E1000_SUCCESS; #endif - if (checksum == (u16) EEPROM_SUM) + if (checksum == (u16)EEPROM_SUM) return E1000_SUCCESS; else { e_dbg("EEPROM Checksum Invalid\n"); @@ -4031,7 +4034,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) } checksum += eeprom_data; } - checksum = (u16) EEPROM_SUM - checksum; + checksum = (u16)EEPROM_SUM - checksum; if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { e_dbg("EEPROM Write Error\n"); return -E1000_ERR_EEPROM; @@ -4052,9 +4055,10 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret; - spin_lock(&e1000_eeprom_lock); + + mutex_lock(&e1000_eeprom_lock); ret = e1000_do_write_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); + mutex_unlock(&e1000_eeprom_lock); return ret; } @@ -4066,15 +4070,16 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (hw->mac_type == e1000_ce4100) { GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, - data); + data); return E1000_SUCCESS; } /* A check for invalid values: offset too large, too many words, and * not enough words. */ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { e_dbg("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; } @@ -4116,6 +4121,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, return -E1000_ERR_EEPROM; e1000_standby_eeprom(hw); + cond_resched(); /* Send the WRITE ENABLE command (8 bit opcode ) */ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, @@ -4132,7 +4138,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, /* Send the Write command (8-bit opcode + addr) */ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), eeprom->address_bits); /* Send the data */ @@ -4142,6 +4148,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, */ while (widx < words) { u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); e1000_shift_out_ee_bits(hw, word_out, 16); widx++; @@ -4183,9 +4190,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, * EEPROM into write/erase mode. 
*/ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); + (u16)(eeprom->opcode_bits + 2)); - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); /* Prepare the EEPROM */ e1000_standby_eeprom(hw); @@ -4195,7 +4202,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), eeprom->address_bits); /* Send the data */ @@ -4224,6 +4231,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, /* Recover from write */ e1000_standby_eeprom(hw); + cond_resched(); words_written++; } @@ -4235,9 +4243,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, * EEPROM out of write/erase mode. */ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); + (u16)(eeprom->opcode_bits + 2)); - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); return E1000_SUCCESS; } @@ -4260,8 +4268,8 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw) e_dbg("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } - hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); - hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); } switch (hw->mac_type) { @@ -4328,19 +4336,19 @@ u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) */ case 0: /* [47:36] i.e. 0x563 for above example address */ - hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* [46:35] i.e. 0xAC6 for above example address */ - hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* [45:34] i.e. 0x5D8 for above example address */ - hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* [43:32] i.e. 0x634 for above example address */ - hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; } @@ -4361,9 +4369,9 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx * unit hang. 
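A quick worked example of the receive-address packing in e1000_rar_set above, with an illustrative MAC of 00:11:22:33:44:55 (not taken from the patch): the bytes are assembled least-significant first, so

	rar_low  = 0x00 | (0x11 << 8) | (0x22 << 16) | (0x33 << 24) = 0x33221100
	rar_high = 0x44 | (0x55 << 8)                               = 0x00005544

which is the little-endian layout the hardware expects, reversed from the big-endian (network) order of the address itself.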
@@ -4537,7 +4545,7 @@ s32 e1000_setup_led(struct e1000_hw *hw) if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - (u16) (hw->phy_spd_default & + (u16)(hw->phy_spd_default & ~IGP01E1000_GMII_SPD)); if (ret_val) return ret_val; @@ -4802,7 +4810,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw) void e1000_update_adaptive(struct e1000_hw *hw) { if (hw->adaptive_ifs) { - if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { if (hw->tx_packet_delta > MIN_NUM_XMITS) { hw->in_ifs_mode = true; if (hw->current_ifs_val < hw->ifs_max_val) { @@ -4816,8 +4824,8 @@ void e1000_update_adaptive(struct e1000_hw *hw) } } } else { - if (hw->in_ifs_mode - && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { hw->current_ifs_val = 0; hw->in_ifs_mode = false; ew32(AIT, 0); @@ -4922,7 +4930,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, /* Use old method for Phy older than IGP */ if (hw->phy_type == e1000_phy_m88) { - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) @@ -4966,7 +4973,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); if (ret_val) @@ -4976,8 +4982,8 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, /* Value bound check. */ if ((cur_agc_value >= - IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) - || (cur_agc_value == 0)) + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) return -E1000_ERR_PHY; agc_value += cur_agc_value; @@ -5054,7 +5060,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, */ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - /* Read the GIG initialization PCS register (0x00B4) */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, @@ -5175,8 +5180,8 @@ static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) hw->ffe_config_state = e1000_ffe_config_active; ret_val = e1000_write_phy_reg(hw, - IGP01E1000_PHY_DSP_FFE, - IGP01E1000_PHY_DSP_FFE_CM_CP); + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); if (ret_val) return ret_val; break; @@ -5243,7 +5248,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { @@ -5264,7 +5269,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5299,7 +5304,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; ret_val = @@ -5309,7 +5314,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) return ret_val; ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5346,9 +5351,8 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw) ret_val = 
e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data); - if (ret_val) { + if (ret_val) return ret_val; - } if ((eeprom_data != EEPROM_RESERVED_WORD) && (eeprom_data & EEPROM_PHY_CLASS_A)) { @@ -5395,8 +5399,8 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) * from the lowest speeds starting from 10Mbps. The capability is used * for Dx transitions and states */ - if (hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); if (ret_val) @@ -5446,11 +5450,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) if (ret_val) return ret_val; } - } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) - || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) - || (hw->autoneg_advertised == - AUTONEG_ADVERTISE_10_100_ALL)) { - + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { phy_data |= IGP01E1000_GMII_FLEX_SPD; @@ -5474,7 +5476,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) phy_data); if (ret_val) return ret_val; - } return E1000_SUCCESS; } @@ -5542,7 +5543,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw) return E1000_SUCCESS; } - /** * e1000_enable_mng_pass_thru - check for bmc pass through * @hw: Struct containing variables accessed by shared code diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index fd7be860c201..3fc7bde699ba 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -99,13 +99,13 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); void e1000_free_all_tx_resources(struct e1000_adapter *adapter); void e1000_free_all_rx_resources(struct e1000_adapter *adapter); static int e1000_setup_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *txdr); + struct e1000_tx_ring *txdr); static int e1000_setup_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rxdr); + struct e1000_rx_ring *rxdr); static void e1000_free_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); + struct e1000_tx_ring *tx_ring); static void e1000_free_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); void e1000_update_stats(struct e1000_adapter *adapter); static int e1000_init_module(void); @@ -122,16 +122,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter); static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); static void e1000_clean_tx_ring(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); + struct e1000_tx_ring *tx_ring); static void e1000_clean_rx_ring(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); static void e1000_set_rx_mode(struct net_device *netdev); static void e1000_update_phy_info_task(struct work_struct *work); static void e1000_watchdog(struct work_struct *work); static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 
-static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static struct net_device_stats *e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); static irqreturn_t e1000_intr(int irq, void *data); @@ -164,7 +164,7 @@ static void e1000_tx_timeout(struct net_device *dev); static void e1000_reset_task(struct work_struct *work); static void e1000_smartspeed(struct e1000_adapter *adapter); static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, - struct sk_buff *skb); + struct sk_buff *skb); static bool e1000_vlan_used(struct e1000_adapter *adapter); static void e1000_vlan_mode(struct net_device *netdev, @@ -195,7 +195,7 @@ MODULE_PARM_DESC(copybreak, "Maximum size of packet that is copied to a new buffer on receive"); static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state); + pci_channel_state_t state); static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); static void e1000_io_resume(struct pci_dev *pdev); @@ -287,7 +287,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) int err; err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, - netdev); + netdev); if (err) { e_err(probe, "Unable to allocate interrupt Error: %d\n", err); } @@ -636,8 +636,8 @@ void e1000_reset(struct e1000_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (hw->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -943,8 +943,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct e1000_adapter *adapter; struct e1000_hw *hw; - static int cards_found = 0; - static int global_quad_port_a = 0; /* global ksp3 port a indication */ + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ int i, err, pci_using_dac; u16 eeprom_data = 0; u16 tmp = 0; @@ -1046,7 +1046,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac_type == e1000_ce4100) { hw->ce4100_gbe_mdio_base_virt = ioremap(pci_resource_start(pdev, BAR_1), - pci_resource_len(pdev, BAR_1)); + pci_resource_len(pdev, BAR_1)); if (!hw->ce4100_gbe_mdio_base_virt) goto err_mdio_ioremap; @@ -1148,7 +1148,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; case e1000_82546: case e1000_82546_rev_3: - if (er32(STATUS) & E1000_STATUS_FUNC_1){ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { e1000_read_eeprom(hw, EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; @@ -1199,13 +1199,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 32; i++) { hw->phy_addr = i; e1000_read_phy_reg(hw, PHY_ID2, &tmp); - if (tmp == 0 || tmp == 0xFF) { - if (i == 31) - goto err_eeprom; - continue; - } else + + if (tmp != 0 && tmp != 0xFF) break; } + + if (i >= 32) + goto err_eeprom; } /* reset the hardware with the new settings */ @@ -1263,7 +1263,7 @@ err_pci_reg: * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a + * that it should release a PCI device. 
That could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ @@ -1334,12 +1334,12 @@ static int e1000_sw_init(struct e1000_adapter *adapter) static int e1000_alloc_queues(struct e1000_adapter *adapter) { adapter->tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct e1000_tx_ring), GFP_KERNEL); + sizeof(struct e1000_tx_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; adapter->rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct e1000_rx_ring), GFP_KERNEL); + sizeof(struct e1000_rx_ring), GFP_KERNEL); if (!adapter->rx_ring) { kfree(adapter->tx_ring); return -ENOMEM; @@ -1811,20 +1811,20 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) rctl &= ~E1000_RCTL_SZ_4096; rctl |= E1000_RCTL_BSEX; switch (adapter->rx_buffer_len) { - case E1000_RXBUFFER_2048: - default: - rctl |= E1000_RCTL_SZ_2048; - rctl &= ~E1000_RCTL_BSEX; - break; - case E1000_RXBUFFER_4096: - rctl |= E1000_RCTL_SZ_4096; - break; - case E1000_RXBUFFER_8192: - rctl |= E1000_RCTL_SZ_8192; - break; - case E1000_RXBUFFER_16384: - rctl |= E1000_RCTL_SZ_16384; - break; + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; } /* This is useful for sniffing bad packets. */ @@ -1861,12 +1861,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (adapter->netdev->mtu > ETH_DATA_LEN) { rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_jumbo_rx_irq; adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } @@ -2761,7 +2761,9 @@ static int e1000_tso(struct e1000_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - if (++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; return true; @@ -2816,7 +2818,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; + tx_ring->next_to_use = i; return true; @@ -2865,8 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, * packet is smaller than 2048 - 16 - 16 (or 2016) bytes */ if (unlikely((hw->bus_type == e1000_bus_type_pcix) && - (size > 2015) && count == 0)) - size = 2015; + (size > 2015) && count == 0)) + size = 2015; /* Workaround for potential 82544 hang in PCI-X. Avoid * terminating buffers within evenly-aligned dwords. 
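Several of the e1000_main.c hunks above and below reformat the descriptor-ring index advance onto its own braced statement. As a standalone sketch of that wraparound idiom (the helper name is illustrative, not part of the driver):

	/* Advance a ring index and wrap back to 0 at the end of the ring. */
	static inline unsigned int ring_next(unsigned int i, unsigned int count)
	{
		if (++i == count)
			i = 0;
		return i;
	}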
@@ -2963,7 +2967,7 @@ dma_error: count--; while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -3013,7 +3017,8 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, tx_desc->lower.data = cpu_to_le32(txd_lower | buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; } tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); @@ -3101,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, return __e1000_maybe_stop_tx(netdev, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -3841,7 +3846,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; - unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; @@ -3869,14 +3874,18 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; } eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } - tx_ring->next_to_clean = i; + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); netdev_completed_queue(netdev, pkts_compl, bytes_compl); @@ -3954,9 +3963,11 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, skb_checksum_none_assert(skb); /* 82543 or newer only */ - if (unlikely(hw->mac_type < e1000_82543)) return; + if (unlikely(hw->mac_type < e1000_82543)) + return; /* Ignore Checksum bit is set */ - if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; /* TCP/UDP checksum error bit is set */ if (unlikely(errors & E1000_RXD_ERR_TCPE)) { /* let the stack verify checksum errors */ @@ -4136,7 +4147,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); @@ -4153,7 +4164,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, status = rx_desc->status; - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); prefetch(next_rxd); @@ -4356,7 +4369,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); @@ -4395,7 +4408,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, buffer_info->rxbuf.data = NULL; } - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); prefetch(next_rxd); @@ -4683,9 +4698,11 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) * we assume back-to-back 
*/ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); if (phy_ctrl & CR_1000T_MS_ENABLE) { phy_ctrl &= ~CR_1000T_MS_ENABLE; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 133d4074dbe4..f7c7804d79e5 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -441,12 +441,13 @@ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ #define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ -#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupt */ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_ICS_OTHER E1000_ICR_OTHER /* Other Interrupt */ /* Transmit Descriptor Control */ #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 0b748d1959d9..1dc293bad87b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -480,7 +480,7 @@ extern const char e1000e_driver_version[]; void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device *netdev); -int e1000e_up(struct e1000_adapter *adapter); +void e1000e_up(struct e1000_adapter *adapter); void e1000e_down(struct e1000_adapter *adapter, bool reset); void e1000e_reinit_locked(struct e1000_adapter *adapter); void e1000e_reset(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index c9da4654e9ca..b3949d5bef5c 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -91,6 +91,7 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ #define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LBG PCH */ #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 91a5a0ae9cd7..a049e30639a1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1984,7 +1984,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) int i = 0; while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && - (i++ < 10)) + (i++ < 30)) usleep_range(10000, 20000); return blocked ? 
E1000_BLK_PHY_RESET : 0; } @@ -3093,24 +3093,45 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) struct e1000_nvm_info *nvm = &hw->nvm; u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u32 nvm_dword = 0; u8 sig_byte = 0; s32 ret_val; switch (hw->mac.type) { - /* In SPT, read from the CTRL_EXT reg instead of - * accessing the sector valid bits from the nvm - */ case e1000_pch_spt: - *bank = er32(CTRL_EXT) - & E1000_CTRL_EXT_NVMVS; - if ((*bank == 0) || (*bank == 1)) { - e_dbg("ERROR: No valid NVM bank present\n"); - return -E1000_ERR_NVM; - } else { - *bank = *bank - 2; + bank1_offset = nvm->flash_bank_size; + act_offset = E1000_ICH_NVM_SIG_WORD; + + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; return 0; } - break; + + /* Check bank 1 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + + bank1_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0a854a47d31a..c71ba1bfc1ec 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1905,30 +1905,15 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - if (!(icr & E1000_ICR_INT_ASSERTED)) { - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_OTHER); - return IRQ_NONE; - } - if (icr & adapter->eiac_mask) - ew32(ICS, (icr & adapter->eiac_mask)); + hw->mac.get_link_status = true; - if (icr & E1000_ICR_OTHER) { - if (!(icr & E1000_ICR_LSC)) - goto no_link_interrupt; - hw->mac.get_link_status = true; - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) { + mod_timer(&adapter->watchdog_timer, jiffies + 1); + ew32(IMS, E1000_IMS_OTHER); } -no_link_interrupt: - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); - return IRQ_HANDLED; } @@ -1946,6 +1931,9 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) /* Ring was not completely cleaned, so fire another interrupt */ ew32(ICS, tx_ring->ims_val); + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, adapter->tx_ring->ims_val); + return IRQ_HANDLED; } @@ -1959,8 +1947,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) * previous interrupt. */ if (rx_ring->set_itr) { - writel(1000000000 / (rx_ring->itr_val * 256), - rx_ring->itr_register); + u32 itr = rx_ring->itr_val ? 
+ 1000000000 / (rx_ring->itr_val * 256) : 0; + + writel(itr, rx_ring->itr_register); rx_ring->set_itr = 0; } @@ -2025,6 +2015,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) hw->hw_addr + E1000_EITR_82574(vector)); else writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ ivar |= (1 << 31); @@ -2032,12 +2023,8 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) ew32(IVAR, ivar); /* enable MSI-X PBA support */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; - - /* Auto-Mask Other interrupts upon ICR read */ - ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); - ctrl_ext |= E1000_CTRL_EXT_EIAME; + ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME; + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME; ew32(CTRL_EXT, ctrl_ext); e1e_flush(); } @@ -2253,7 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); - ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); } else if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); @@ -4144,10 +4131,24 @@ void e1000e_reset(struct e1000_adapter *adapter) } -int e1000e_up(struct e1000_adapter *adapter) +/** + * e1000e_trigger_lsc - trigger an LSC interrupt + * @adapter: + * + * Fire a link status change interrupt to start the watchdog. + **/ +static void e1000e_trigger_lsc(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_OTHER); + else + ew32(ICS, E1000_ICS_LSC); +} + +void e1000e_up(struct e1000_adapter *adapter) +{ /* hardware has been reset, we need to reload some things */ e1000_configure(adapter); @@ -4159,13 +4160,7 @@ int e1000e_up(struct e1000_adapter *adapter) netif_start_queue(adapter->netdev); - /* fire a link change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); - - return 0; + e1000e_trigger_lsc(adapter); } static void e1000e_flush_descriptors(struct e1000_adapter *adapter) @@ -4590,11 +4585,7 @@ static int e1000_open(struct net_device *netdev) hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); - /* fire a link status change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); + e1000e_trigger_lsc(adapter); return 0; @@ -6631,7 +6622,7 @@ static int e1000e_pm_runtime_resume(struct device *dev) return rc; if (netdev->flags & IFF_UP) - rc = e1000e_up(adapter); + e1000e_up(adapter); return rc; } @@ -6822,13 +6813,8 @@ static void e1000_io_resume(struct pci_dev *pdev) e1000_init_manageability_pt(adapter); - if (netif_running(netdev)) { - if (e1000e_up(adapter)) { - dev_err(&pdev->dev, - "can't bring device back up after reset\n"); - return; - } - } + if (netif_running(netdev)) + e1000e_up(adapter); netif_device_attach(netdev); @@ -7465,6 +7451,7 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* 
terminate list */ }; @@ -7504,14 +7491,11 @@ static struct pci_driver e1000_driver = { **/ static int __init e1000_init_module(void) { - int ret; - pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); - ret = pci_register_driver(&e1000_driver); - return ret; + return pci_register_driver(&e1000_driver); } module_init(e1000_init_module); diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index 08859dd220a8..b006ff66d028 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2014 Intel Corporation. +# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -27,7 +27,17 @@ obj-$(CONFIG_FM10K) += fm10k.o -fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \ - fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \ - fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \ - fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o +fm10k-y := fm10k_main.o \ + fm10k_common.o \ + fm10k_pci.o \ + fm10k_ptp.o \ + fm10k_netdev.o \ + fm10k_ethtool.o \ + fm10k_pf.o \ + fm10k_vf.o \ + fm10k_mbx.o \ + fm10k_iov.o \ + fm10k_tlv.o + +fm10k-$(CONFIG_DEBUG_FS) += fm10k_debugfs.o +fm10k-$(CONFIG_DCB) += fm10k_dcbnl.o diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 14440200499b..b34bb008b104 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -23,6 +23,7 @@ #include <linux/types.h> #include <linux/etherdevice.h> +#include <linux/cpumask.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/pci.h> @@ -33,7 +34,7 @@ #include "fm10k_pf.h" #include "fm10k_vf.h" -#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */ +#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */ #define MAX_QUEUES FM10K_MAX_QUEUES_PF @@ -66,6 +67,7 @@ struct fm10k_l2_accel { enum fm10k_ring_state_t { __FM10K_TX_DETECT_HANG, __FM10K_HANG_CHECK_ARMED, + __FM10K_TX_XPS_INIT_DONE, }; #define check_for_tx_hang(ring) \ @@ -138,7 +140,7 @@ struct fm10k_ring { * different for DCB and RSS modes */ u8 qos_pc; /* priority class of queue */ - u16 vid; /* default vlan ID of queue */ + u16 vid; /* default VLAN ID of queue */ u16 count; /* amount of descriptors */ u16 next_to_alloc; @@ -164,14 +166,20 @@ struct fm10k_ring_container { unsigned int total_packets; /* total packets processed this int */ u16 work_limit; /* total work allowed per interrupt */ u16 itr; /* interrupt throttle rate value */ + u8 itr_scale; /* ITR adjustment based on PCI speed */ u8 count; /* total number of rings in vector */ }; #define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */ #define FM10K_ITR_10K 100 /* 100us */ #define FM10K_ITR_20K 50 /* 50us */ +#define FM10K_ITR_40K 25 /* 25us */ #define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */ +#define ITR_IS_ADAPTIVE(itr) (!!(itr & FM10K_ITR_ADAPTIVE)) + +#define FM10K_TX_ITR_DEFAULT FM10K_ITR_40K +#define FM10K_RX_ITR_DEFAULT FM10K_ITR_20K #define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) @@ -203,6 +211,7 @@ 
struct fm10k_q_vector { struct fm10k_ring_container rx, tx; struct napi_struct napi; + cpumask_t affinity_mask; char name[IFNAMSIZ + 9]; #ifdef CONFIG_DEBUG_FS @@ -413,7 +422,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) (&(((union fm10k_rx_desc *)((R)->desc))[i])) #define FM10K_MAX_TXD_PWR 14 -#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR) +#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) @@ -434,7 +443,7 @@ union fm10k_ftag_info { struct { /* dglort and sglort combined into a single 32bit desc read */ __le32 glort; - /* upper 16 bits of vlan are reserved 0 for swpri_type_user */ + /* upper 16 bits of VLAN are reserved 0 for swpri_type_user */ __le32 vlan; } d; struct { @@ -484,7 +493,7 @@ void fm10k_netpoll(struct net_device *netdev); #endif /* Netdev */ -struct net_device *fm10k_alloc_netdev(void); +struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); int fm10k_setup_rx_resources(struct fm10k_ring *); int fm10k_setup_tx_resources(struct fm10k_ring *); void fm10k_free_rx_resources(struct fm10k_ring *); @@ -551,5 +560,9 @@ int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); /* DCB */ +#ifdef CONFIG_DCB void fm10k_dcbnl_set_ops(struct net_device *dev); +#else +static inline void fm10k_dcbnl_set_ops(struct net_device *dev) {} +#endif #endif /* _FM10K_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 5c7a4d7662d8..2be4361839db 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -20,7 +20,6 @@ #include "fm10k.h" -#ifdef CONFIG_DCB /** * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device * @dev: netdev interface for the device @@ -155,7 +154,6 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { .setdcbx = fm10k_dcbnl_setdcbx, }; -#endif /* CONFIG_DCB */ /** * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev * @dev: netdev interface for the device @@ -164,11 +162,9 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { **/ void fm10k_dcbnl_set_ops(struct net_device *dev) { -#ifdef CONFIG_DCB struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_hw *hw = &interface->hw; if (hw->mac.type == fm10k_mac_pf) dev->dcbnl_ops = &fm10k_dcbnl_ops; -#endif /* CONFIG_DCB */ } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 5304bc1fbecd..5d6137faf7d1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -18,8 +18,6 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 */ -#ifdef CONFIG_DEBUG_FS - #include "fm10k.h" #include <linux/debugfs.h> @@ -258,5 +256,3 @@ void fm10k_dbg_exit(void) debugfs_remove_recursive(dbg_root); dbg_root = NULL; } - -#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2ce0eba5e040..2f6a05b57228 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -111,12 +111,14 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_tx_busy", tx_busy), - FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped), + FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped), FM10K_MBX_STAT("mbx_tx_messages", tx_messages), FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords), + FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled), FM10K_MBX_STAT("mbx_rx_messages", rx_messages), FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords), FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err), + FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) @@ -125,7 +127,7 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) #define FM10K_QUEUE_STATS_LEN(_n) \ - ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) + ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ FM10K_NETDEV_STATS_LEN + \ @@ -257,7 +259,8 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) stats_len += FM10K_DEBUG_STATS_LEN; if (iov_data) - stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs; + stats_len += FM10K_MBX_STATS_LEN * + iov_data->num_vfs; } return stats_len; @@ -296,14 +299,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, if (interface->flags & FM10K_FLAG_DEBUG_STATS) { for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset; + p = (char *)interface + + fm10k_gstrings_debug_stats[i].stat_offset; *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset; + p = (char *)&interface->hw.mbx + + fm10k_gstrings_mbx_stats[i].stat_offset; *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } @@ -320,6 +325,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { for (i = 0; i < iov_data->num_vfs; i++) { struct fm10k_vf_info *vf_info; + vf_info = &iov_data->vf_info[i]; /* skip stats if we don't have a vf info */ @@ -329,7 +335,8 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, } for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset; + p = (char *)&vf_info->mbx + + fm10k_gstrings_mbx_stats[j].stat_offset; *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } @@ -699,12 +706,10 @@ static int fm10k_get_coalesce(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); - ec->use_adaptive_tx_coalesce = - !!(interface->tx_itr & FM10K_ITR_ADAPTIVE); + ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr); ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE; - ec->use_adaptive_rx_coalesce = - !!(interface->rx_itr & FM10K_ITR_ADAPTIVE); + ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr); ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE; return 0; @@ -729,10 +734,10 @@ static int fm10k_set_coalesce(struct net_device *dev, /* set initial values for adaptive ITR */ if (ec->use_adaptive_tx_coalesce) - tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K; + tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT; if (ec->use_adaptive_rx_coalesce) - rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; /* update interface */ interface->tx_itr = tx_itr; @@ -1020,7 +1025,6 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) return 0; } - static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e76a44cf330c..b243c3cbe68f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.15.2-k" +#define DRV_VERSION "0.19.3-k" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = @@ -42,7 +42,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /* single workqueue for entire fm10k driver */ -struct workqueue_struct *fm10k_workqueue = NULL; +struct workqueue_struct *fm10k_workqueue; /** * fm10k_init_module - Driver Registration Routine @@ -56,8 +56,7 @@ static int __init fm10k_init_module(void) pr_info("%s\n", fm10k_copyright); /* create driver workqueue */ - if (!fm10k_workqueue) - fm10k_workqueue = create_workqueue("fm10k"); + fm10k_workqueue = create_workqueue("fm10k"); fm10k_dbg_init(); @@ -80,7 +79,6 @@ static void __exit fm10k_exit_module(void) /* destroy driver workqueue */ flush_workqueue(fm10k_workqueue); destroy_workqueue(fm10k_workqueue); - fm10k_workqueue = NULL; } module_exit(fm10k_exit_module); @@ -917,7 +915,7 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) /* set timestamping bits */ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - desc_flags |= FM10K_TXD_FLAG_TIME; + desc_flags |= FM10K_TXD_FLAG_TIME; /* set checksum offload bits */ desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, @@ -1094,11 +1092,11 @@ dma_error: netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring) { + u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct fm10k_tx_buffer *first; - int tso; - u32 tx_flags = 0; unsigned short f; - u16 count = TXD_USE_COUNT(skb_headlen(skb)); + u32 tx_flags = 0; + int tso; /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, @@ -1363,10 +1361,10 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, **/ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) { - unsigned int avg_wire_size, packets; + unsigned int 
avg_wire_size, packets, itr_round; /* Only update ITR if we are using adaptive setting */ - if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) + if (!ITR_IS_ADAPTIVE(ring_container->itr)) goto clear_counts; packets = ring_container->total_packets; @@ -1375,18 +1373,44 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) avg_wire_size = ring_container->total_bytes / packets; - /* Add 24 bytes to size to account for CRC, preamble, and gap */ - avg_wire_size += 24; - - /* Don't starve jumbo frames */ - if (avg_wire_size > 3000) - avg_wire_size = 3000; + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (34 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 360) { + /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ + avg_wire_size *= 8; + avg_wire_size += 376; + } else if (avg_wire_size <= 1152) { + /* 77K ints/sec to 45K ints/sec */ + avg_wire_size *= 3; + avg_wire_size += 2176; + } else if (avg_wire_size <= 1920) { + /* 45K ints/sec to 38K ints/sec */ + avg_wire_size += 4480; + } else { + /* plateau at a limit of 38K ints/sec */ + avg_wire_size = 6656; + } - /* Give a little boost to mid-size frames */ - if ((avg_wire_size > 300) && (avg_wire_size < 1200)) - avg_wire_size /= 3; - else - avg_wire_size /= 2; + /* Perform final bitshift for division after rounding up to ensure + * that the calculation will never get below a 1. The bit shift + * accounts for changes in the ITR due to PCIe link speed. 
+ */ + itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; + avg_wire_size += (1 << itr_round) - 1; + avg_wire_size >>= itr_round; /* write back value and retain adaptive flag */ ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; @@ -1428,11 +1452,15 @@ static int fm10k_poll(struct napi_struct *napi, int budget) fm10k_for_each_ring(ring, q_vector->tx) clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + return budget; + /* attempt to distribute budget to each queue fairly, but don't * allow the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) - per_ring_budget = max(budget/q_vector->rx.count, 1); + per_ring_budget = max(budget / q_vector->rx.count, 1); else per_ring_budget = budget; @@ -1600,6 +1628,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, q_vector->tx.ring = ring; q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; q_vector->tx.itr = interface->tx_itr; + q_vector->tx.itr_scale = interface->hw.mac.itr_scale; q_vector->tx.count = txr_count; while (txr_count) { @@ -1628,6 +1657,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, /* save Rx ring container info */ q_vector->rx.ring = ring; q_vector->rx.itr = interface->rx_itr; + q_vector->rx.itr_scale = interface->hw.mac.itr_scale; q_vector->rx.count = rxr_count; while (rxr_count) { @@ -1966,8 +1996,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) /* Allocate memory for queues */ err = fm10k_alloc_q_vectors(interface); - if (err) + if (err) { + fm10k_reset_msix_capability(interface); return err; + } /* Map rings to devices, and map devices to physical queues */ fm10k_assign_rings(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index af09a1b272e6..98202c3d591c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -57,7 +57,7 @@ static u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) } /** - * fm10k_fifo_empty - Test to verify if fifo is empty + * fm10k_fifo_empty - Test to verify if FIFO is empty * @fifo: pointer to FIFO * * This function returns true if the FIFO is empty, else false @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indices into the fifo based on head + offset + * This function returns the indices into the FIFO based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indices into the fifo based on tail + offset + * This function returns the indices into the FIFO based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -160,7 +160,7 @@ static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) /** * fm10k_mbx_tail_add - Determine new tail value with added offset * @mbx: pointer to mailbox - * @offset: length to add to head offset + * @offset: length to add to tail offset * * This function takes the local tail index and recomputes it for * a given length added as an offset. 
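The FIFO offset helpers documented above rely on the constraint, spelled out in the mailbox init comments later in this file, that the FIFO size is a power of 2. A minimal sketch of that indexing scheme (not the driver's code; the name is illustrative):

	/* With a power-of-2 size, head + offset reduces to a valid index
	 * with a mask instead of a modulo.
	 */
	static u16 example_fifo_offset(u16 head, u16 offset, u16 size)
	{
		return (head + offset) & (size - 1);	/* size must be 2^n */
	}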
@@ -176,7 +176,7 @@ static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) /** * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset * @mbx: pointer to mailbox - * @offset: length to add to head offset + * @offset: length to add to tail offset * * This function takes the local tail index and recomputes it for * a given length added as an offset. @@ -240,7 +240,7 @@ static u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) } /** - * fm10k_fifo_write_copy - pulls data off of msg and places it in fifo + * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO * @fifo: pointer to FIFO * @msg: message array to populate * @tail_offset: additional offset to add to tail pointer @@ -336,6 +336,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) /** * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will take a section of the Tx FIFO and copy it into the @@ -375,6 +376,8 @@ static void fm10k_mbx_write_copy(struct fm10k_hw *hw, if (!tail) tail++; + mbx->tx_mbmem_pulled++; + /* write message to hardware FIFO */ fm10k_write_reg(hw, mbmem + tail++, *(head++)); } while (--len && --end); @@ -459,6 +462,8 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, if (!head) head++; + mbx->rx_mbmem_pushed++; + /* read message from hardware FIFO */ *(tail++) = fm10k_read_reg(hw, mbmem + head++); } while (--len && --end); @@ -707,7 +712,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function dequeues messages and hands them off to the tlv parser. + * This function dequeues messages and hands them off to the TLV parser. * It will return the number of messages processed when called. **/ static u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, @@ -899,7 +904,7 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr * @mbx: pointer to mailbox * * This function creates a fake disconnect header for loading into remote @@ -920,7 +925,7 @@ static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_create_error_msg - Generate a error message + * fm10k_mbx_create_error_msg - Generate an error message * @mbx: pointer to mailbox * @err: local error encountered * @@ -953,7 +958,6 @@ static void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) /** * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header * @mbx: pointer to mailbox - * @msg: message array to read * * This function will parse up the fields in the mailbox header and return * an error if the header contains any of a number of invalid configurations @@ -1017,11 +1021,12 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) /** * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @head: acknowledgement number * * This function will generate an outgoing message based on the current - * mailbox state and the remote fifo head. It will return the length + * mailbox state and the remote FIFO head. It will return the length * of the outgoing message excluding header on success, and a negative value * on error. 
**/ @@ -1147,8 +1152,8 @@ static void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) /** * fm10k_mbx_process_connect - Process connect header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox - * @msg: message array to process * * This function will read an incoming connect header and reply with the * appropriate message. It will return a value indicating the number of @@ -1194,6 +1199,7 @@ static s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, /** * fm10k_mbx_process_data - Process data header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming data header and reply with the @@ -1235,6 +1241,7 @@ static s32 fm10k_mbx_process_data(struct fm10k_hw *hw, /** * fm10k_mbx_process_disconnect - Process disconnect header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming disconnect header and reply with the @@ -1287,6 +1294,7 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, /** * fm10k_mbx_process_error - Process error header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming error header and reply with the @@ -1556,7 +1564,7 @@ static s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes * * This function initializes the mailbox for use. It will split the - * buffer provided an use that th populate both the Tx and Rx FIFO by + * buffer provided and use that to populate both the Tx and Rx FIFO by * evenly splitting it. In order to allow for easy masking of head/tail * the value reported in size must be a power of 2 and is reported in * DWORDs, not bytes. Any invalid values will cause the mailbox to return @@ -1633,7 +1641,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO * @mbx: pointer to mailbox * - * This function returns a connection mailbox header + * This function returns a data mailbox header **/ static void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) { @@ -1726,8 +1734,6 @@ static s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) fm10k_sm_mbx_create_connect_hdr(mbx, 0); fm10k_mbx_write(hw, mbx); - /* enable interrupt and notify other party of new message */ - return 0; } @@ -1771,7 +1777,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, } /** - * fm10k_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header * @mbx: pointer to mailbox * * This function will parse up the fields in the mailbox header and return @@ -1849,7 +1855,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) } /** - * fm10k_sm_mbx_create_error_message - Process an error in FIFO hdr + * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header * @mbx: pointer to mailbox * @err: local error encountered * @@ -1879,6 +1885,7 @@ static void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx * @hw: pointer to hardware structure * @mbx: pointer to mailbox + * @tail: tail index of message * * This function will dequeue one message from the Rx switch manager mailbox * FIFO and place it in the Rx mailbox FIFO for processing by software. 
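Most of the fm10k_mbx.c hunks here only touch kernel-doc, adding the @hw, @head and @tail lines that were missing. For reference, a complete block in that style looks like this (the function and its parameters are illustrative):

	/**
	 * example_mbx_op - one-line summary of the helper
	 * @hw: pointer to hardware structure
	 * @mbx: pointer to mailbox
	 * @tail: tail index of message
	 *
	 * Longer description of behaviour and return values.  Every
	 * parameter gets an @name: line so scripts/kernel-doc does not
	 * warn about an undocumented argument.
	 **/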
@@ -1918,6 +1925,7 @@ static s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO * @hw: pointer to hardware structure * @mbx: pointer to mailbox + * @head: head index of message * * This function will dequeue one message from the Tx mailbox FIFO and place * it in the Tx switch manager mailbox FIFO for processing by hardware. @@ -1957,11 +1965,12 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /** * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @head: acknowledgement number * * This function will generate an outgoing message based on the current - * mailbox state and the remote fifo head. It will return the length + * mailbox state and the remote FIFO head. It will return the length * of the outgoing message excluding header on success, and a negative value * on error. **/ @@ -2073,7 +2082,7 @@ send_reply: } /** - * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt + * fm10k_sm_mbx_process - Process switch manager mailbox interrupt * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @@ -2129,13 +2138,19 @@ fifo_err: * @mbx: pointer to mailbox * @msg_data: handlers for mailbox events * - * This function for now is used to stub out the PF/SM mailbox + * This function initializes the PF/SM mailbox for use. It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. **/ s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *msg_data) { mbx->mbx_reg = FM10K_GMBX; mbx->mbmem_reg = FM10K_MBMEM_PF(0); + /* start out in closed state */ mbx->state = FM10K_STATE_CLOSED; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 0419a7f0035e..245a0a3dc32e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -128,11 +128,11 @@ enum fm10k_mbx_state { * The maximum message size is provided during connect to avoid * jamming the mailbox with messages that do not fit. * Err_no: Error number - Applies only to error headers - * The error number provides a indication of the type of error + * The error number provides an indication of the type of error * experienced. 
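Since the surrounding hunks only show the top of the mailbox header accessor macros, a standalone imitation of the FM10K_MSG_HDR_MASK / FM10K_MSG_HDR_FIELD_SET pattern (a matching getter is assumed here for symmetry) may make the bit packing concrete. The macro names, field names and field sizes below are invented for the illustration and are deliberately not the driver's own:

#include <stdint.h>
#include <stdio.h>

#define MSG_TYPE_SHIFT	0
#define MSG_TYPE_SIZE	3
#define MSG_TAIL_SHIFT	12
#define MSG_TAIL_SIZE	4

/* same token-pasting trick as FM10K_MSG_HDR_MASK(name) in fm10k_mbx.h */
#define MSG_HDR_MASK(name) \
	((0x1u << MSG_##name##_SIZE) - 1)
#define MSG_HDR_FIELD_SET(value, name) \
	(((uint32_t)(value) & MSG_HDR_MASK(name)) << MSG_##name##_SHIFT)
#define MSG_HDR_FIELD_GET(value, name) \
	(((uint32_t)(value) >> MSG_##name##_SHIFT) & MSG_HDR_MASK(name))

int main(void)
{
	/* pack two fields into one 32-bit mailbox header word */
	uint32_t hdr = MSG_HDR_FIELD_SET(5, TYPE) | MSG_HDR_FIELD_SET(9, TAIL);

	printf("type = %u, tail = %u\n",
	       (unsigned)MSG_HDR_FIELD_GET(hdr, TYPE),
	       (unsigned)MSG_HDR_FIELD_GET(hdr, TAIL));
	return 0;
}

The token pasting lets one set of macros serve every header field, so adding a field only requires a new SHIFT/SIZE pair of defines.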
*/ -/* macros for retriving and setting header values */ +/* macros for retrieving and setting header values */ #define FM10K_MSG_HDR_MASK(name) \ ((0x1u << FM10K_MSG_##name##_SIZE) - 1) #define FM10K_MSG_HDR_FIELD_SET(value, name) \ @@ -291,8 +291,10 @@ struct fm10k_mbx_info { u64 tx_dropped; u64 tx_messages; u64 tx_dwords; + u64 tx_mbmem_pulled; u64 rx_messages; u64 rx_dwords; + u64 rx_mbmem_pushed; u64 rx_parse_err; /* Buffer to store messages */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 7781e80896a6..662569d5b7c0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -20,7 +20,7 @@ #include "fm10k.h" #include <linux/vmalloc.h> -#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#ifdef CONFIG_FM10K_VXLAN #include <net/vxlan.h> #endif /* CONFIG_FM10K_VXLAN */ @@ -556,11 +556,11 @@ int fm10k_open(struct net_device *netdev) if (err) goto err_set_queues; -#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#ifdef CONFIG_FM10K_VXLAN /* update VXLAN port configuration */ vxlan_get_rx_port(netdev); - #endif + fm10k_up(interface); return 0; @@ -608,7 +608,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) unsigned int r_idx = skb->queue_mapping; int err; - if ((skb->protocol == htons(ETH_P_8021Q)) && + if ((skb->protocol == htons(ETH_P_8021Q)) && !skb_vlan_tag_present(skb)) { /* FM10K only supports hardware tagging, any tags in frame * are considered 2nd level or "outer" tags @@ -632,7 +632,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - /* locate vlan header */ + /* locate VLAN header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); /* pull the 2 key pieces of data out of it */ @@ -705,7 +705,7 @@ static void fm10k_tx_timeout(struct net_device *netdev) } else { netif_info(interface, drv, netdev, "Fake Tx hang detected with timeout of %d seconds\n", - netdev->watchdog_timeo/HZ); + netdev->watchdog_timeo / HZ); /* fake Tx hang - increase the kernel timeout */ if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) @@ -778,7 +778,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (!set) clear_bit(vid, interface->active_vlans); - /* disable the default VID on ring if we have an active VLAN */ + /* disable the default VLAN ID on ring if we have an active VLAN */ for (i = 0; i < interface->num_rx_queues; i++) { struct fm10k_ring *rx_ring = interface->rx_ring[i]; u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); @@ -789,7 +789,9 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) rx_ring->vid &= ~FM10K_VLAN_CLEAR; } - /* Do not remove default VID related entries from VLAN and MAC tables */ + /* Do not remove default VLAN ID related entries from VLAN and MAC + * tables + */ if (!set && vid == hw->mac.default_vid) return 0; @@ -814,7 +816,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (err) goto err_out; - /* set vid prior to syncing/unsyncing the VLAN */ + /* set VLAN ID prior to syncing/unsyncing the VLAN */ interface->vid = vid + (set ? 
VLAN_N_VID : 0); /* Update the unicast and multicast address list to add/drop VLAN */ @@ -1151,6 +1153,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, int fm10k_setup_tc(struct net_device *dev, u8 tc) { struct fm10k_intfc *interface = netdev_priv(dev); + int err; /* Currently only the PF supports priority classes */ if (tc && (interface->hw.mac.type != fm10k_mac_pf)) @@ -1175,17 +1178,30 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc) netdev_reset_tc(dev); netdev_set_num_tc(dev, tc); - fm10k_init_queueing_scheme(interface); + err = fm10k_init_queueing_scheme(interface); + if (err) + goto err_queueing_scheme; - fm10k_mbx_request_irq(interface); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; - if (netif_running(dev)) - fm10k_open(dev); + err = netif_running(dev) ? fm10k_open(dev) : 0; + if (err) + goto err_open; /* flag to indicate SWPRI has yet to be updated */ interface->flags |= FM10K_FLAG_SWPRI_CONFIG; return 0; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +err_queueing_scheme: + netif_device_detach(dev); + + return err; } static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) @@ -1355,7 +1371,7 @@ static netdev_features_t fm10k_features_check(struct sk_buff *skb, if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) return features; - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } static const struct net_device_ops fm10k_netdev_ops = { @@ -1388,8 +1404,9 @@ static const struct net_device_ops fm10k_netdev_ops = { #define DEFAULT_DEBUG_LEVEL_SHIFT 3 -struct net_device *fm10k_alloc_netdev(void) +struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) { + netdev_features_t hw_features; struct fm10k_intfc *interface; struct net_device *dev; @@ -1412,27 +1429,31 @@ struct net_device *fm10k_alloc_netdev(void) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXHASH | NETIF_F_RXCSUM; + /* Only the PF can support VXLAN and NVGRE tunnel offloads */ + if (info->mac == fm10k_mac_pf) { + dev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + dev->features |= NETIF_F_GSO_UDP_TUNNEL; + } + /* all features defined to this point should be changeable */ - dev->hw_features |= dev->features; + hw_features = dev->features; /* allow user to enable L2 forwarding acceleration */ - dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; /* configure VLAN features */ dev->vlan_features |= dev->features; - /* configure tunnel offloads */ - dev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_IPV6_CSUM; - /* we want to leave these both on as we cannot disable VLAN tag * insertion or stripping on the hardware since it is contained * in the FTAG and not in the frame itself. 
@@ -1443,5 +1464,7 @@ struct net_device *fm10k_alloc_netdev(void) dev->priv_flags |= IFF_UNICAST_FLT; + dev->hw_features |= hw_features; + return dev; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 74be792f3f1b..4eb7a6fa6b0d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -159,16 +159,40 @@ static void fm10k_reinit(struct fm10k_intfc *interface) fm10k_mbx_free_irq(interface); + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + /* delay any future reset requests */ interface->last_reset = jiffies + (10 * HZ); /* reset and initialize the hardware so it is in a known state */ - err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw); - if (err) + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err); + goto reinit_err; + } + + err = hw->mac.ops.init_hw(hw); + if (err) { dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err); + goto reinit_err; + } + + err = fm10k_init_queueing_scheme(interface); + if (err) { + dev_err(&interface->pdev->dev, + "init_queueing_scheme failed: %d\n", err); + goto reinit_err; + } /* reassociate interrupts */ - fm10k_mbx_request_irq(interface); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; + + err = fm10k_hw_ready(interface); + if (err) + goto err_open; /* update hardware address for VFs if perm_addr has changed */ if (hw->mac.type == fm10k_mac_vf) { @@ -188,14 +212,27 @@ static void fm10k_reinit(struct fm10k_intfc *interface) /* reset clock */ fm10k_ts_reset(interface); - if (netif_running(netdev)) - fm10k_open(netdev); + err = netif_running(netdev) ? fm10k_open(netdev) : 0; + if (err) + goto err_open; fm10k_iov_resume(interface->pdev); rtnl_unlock(); clear_bit(__FM10K_RESETTING, &interface->state); + + return; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +reinit_err: + netif_device_detach(netdev); + + rtnl_unlock(); + + clear_bit(__FM10K_RESETTING, &interface->state); } static void fm10k_reset_subtask(struct fm10k_intfc *interface) @@ -563,7 +600,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, /* store tail pointer */ ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; - /* reset ntu and ntc to place SW in sync with hardwdare */ + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; @@ -579,6 +616,13 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx), FM10K_PFVTCTL_FTAG_DESC_ENABLE); + /* Initialize XPS */ + if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) && + ring->q_vector) + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + /* enable queue */ fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl); } @@ -669,7 +713,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* store tail pointer */ ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)]; - /* reset ntu and ntc to place SW in sync with hardwdare */ + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; ring->next_to_alloc = 0; @@ -694,7 +738,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* assign default VLAN to queue */ ring->vid = hw->mac.default_vid; - /* if we have an active VLAN, disable default VID */ + /* if we have an active 
VLAN, disable default VLAN ID */ if (test_bit(hw->mac.default_vid, interface->active_vlans)) ring->vid |= FM10K_VLAN_CLEAR; @@ -846,7 +890,7 @@ static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) struct fm10k_q_vector *q_vector = data; if (q_vector->rx.count || q_vector->tx.count) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -859,7 +903,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> + hw->mac.itr_scale)); /* service upstream mailbox */ if (fm10k_mbx_trylock(interface)) { @@ -867,7 +912,7 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) fm10k_mbx_unlock(interface); } - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; fm10k_service_event_schedule(interface); return IRQ_HANDLED; @@ -897,7 +942,7 @@ void fm10k_netpoll(struct net_device *netdev) #endif #define FM10K_ERR_MSG(type) case (type): error = #type; break static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, - struct fm10k_fault *fault) + struct fm10k_fault *fault) { struct pci_dev *pdev = interface->pdev; struct fm10k_hw *hw = &interface->hw; @@ -1083,14 +1128,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) } /* we should validate host state after interrupt event */ - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; /* validate host state, and handle VF mailboxes in the service task */ fm10k_service_event_schedule(interface); /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> + hw->mac.itr_scale)); return IRQ_HANDLED; } @@ -1101,6 +1147,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface) struct fm10k_hw *hw = &interface->hw; int itr_reg; + /* no mailbox IRQ to free if MSI-X is not enabled */ + if (!interface->msix_entries) + return; + /* disconnect the mailbox */ hw->mbx.ops.disconnect(hw, &hw->mbx); @@ -1141,7 +1191,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, /* MAC was changed so we need reset */ if (is_valid_ether_addr(hw->mac.perm_addr) && - memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN)) + !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr)) interface->flags |= FM10K_FLAG_RESET_REQUESTED; /* VLAN override was changed, or default VLAN changed */ @@ -1269,7 +1319,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; - /* verify VID is valid */ + /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -1388,14 +1438,14 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) } /* Enable interrupts w/ no moderation for "other" interrupts */ - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr); + 
fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr); /* Enable interrupts w/ moderation for mailbox */ - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr); /* Enable individual interrupt causes */ fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | @@ -1423,10 +1473,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface) err = fm10k_mbx_request_irq_pf(interface); else err = fm10k_mbx_request_irq_vf(interface); + if (err) + return err; /* connect mailbox */ - if (!err) - err = hw->mbx.ops.connect(hw, &hw->mbx); + err = hw->mbx.ops.connect(hw, &hw->mbx); + + /* if the mailbox failed to connect, then free IRQ */ + if (err) + fm10k_mbx_free_irq(interface); return err; } @@ -1455,8 +1510,10 @@ void fm10k_qv_free_irq(struct fm10k_intfc *interface) if (!q_vector->tx.count && !q_vector->rx.count) continue; - /* disable interrupts */ + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + /* disable interrupts */ writel(FM10K_ITR_MASK_SET, q_vector->itr); free_irq(entry->vector, q_vector); @@ -1514,6 +1571,9 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) goto err_out; } + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); + /* Enable q_vector */ writel(FM10K_ITR_ENABLE, q_vector->itr); @@ -1534,8 +1594,10 @@ err_out: if (!q_vector->tx.count && !q_vector->rx.count) continue; - /* disable interrupts */ + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + /* disable interrupts */ writel(FM10K_ITR_MASK_SET, q_vector->itr); free_irq(entry->vector, q_vector); @@ -1573,7 +1635,7 @@ void fm10k_up(struct fm10k_intfc *interface) netif_tx_start_all_queues(interface->netdev); /* kick off the service timer now */ - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; mod_timer(&interface->service_timer, jiffies); } @@ -1684,7 +1746,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->last_reset = jiffies + (10 * HZ); /* reset and initialize the hardware so it is in a known state */ - err = hw->mac.ops.reset_hw(hw) ? 
: hw->mac.ops.init_hw(hw); + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&pdev->dev, "reset_hw failed: %d\n", err); + return err; + } + + err = hw->mac.ops.init_hw(hw); if (err) { dev_err(&pdev->dev, "init_hw failed: %d\n", err); return err; @@ -1722,13 +1790,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, pci_resource_len(pdev, 4)); hw->sw_addr = interface->sw_addr; - /* Only the PF can support VXLAN and NVGRE offloads */ - if (hw->mac.type != fm10k_mac_pf) { - netdev->hw_enc_features = 0; - netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; - } - /* initialize DCBNL interface */ fm10k_dcbnl_set_ops(netdev); @@ -1749,8 +1810,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->rx_ring_count = FM10K_DEFAULT_RXD; /* set default interrupt moderation */ - interface->tx_itr = FM10K_ITR_10K; - interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + interface->tx_itr = FM10K_TX_ITR_DEFAULT; + interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; /* initialize vxlan_port list */ INIT_LIST_HEAD(&interface->vxlan_port); @@ -1835,17 +1896,18 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface) return; } - if (max_gts < expected_gts) { - dev_warn(&interface->pdev->dev, - "This device requires %dGT/s of bandwidth for optimal performance.\n", - expected_gts); - dev_warn(&interface->pdev->dev, - "A %sslot with x%d lanes is suggested.\n", - (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : - hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : - hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), - hw->bus_caps.width); - } + if (max_gts >= expected_gts) + return; + + dev_warn(&interface->pdev->dev, + "This device requires %dGT/s of bandwidth for optimal performance.\n", + expected_gts); + dev_warn(&interface->pdev->dev, + "A %sslot with x%d lanes is suggested.\n", + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), + hw->bus_caps.width); } /** @@ -1859,8 +1921,7 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface) * The OS initialization, configuring of the interface private structure, * and a hardware reset occur. 
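The fm10k_setup_tc(), fm10k_reinit() and fm10k_resume() hunks above all converge on the same unwind-on-error shape, so a minimal standalone sketch of that pattern is worth having in one place. The step/undo helpers are stand-ins invented for the example, not driver functions:

#include <stdio.h>

static int step_a(void)  { puts("a: up");   return 0; }
static int step_b(void)  { puts("b: up");   return 0; }
static int step_c(void)  { puts("c: fail"); return -1; }
static void undo_b(void) { puts("b: undone"); }
static void undo_a(void) { puts("a: undone"); }

static int bring_up(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;

	err = step_b();
	if (err)
		goto err_b;

	err = step_c();
	if (err)
		goto err_c;

	return 0;

	/* unwind strictly in reverse order of setup */
err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return err;
}

int main(void)
{
	return bring_up() ? 1 : 0;
}

The point of the reverse-ordered labels is that a failure at any step releases exactly the resources acquired before it, which is what the new err_open / err_mbx_irq / err_queueing_scheme labels do in the driver.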
**/ -static int fm10k_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct fm10k_intfc *interface; @@ -1894,7 +1955,7 @@ static int fm10k_probe(struct pci_dev *pdev, pci_set_master(pdev); pci_save_state(pdev); - netdev = fm10k_alloc_netdev(); + netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]); if (!netdev) { err = -ENOMEM; goto err_alloc_netdev; @@ -2071,8 +2132,10 @@ static int fm10k_resume(struct pci_dev *pdev) /* reset hardware to known state */ err = hw->mac.ops.init_hw(&interface->hw); - if (err) + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); return err; + } /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); @@ -2083,16 +2146,22 @@ static int fm10k_resume(struct pci_dev *pdev) rtnl_lock(); err = fm10k_init_queueing_scheme(interface); - if (!err) { - fm10k_mbx_request_irq(interface); - if (netif_running(netdev)) - err = fm10k_open(netdev); - } + if (err) + goto err_queueing_scheme; - rtnl_unlock(); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; + err = fm10k_hw_ready(interface); if (err) - return err; + goto err_open; + + err = netif_running(netdev) ? fm10k_open(netdev) : 0; + if (err) + goto err_open; + + rtnl_unlock(); /* assume host is not ready, to prevent race with watchdog in case we * actually don't have connection to the switch @@ -2110,6 +2179,14 @@ static int fm10k_resume(struct pci_dev *pdev) netif_device_attach(netdev); return 0; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +err_queueing_scheme: + rtnl_unlock(); + + return err; } /** @@ -2185,6 +2262,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) fm10k_close(netdev); + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + fm10k_mbx_free_irq(interface); pci_disable_device(pdev); @@ -2248,11 +2328,22 @@ static void fm10k_io_resume(struct pci_dev *pdev) int err = 0; /* reset hardware to known state */ - hw->mac.ops.init_hw(&interface->hw); + err = hw->mac.ops.init_hw(&interface->hw); + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); + return; + } /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + err = fm10k_init_queueing_scheme(interface); + if (err) { + dev_err(&interface->pdev->dev, + "init_queueing_scheme failed: %d\n", err); + return; + } + /* reassociate interrupts */ fm10k_mbx_request_irq(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 8c0bdc4e4edd..62ccebc5f728 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -150,19 +150,26 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) FM10K_TPH_RXCTRL_HDR_WROEN); } - /* set max hold interval to align with 1.024 usec in all modes */ + /* set max hold interval to align with 1.024 usec in all modes and + * store ITR scale + */ switch (hw->bus.speed) { case fm10k_bus_speed_2500: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1; break; case fm10k_bus_speed_5000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2; break; case fm10k_bus_speed_8000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; default: dma_ctrl = 0; + /* just in case, assume Gen3 ITR scale */ + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; } @@ -259,7 +266,6 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) { u8 perm_addr[ETH_ALEN]; u32 serial_num; - int i; serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1)); @@ -281,10 +287,8 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) perm_addr[4] = (u8)(serial_num >> 8); perm_addr[5] = (u8)(serial_num); - for (i = 0; i < ETH_ALEN; i++) { - hw->mac.perm_addr[i] = perm_addr[i]; - hw->mac.addr[i] = perm_addr[i]; - } + ether_addr_copy(hw->mac.perm_addr, perm_addr); + ether_addr_copy(hw->mac.addr, perm_addr); return 0; } @@ -325,7 +329,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, /* clear set bit from VLAN ID */ vid &= ~FM10K_VLAN_CLEAR; - /* if glort or vlan are not valid return error */ + /* if glort or VLAN are not valid return error */ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -334,8 +338,8 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, ((u32)mac[3] << 16) | ((u32)mac[4] << 8) | ((u32)mac[5])); - mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) | - ((u32)mac[1])); + mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) | + ((u16)mac[1])); mac_update.vlan = cpu_to_le16(vid); mac_update.glort = cpu_to_le16(glort); mac_update.action = add ? 0 : 1; @@ -410,6 +414,7 @@ static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; + /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; @@ -903,6 +908,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah); + /* Provide the VF the ITR scale, using software-defined fields in TDLEN + * to pass the information during VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details. 
+ */ + fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + err_out: /* configure Queue control register */ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & @@ -910,7 +922,7 @@ err_out: txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; - /* assign VID */ + /* assign VLAN ID */ for (i = 0; i < queues_per_pool; i++) fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); @@ -1035,6 +1047,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an + * explanation of how TDLEN is used. + */ + fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i), + hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); } @@ -1155,14 +1173,14 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, } /** - * fm10k_iov_select_vid - Select correct default VID + * fm10k_iov_select_vid - Select correct default VLAN ID * @hw: Pointer to hardware structure - * @vid: VID to correct + * @vid: VLAN ID to correct * - * Will report an error if VID is out of range. For VID = 0, it will return - * either the pf_vid or sw_vid depending on which one is set. + * Will report an error if the VLAN ID is out of range. For VID = 0, it will + * return either the pf_vid or sw_vid depending on which one is set. */ -static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) { if (!vid) return vf_info->pf_vid ? 
vf_info->pf_vid : vf_info->sw_vid; @@ -1212,11 +1230,11 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, set = !(vid & FM10K_VLAN_CLEAR); vid &= ~FM10K_VLAN_CLEAR; - err = fm10k_iov_select_vid(vf_info, vid); + err = fm10k_iov_select_vid(vf_info, (u16)vid); if (err < 0) return err; - else - vid = err; + + vid = err; /* update VSI info for VF in regards to VLAN table */ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); @@ -1232,7 +1250,7 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, /* block attempts to set MAC for a locked device */ if (is_valid_ether_addr(vf_info->mac) && - memcmp(mac, vf_info->mac, ETH_ALEN)) + !ether_addr_equal(mac, vf_info->mac)) return FM10K_ERR_PARAM; set = !(vlan & FM10K_VLAN_CLEAR); @@ -1241,8 +1259,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; - else - vlan = err; + + vlan = (u16)err; /* notify switch of request for new unicast address */ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, @@ -1267,8 +1285,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; - else - vlan = err; + + vlan = (u16)err; /* notify switch of request for new multicast address */ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, @@ -1396,14 +1414,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, return err; } -const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { - FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), - FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), - FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), - FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), - FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), -}; - /** * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF * @hw: pointer to hardware structure @@ -1431,9 +1441,10 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, &stats->vlan_drop); - loopback_drop = fm10k_read_hw_stats_32b(hw, - FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + loopback_drop = + fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1678,8 +1689,8 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { * * This handler configures the default VLAN for the PF **/ -s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) +static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) { u16 glort, pvid; u32 pvid_update; @@ -1698,7 +1709,7 @@ s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; - /* verify VID is valid */ + /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -1855,39 +1866,39 @@ static const struct fm10k_msg_data fm10k_msg_data_pf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -static struct fm10k_mac_ops mac_ops_pf = { - .get_bus_info = &fm10k_get_bus_info_generic, - .reset_hw = &fm10k_reset_hw_pf, - .init_hw = &fm10k_init_hw_pf, - .start_hw = &fm10k_start_hw_generic, - .stop_hw = &fm10k_stop_hw_generic, - .update_vlan = &fm10k_update_vlan_pf, - .read_mac_addr = 
&fm10k_read_mac_addr_pf, - .update_uc_addr = &fm10k_update_uc_addr_pf, - .update_mc_addr = &fm10k_update_mc_addr_pf, - .update_xcast_mode = &fm10k_update_xcast_mode_pf, - .update_int_moderator = &fm10k_update_int_moderator_pf, - .update_lport_state = &fm10k_update_lport_state_pf, - .update_hw_stats = &fm10k_update_hw_stats_pf, - .rebind_hw_stats = &fm10k_rebind_hw_stats_pf, - .configure_dglort_map = &fm10k_configure_dglort_map_pf, - .set_dma_mask = &fm10k_set_dma_mask_pf, - .get_fault = &fm10k_get_fault_pf, - .get_host_state = &fm10k_get_host_state_pf, - .adjust_systime = &fm10k_adjust_systime_pf, - .read_systime = &fm10k_read_systime_pf, +static const struct fm10k_mac_ops mac_ops_pf = { + .get_bus_info = fm10k_get_bus_info_generic, + .reset_hw = fm10k_reset_hw_pf, + .init_hw = fm10k_init_hw_pf, + .start_hw = fm10k_start_hw_generic, + .stop_hw = fm10k_stop_hw_generic, + .update_vlan = fm10k_update_vlan_pf, + .read_mac_addr = fm10k_read_mac_addr_pf, + .update_uc_addr = fm10k_update_uc_addr_pf, + .update_mc_addr = fm10k_update_mc_addr_pf, + .update_xcast_mode = fm10k_update_xcast_mode_pf, + .update_int_moderator = fm10k_update_int_moderator_pf, + .update_lport_state = fm10k_update_lport_state_pf, + .update_hw_stats = fm10k_update_hw_stats_pf, + .rebind_hw_stats = fm10k_rebind_hw_stats_pf, + .configure_dglort_map = fm10k_configure_dglort_map_pf, + .set_dma_mask = fm10k_set_dma_mask_pf, + .get_fault = fm10k_get_fault_pf, + .get_host_state = fm10k_get_host_state_pf, + .adjust_systime = fm10k_adjust_systime_pf, + .read_systime = fm10k_read_systime_pf, }; -static struct fm10k_iov_ops iov_ops_pf = { - .assign_resources = &fm10k_iov_assign_resources_pf, - .configure_tc = &fm10k_iov_configure_tc_pf, - .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf, +static const struct fm10k_iov_ops iov_ops_pf = { + .assign_resources = fm10k_iov_assign_resources_pf, + .configure_tc = fm10k_iov_configure_tc_pf, + .assign_int_moderator = fm10k_iov_assign_int_moderator_pf, .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf, - .reset_resources = &fm10k_iov_reset_resources_pf, - .set_lport = &fm10k_iov_set_lport_pf, - .reset_lport = &fm10k_iov_reset_lport_pf, - .update_stats = &fm10k_iov_update_stats_pf, - .report_timestamp = &fm10k_iov_report_timestamp_pf, + .reset_resources = fm10k_iov_reset_resources_pf, + .set_lport = fm10k_iov_set_lport_pf, + .reset_lport = fm10k_iov_reset_lport_pf, + .update_stats = fm10k_iov_update_stats_pf, + .report_timestamp = fm10k_iov_report_timestamp_pf, }; static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) @@ -1897,9 +1908,9 @@ static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); } -struct fm10k_info fm10k_pf_info = { +const struct fm10k_info fm10k_pf_info = { .mac = fm10k_mac_pf, - .get_invariants = &fm10k_get_invariants_pf, + .get_invariants = fm10k_get_invariants_pf, .mac_ops = &mac_ops_pf, .iov_ops = &iov_ops_pf, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 40a0dbc62a04..b2d96b45ca3c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -74,6 +74,11 @@ enum fm10k_pf_tlv_attr_id_v1 { #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 +/* The following data structures are overlayed directly onto TLV mailbox + * messages, and must not break 4 byte alignment. Ensure the structures line + * up correctly as per their TLV definition. + */ + struct fm10k_mac_update { __le32 mac_lower; __le16 mac_upper; @@ -81,34 +86,32 @@ struct fm10k_mac_update { __le16 glort; u8 flags; u8 action; -} __packed; +} __aligned(4) __packed; struct fm10k_global_table_data { __le32 used; __le32 avail; -} __packed; +} __aligned(4) __packed; struct fm10k_swapi_error { __le32 status; struct fm10k_global_table_data mac; struct fm10k_global_table_data nexthop; struct fm10k_global_table_data ffu; -} __packed; +} __aligned(4) __packed; struct fm10k_swapi_1588_timestamp { __le64 egress; __le64 ingress; __le16 dglort; __le16 sglort; -} __packed; +} __aligned(4) __packed; s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ fm10k_lport_map_msg_attr, func) -s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **, - struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; #define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ @@ -129,7 +132,6 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); -extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; -extern struct fm10k_info fm10k_pf_info; +extern const struct fm10k_info fm10k_pf_info; #endif /* _FM10K_PF_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 9b29d7b0377a..ab01bb30752f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,8 +48,8 @@ s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) * the attribute buffer. It will return success if provided with a valid * pointers. **/ -s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, - const unsigned char *string) +static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) { u32 attr_data = 0, len = 0; u32 *attr; @@ -98,7 +98,7 @@ s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, * it in the array pointed by by string. It will return success if provided * with a valid pointers. **/ -s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) { u32 len; @@ -353,7 +353,7 @@ s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) * function will return NULL on failure, and a pointer to the start * of the nested attributes on success. 
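Looking back at the fm10k_pf.h hunk above that pairs __aligned(4) with __packed on the structures overlaid onto TLV mailbox messages, a small standalone illustration of why the extra attribute matters may help; it uses the plain GCC/Clang attribute spelling rather than the kernel wrappers, the struct layout mirrors fm10k_mac_update, and the compile-time checks play the same role as the CHECK_STRUCT_LEN macros visible in the i40e hunks later in this diff:

#include <assert.h>
#include <stdint.h>

/* __packed alone drops the structure's alignment to 1; adding aligned(4)
 * restores 4-byte alignment so the overlay stays in step with the 4-byte
 * TLV word stream, as the new comment in fm10k_pf.h requires.
 */
struct mac_update_example {
	uint32_t mac_lower;
	uint16_t mac_upper;
	uint16_t vlan;
	uint16_t glort;
	uint8_t  flags;
	uint8_t  action;
} __attribute__((packed, aligned(4)));

/* fail the build if the overlay drifts from the expected layout */
static_assert(sizeof(struct mac_update_example) == 12,
	      "overlay must stay 12 bytes (three 32-bit TLV words)");
static_assert(_Alignof(struct mac_update_example) == 4,
	      "overlay must keep 4-byte alignment");

int main(void)
{
	return 0;
}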
**/ -u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) { u32 *attr; @@ -370,7 +370,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) } /** - * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes * @msg: Pointer to message block * * This function closes off an existing set of nested attributes. The @@ -378,7 +378,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) * the case of a nest within the nest this would be the outer nest pointer. * This function will return success provided all pointers are valid. **/ -s32 fm10k_tlv_attr_nest_stop(u32 *msg) +static s32 fm10k_tlv_attr_nest_stop(u32 *msg) { u32 *attr; u32 len; @@ -483,8 +483,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr, * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array * and 0 on success. **/ -s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, - const struct fm10k_tlv_attr *tlv_attr) +static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; s32 err = 0; @@ -755,7 +755,7 @@ parse_nested: err = fm10k_tlv_attr_get_mac_vlan( results[FM10K_TEST_MSG_MAC_ADDR], result_mac, &result_vlan); - if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + if (!err && !ether_addr_equal(test_mac, result_mac)) err = FM10K_ERR_INVALID_VALUE; if (!err && test_vlan != result_vlan) err = FM10K_ERR_INVALID_VALUE; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index 7e045e8bf1eb..e1845e0a17d8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,9 +38,9 @@ struct fm10k_msg_data; * mailbox size we will provide a message with the above header and it * will be segmented and transported to the mailbox to the other side where * it is reassembled. 
It contains the following fields: - * Len: Length of the message in bytes excluding the message header + * Length: Length of the message in bytes excluding the message header * Flags: TBD - * Rule: These will be the message/argument types we pass + * Type/ID: These will be the message/argument types we pass */ /* message data header */ #define FM10K_TLV_ID_SHIFT 0 @@ -106,8 +106,6 @@ struct fm10k_msg_data { #define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } s32 fm10k_tlv_msg_init(u32 *, u16); -s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *); -s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *); s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); s32 fm10k_tlv_attr_put_bool(u32 *, u16); @@ -147,9 +145,6 @@ s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); -u32 *fm10k_tlv_attr_nest_start(u32 *, u16); -s32 fm10k_tlv_attr_nest_stop(u32 *); -s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *); s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, const struct fm10k_msg_data *); s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 318a212f0a78..854ebb1906bf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -77,6 +77,7 @@ struct fm10k_hw; #define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 #define FM10K_ERR_PARAM -2 +#define FM10K_ERR_NO_RESOURCES -3 #define FM10K_ERR_REQUESTS_PENDING -4 #define FM10K_ERR_RESET_REQUESTED -5 #define FM10K_ERR_DMA_PENDING -6 @@ -271,6 +272,20 @@ struct fm10k_hw; #define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) #define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) #define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +/* When fist initialized, VFs need to know the Interrupt Throttle Rate (ITR) + * scale which is based on the PCIe speed but the speed information in the PCI + * configuration space may not be accurate. The PF already knows the ITR scale + * but there is no defined method to pass that information from the PF to the + * VF. This is accomplished during VF initialization by temporarily co-opting + * the yet-to-be-used TDLEN register to have the PF store the ITR shift for + * the VF to retrieve before the VF needs to use the TDLEN register for its + * intended purpose, i.e. before the Tx resources are allocated. 
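This comment is the fullest description in the series of how the ITR scale travels from PF to VF through the otherwise-unused TDLEN register, so a standalone sketch of both ends of that handshake is included here. The shift and mask values are the ones defined just below; the encode/decode helpers and the plumbing of the register value are invented for the illustration (the driver reads and writes the register directly):

#include <stdint.h>
#include <stdio.h>

#define FM10K_TDLEN_ITR_SCALE_SHIFT	9
#define FM10K_TDLEN_ITR_SCALE_MASK	0x00000E00

/* PF side: stash the ITR scale in the software-defined TDLEN bits */
static uint32_t pf_encode_itr_scale(uint32_t itr_scale)
{
	return (itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT) &
	       FM10K_TDLEN_ITR_SCALE_MASK;
}

/* VF side: recover the scale from TDLEN(0) before Tx resources are set up */
static uint32_t vf_decode_itr_scale(uint32_t tdlen)
{
	return (tdlen & FM10K_TDLEN_ITR_SCALE_MASK) >>
	       FM10K_TDLEN_ITR_SCALE_SHIFT;
}

int main(void)
{
	uint32_t tdlen = pf_encode_itr_scale(2);	/* Gen1 scale value */

	printf("VF sees itr_scale = %u\n",
	       (unsigned)vf_decode_itr_scale(tdlen));
	return 0;
}

On the PF side this corresponds to the fm10k_write_reg(hw, FM10K_TDLEN(...), hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT) calls added in fm10k_pf.c, and on the VF side to the read back out of TDLEN(0) in fm10k_init_hw_vf() shown further down.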
+ */ +#define FM10K_TDLEN_ITR_SCALE_SHIFT 9 +#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00 +#define FM10K_TDLEN_ITR_SCALE_GEN1 2 +#define FM10K_TDLEN_ITR_SCALE_GEN2 1 +#define FM10K_TDLEN_ITR_SCALE_GEN3 0 #define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) #define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 #define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 @@ -339,7 +354,7 @@ struct fm10k_hw; #define FM10K_VLAN_TABLE_VID_MAX 4096 #define FM10K_VLAN_TABLE_VSI_MAX 64 #define FM10K_VLAN_LENGTH_SHIFT 16 -#define FM10K_VLAN_CLEAR (1 << 15) +#define FM10K_VLAN_CLEAR BIT(15) #define FM10K_VLAN_ALL \ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) @@ -373,13 +388,13 @@ struct fm10k_hw; #define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) enum fm10k_int_source { - fm10k_int_Mailbox = 0, - fm10k_int_PCIeFault = 1, - fm10k_int_SwitchUpDown = 2, - fm10k_int_SwitchEvent = 3, - fm10k_int_SRAM = 4, - fm10k_int_VFLR = 5, - fm10k_int_MaxHoldTime = 6, + fm10k_int_mailbox = 0, + fm10k_int_pcie_fault = 1, + fm10k_int_switch_up_down = 2, + fm10k_int_switch_event = 3, + fm10k_int_sram = 4, + fm10k_int_vflr = 5, + fm10k_int_max_hold_time = 6, fm10k_int_sources_max_pf }; @@ -535,7 +550,6 @@ struct fm10k_mac_ops { struct fm10k_dglort_cfg *); void (*set_dma_mask)(struct fm10k_hw *, u64); s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); - void (*request_lport_map)(struct fm10k_hw *); s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); u64 (*read_systime)(struct fm10k_hw *); }; @@ -559,6 +573,7 @@ struct fm10k_mac_info { bool get_host_state; bool tx_ready; u32 dglort_map; + u8 itr_scale; }; struct fm10k_swapi_table_info { @@ -644,10 +659,10 @@ enum fm10k_devices { }; struct fm10k_info { - enum fm10k_mac_type mac; - s32 (*get_invariants)(struct fm10k_hw *); - struct fm10k_mac_ops *mac_ops; - struct fm10k_iov_ops *iov_ops; + enum fm10k_mac_type mac; + s32 (*get_invariants)(struct fm10k_hw *); + const struct fm10k_mac_ops *mac_ops; + const struct fm10k_iov_ops *iov_ops; }; struct fm10k_hw { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 36c8b0aa08fd..91f8d7311f3b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -28,7 +28,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) { u8 *perm_addr = hw->mac.perm_addr; - u32 bal = 0, bah = 0; + u32 bal = 0, bah = 0, tdlen; s32 err; u16 i; @@ -48,6 +48,9 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) ((u32)perm_addr[2]); } + /* restore default itr_scale for next VF initialization */ + tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT; + /* The queues have already been disabled so we just need to * update their base address registers */ @@ -56,6 +59,12 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) fm10k_write_reg(hw, FM10K_TDBAH(i), bah); fm10k_write_reg(hw, FM10K_RDBAL(i), bal); fm10k_write_reg(hw, FM10K_RDBAH(i), bah); + /* Restore ITR scale in software-defined mechanism in TDLEN + * for next VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of + * TDLEN here. 
+ */ + fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen); } return 0; @@ -103,7 +112,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) s32 err; u16 i; - /* assume we always have at least 1 queue */ + /* verify we have at least 1 queue */ + if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) || + !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) { + err = FM10K_ERR_NO_RESOURCES; + goto reset_max_queues; + } + + /* determine how many queues we have */ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { /* verify the Descriptor cache offsets are increasing */ tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i)); @@ -119,16 +135,28 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) /* shut down queues we own and reset DMA configuration */ err = fm10k_disable_queues_generic(hw, i); if (err) - return err; + goto reset_max_queues; /* record maximum queue count */ hw->mac.max_queues = i; - /* fetch default VLAN */ + /* fetch default VLAN and ITR scale */ hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + /* Read the ITR scale from TDLEN. See the definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is + * used here. + */ + hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) & + FM10K_TDLEN_ITR_SCALE_MASK) >> + FM10K_TDLEN_ITR_SCALE_SHIFT; return 0; + +reset_max_queues: + hw->mac.max_queues = 0; + + return err; } /* This structure defines the attibutes to be parsed below */ @@ -270,7 +298,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, /* verify we are not locked down on the MAC address */ if (is_valid_ether_addr(hw->mac.perm_addr) && - memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + !ether_addr_equal(hw->mac.perm_addr, mac)) return FM10K_ERR_PARAM; /* add bit to notify us if this is a set or clear operation */ @@ -414,6 +442,7 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; + /* generate message requesting to change xcast mode */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); @@ -533,25 +562,25 @@ static const struct fm10k_msg_data fm10k_msg_data_vf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -static struct fm10k_mac_ops mac_ops_vf = { - .get_bus_info = &fm10k_get_bus_info_generic, - .reset_hw = &fm10k_reset_hw_vf, - .init_hw = &fm10k_init_hw_vf, - .start_hw = &fm10k_start_hw_generic, - .stop_hw = &fm10k_stop_hw_vf, - .update_vlan = &fm10k_update_vlan_vf, - .read_mac_addr = &fm10k_read_mac_addr_vf, - .update_uc_addr = &fm10k_update_uc_addr_vf, - .update_mc_addr = &fm10k_update_mc_addr_vf, - .update_xcast_mode = &fm10k_update_xcast_mode_vf, - .update_int_moderator = &fm10k_update_int_moderator_vf, - .update_lport_state = &fm10k_update_lport_state_vf, - .update_hw_stats = &fm10k_update_hw_stats_vf, - .rebind_hw_stats = &fm10k_rebind_hw_stats_vf, - .configure_dglort_map = &fm10k_configure_dglort_map_vf, - .get_host_state = &fm10k_get_host_state_generic, - .adjust_systime = &fm10k_adjust_systime_vf, - .read_systime = &fm10k_read_systime_vf, +static const struct fm10k_mac_ops mac_ops_vf = { + .get_bus_info = fm10k_get_bus_info_generic, + .reset_hw = fm10k_reset_hw_vf, + .init_hw = fm10k_init_hw_vf, + .start_hw = fm10k_start_hw_generic, + .stop_hw = fm10k_stop_hw_vf, + .update_vlan = fm10k_update_vlan_vf, + .read_mac_addr = fm10k_read_mac_addr_vf, + .update_uc_addr = fm10k_update_uc_addr_vf, + .update_mc_addr = 
fm10k_update_mc_addr_vf, + .update_xcast_mode = fm10k_update_xcast_mode_vf, + .update_int_moderator = fm10k_update_int_moderator_vf, + .update_lport_state = fm10k_update_lport_state_vf, + .update_hw_stats = fm10k_update_hw_stats_vf, + .rebind_hw_stats = fm10k_rebind_hw_stats_vf, + .configure_dglort_map = fm10k_configure_dglort_map_vf, + .get_host_state = fm10k_get_host_state_generic, + .adjust_systime = fm10k_adjust_systime_vf, + .read_systime = fm10k_read_systime_vf, }; static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) @@ -561,8 +590,8 @@ static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); } -struct fm10k_info fm10k_vf_info = { +const struct fm10k_info fm10k_vf_info = { .mac = fm10k_mac_vf, - .get_invariants = &fm10k_get_invariants_vf, + .get_invariants = fm10k_get_invariants_vf, .mac_ops = &mac_ops_vf, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index 06a99d794c99..c4439f1313a0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -74,5 +74,5 @@ extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; #define FM10K_VF_MSG_1588_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) -extern struct fm10k_info fm10k_vf_info; +extern const struct fm10k_info fm10k_vf_info; #endif /* _FM10K_VF_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4dd3e26129b4..68f2204ec6f3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -42,7 +42,6 @@ #include <linux/string.h> #include <linux/in.h> #include <linux/ip.h> -#include <linux/tcp.h> #include <linux/sctp.h> #include <linux/pkt_sched.h> #include <linux/ipv6.h> @@ -104,6 +103,7 @@ #define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) +#define I40E_PRIV_FLAGS_PS BIT(4) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -187,6 +187,7 @@ struct i40e_lump_tracking { #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -244,6 +245,11 @@ struct i40e_tc_configuration { struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; }; +struct i40e_udp_port_config { + __be16 index; + u8 type; +}; + /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; @@ -265,7 +271,7 @@ struct i40e_pf { u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ int queues_left; /* queues left unclaimed */ - u16 rss_size; /* num queues in the RSS array */ + u16 alloc_rss_size; /* allocated RSS queues */ u16 rss_size_max; /* HW defined max RSS queues */ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ u16 num_alloc_vsi; /* num VSIs this driver supports */ @@ -280,11 +286,9 @@ struct i40e_pf { u32 fd_atr_cnt; u32 fd_tcp_rule; -#ifdef CONFIG_I40E_VXLAN - __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; - u16 pending_vxlan_bitmap; + struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + u16 pending_udp_bitmap; -#endif enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; @@ -321,9 +325,7 @@ struct 
i40e_pf { #define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26) -#ifdef CONFIG_I40E_VXLAN -#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27) -#endif +#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) #define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) #define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) @@ -335,7 +337,9 @@ struct i40e_pf { #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) +#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) +#define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -412,7 +416,7 @@ struct i40e_pf { u32 rx_hwtstamp_cleared; bool ptp_tx; bool ptp_rx; - u16 rss_table_size; + u16 rss_table_size; /* HW RSS table size */ /* These are only valid in NPAR modes */ u32 npar_max_bw; u32 npar_min_bw; @@ -487,6 +491,7 @@ struct i40e_vsi { u32 tx_restart; u32 tx_busy; u64 tx_linearize; + u64 tx_force_wb; u32 rx_buf_failed; u32 rx_page_failed; @@ -504,8 +509,10 @@ struct i40e_vsi { u16 tx_itr_setting; u16 int_rate_limit; /* value in usecs */ - u16 rss_table_size; - u16 rss_size; + u16 rss_table_size; /* HW RSS table size */ + u16 rss_size; /* Allocated RSS queues */ + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; u16 rx_hdr_len; @@ -575,6 +582,9 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ +#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */ + unsigned long hung_detected; /* Set/Reset for hung_detection logic */ + cpumask_t affinity_mask; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; @@ -602,8 +612,8 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw) full_ver = hw->nvm.oem_ver; ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); - build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) - & I40E_OEM_VER_BUILD_MASK); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) & + I40E_OEM_VER_BUILD_MASK); patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), @@ -668,6 +678,8 @@ extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); +int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi); @@ -691,7 +703,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev); void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev); -int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); @@ -709,7 +721,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, void i40e_veb_release(struct i40e_veb *veb); int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); -i40e_status 
i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); +int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); void i40e_vsi_remove_pvid(struct i40e_vsi *vsi); void i40e_vsi_reset_stats(struct i40e_vsi *vsi); void i40e_pf_reset_stats(struct i40e_pf *pf); @@ -767,6 +779,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 0ff8f01e57ee..1fd5ea82a9bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* initialize locks */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - /* destroy the locks */ - if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6584b6cd73fd..b22012a446a6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -227,6 +227,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1891,6 +1892,26 @@ struct i40e_aqc_nvm_config_data_immediate_field { I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ +struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2403,4 +2424,4 @@ struct i40e_aqc_debug_modify_internals { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); -#endif +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2d74c6e4d7b6..6a034ddac36a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -44,7 +44,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d4b7af9a2fc8..10744a698d6f 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -103,8 +103,8 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos += len; return len; @@ -353,8 +353,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos = len; return len; @@ -981,12 +981,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); - if (bytes_not_copied < 0) { + if (bytes_not_copied) { kfree(cmd_buf); - return bytes_not_copied; + return -EFAULT; } - if (bytes_not_copied > 0) - count -= bytes_not_copied; cmd_buf[count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); @@ -1140,7 +1138,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); - ret = i40e_sync_vsi_filters(vsi, true); + ret = i40e_sync_vsi_filters(vsi); if (f && !ret) dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d added to VSI %d\n", @@ -1179,7 +1177,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); - ret = i40e_sync_vsi_filters(vsi, true); + ret = i40e_sync_vsi_filters(vsi); if (!ret) dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d removed from VSI %d\n", @@ -2034,8 +2032,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos = len; return len; @@ -2068,10 +2066,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied > 0) - count -= bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; i40e_dbg_netdev_ops_buf[count] = '\0'; buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index c601ca4a610c..448ef4c17efb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 3f385ffe420f..29d5833e24a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -88,6 +88,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), 
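The debugfs hunks above change every "if (bytes_not_copied < 0)" test into "if (bytes_not_copied) return -EFAULT;": copy_to_user() and copy_from_user() return the number of bytes they could not copy, never a negative errno, so the old checks could never fire. A minimal sketch of the corrected read-handler idiom, with a hypothetical file name and buffer (not part of the patch):

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Illustrative sketch only: copy_to_user() returns the count of bytes it
 * could NOT copy, so any non-zero result means a faulting user buffer and
 * must be reported as -EFAULT by the caller.
 */
static ssize_t example_dbg_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	static const char data[] = "example\n";
	size_t datalen = sizeof(data) - 1;
	size_t len;

	if (*ppos >= datalen)
		return 0;				/* EOF */
	len = min_t(size_t, count, datalen - *ppos);
	if (copy_to_user(buffer, data + *ppos, len))
		return -EFAULT;				/* never return the raw count */
	*ppos += len;
	return len;
}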
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), + I40E_VSI_STAT("tx_force_wb", tx_force_wb), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, @@ -230,6 +231,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "LinkPolling", "flow-director-atr", "veb-stats", + "packet-split", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -2110,7 +2112,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = vsi->alloc_queue_pairs; + cmd->data = vsi->num_queue_pairs; ret = 0; break; case ETHTOOL_GRXFH: @@ -2583,7 +2585,6 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } -#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) /** * i40e_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure @@ -2611,10 +2612,9 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 reg_val; - int i, j; + u8 *lut, *seed = NULL; + int ret; + u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -2622,24 +2622,20 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - reg_val = rd32(hw, I40E_PFQF_HLUT(i)); - indir[j++] = reg_val & 0xff; - indir[j++] = (reg_val >> 8) & 0xff; - indir[j++] = (reg_val >> 16) & 0xff; - indir[j++] = (reg_val >> 24) & 0xff; - } + seed = key; + lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE); + if (ret) + goto out; + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) + indir[i] = (u32)(lut[i]); - if (key) { - for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { - reg_val = rd32(hw, I40E_PFQF_HKEY(i)); - key[j++] = (u8)(reg_val & 0xff); - key[j++] = (u8)((reg_val >> 8) & 0xff); - key[j++] = (u8)((reg_val >> 16) & 0xff); - key[j++] = (u8)((reg_val >> 24) & 0xff); - } - } - return 0; +out: + kfree(lut); + + return ret; } /** @@ -2656,10 +2652,8 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 reg_val; - int i, j; + u8 *seed = NULL; + u16 i; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; @@ -2667,24 +2661,28 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - reg_val = indir[j++]; - reg_val |= indir[j++] << 8; - reg_val |= indir[j++] << 16; - reg_val |= indir[j++] << 24; - wr32(hw, I40E_PFQF_HLUT(i), reg_val); - } - if (key) { - for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { - reg_val = key[j++]; - reg_val |= key[j++] << 8; - reg_val |= key[j++] << 16; - reg_val |= key[j++] << 24; - wr32(hw, I40E_PFQF_HKEY(i), reg_val); + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; } + memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; } - return 0; + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, 
GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; + } + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + + return i40e_config_rss(vsi, seed, vsi->rss_lut_user, + I40E_HLUT_ARRAY_SIZE); } /** @@ -2712,6 +2710,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_FD_ATR : 0; ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? I40E_PRIV_FLAGS_VEB_STATS : 0; + ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? + I40E_PRIV_FLAGS_PS : 0; return ret_flags; } @@ -2726,6 +2726,26 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + bool reset_required = false; + + /* NOTE: MFP is not settable */ + + /* allow the user to control the method of receive + * buffer DMA, whether the packet is split at header + * boundaries into two separate buffers. In some cases + * one routine or the other will perform better. + */ + if ((flags & I40E_PRIV_FLAGS_PS) && + !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) { + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED; + reset_required = true; + } else if (!(flags & I40E_PRIV_FLAGS_PS) && + (pf->flags & I40E_FLAG_RX_PS_ENABLED)) { + pf->flags &= ~I40E_FLAG_RX_PS_ENABLED; + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; + reset_required = true; + } if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; @@ -2748,6 +2768,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) else pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + /* if needed, issue reset to cause things to take effect */ + if (reset_required) + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index fe5d9bf3ed6d..579a46ca82df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1544,8 +1544,6 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) return; - BUG_ON(!pf->vsi[pf->lan_vsi]); - for (i = 0; i < pf->num_alloc_vsi; i++) { vsi = pf->vsi[i]; if (vsi && vsi->type == I40E_VSI_FCOE) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 79ae7beeafe5..daa9204426d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -762,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = BIT(ce_info->width) - 1; + mask = (u8)(BIT(ce_info->width) - 1); src_byte = *from; src_byte &= mask; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b825f978d441..bb4612c159fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -24,12 +24,24 @@ * ******************************************************************************/ +#include <linux/etherdevice.h> +#include <linux/of_net.h> +#include <linux/pci.h> + +#ifdef CONFIG_SPARC +#include <asm/idprom.h> +#include <asm/prom.h> +#endif + /* Local includes */ #include "i40e.h" #include "i40e_diag.h" -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) #include <net/vxlan.h> #endif +#if IS_ENABLED(CONFIG_GENEVE) +#include <net/geneve.h> 
+#endif const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -38,8 +50,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 46 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 8 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -55,6 +67,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, + u16 rss_table_size, u16 rss_size); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); @@ -68,7 +82,6 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb); static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, @@ -790,75 +803,6 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) #endif /** - * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode - * @pf: the corresponding PF - * - * Update the Rx XOFF counter (PAUSE frames) in link flow control mode - **/ -static void i40e_update_link_xoff_rx(struct i40e_pf *pf) -{ - struct i40e_hw_port_stats *osd = &pf->stats_offsets; - struct i40e_hw_port_stats *nsd = &pf->stats; - struct i40e_hw *hw = &pf->hw; - u64 xoff = 0; - - if ((hw->fc.current_mode != I40E_FC_FULL) && - (hw->fc.current_mode != I40E_FC_RX_PAUSE)) - return; - - xoff = nsd->link_xoff_rx; - i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), - pf->stat_offsets_loaded, - &osd->link_xoff_rx, &nsd->link_xoff_rx); - - /* No new LFC xoff rx */ - if (!(nsd->link_xoff_rx - xoff)) - return; - -} - -/** - * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode - * @pf: the corresponding PF - * - * Update the Rx XOFF counter (PAUSE frames) in PFC mode - **/ -static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) -{ - struct i40e_hw_port_stats *osd = &pf->stats_offsets; - struct i40e_hw_port_stats *nsd = &pf->stats; - bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; - struct i40e_dcbx_config *dcb_cfg; - struct i40e_hw *hw = &pf->hw; - u16 i; - u8 tc; - - dcb_cfg = &hw->local_dcbx_config; - - /* Collect Link XOFF stats when PFC is disabled */ - if (!dcb_cfg->pfc.pfcenable) { - i40e_update_link_xoff_rx(pf); - return; - } - - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - u64 prio_xoff = nsd->priority_xoff_rx[i]; - - i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), - pf->stat_offsets_loaded, - &osd->priority_xoff_rx[i], - &nsd->priority_xoff_rx[i]); - - /* No new PFC xoff rx */ - if (!(nsd->priority_xoff_rx[i] - prio_xoff)) - continue; - /* Get the TC for given priority */ - tc = dcb_cfg->etscfg.prioritytable[i]; - xoff[tc] = true; - } -} - -/** * i40e_update_vsi_stats - Update the vsi statistics counters. 
* @vsi: the VSI to be updated * @@ -881,6 +825,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u64 bytes, packets; unsigned int start; u64 tx_linearize; + u64 tx_force_wb; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -899,7 +844,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) */ rx_b = rx_p = 0; tx_b = tx_p = 0; - tx_restart = tx_busy = tx_linearize = 0; + tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -917,6 +862,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; + tx_force_wb += p->tx_stats.tx_force_wb; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -934,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; + vsi->tx_force_wb = tx_force_wb; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1049,12 +996,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); - i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ + i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_rx, &nsd->link_xoff_rx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); for (i = 0; i < 8; i++) { + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xoff_rx[i], + &nsd->priority_xoff_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], @@ -1317,6 +1270,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, } /** + * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS + * @vsi: the VSI to be searched + * @macaddr: the mac address to be removed + * @is_vf: true if it is a VF + * @is_netdev: true if it is a netdev + * + * Removes a given MAC address from a VSI, regardless of VLAN + * + * Returns 0 for success, or error + **/ +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f = NULL; + int changed = 0; + + WARN(!spin_is_locked(&vsi->mac_filter_list_lock), + "Missing mac_filter_list_lock\n"); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (is_vf == f->is_vf) && + (is_netdev == f->is_netdev)) { + f->counter--; + f->changed = true; + changed = 1; + } + } + if (changed) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + return 0; + } + return -ENOENT; +} + +/** * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address @@ -1547,10 +1536,9 @@ static int i40e_set_mac(struct net_device *netdev, void *p) spin_unlock_bh(&vsi->mac_filter_list_lock); } - i40e_sync_vsi_filters(vsi, false); ether_addr_copy(netdev->dev_addr, addr->sa_data); - return 0; + return i40e_sync_vsi_filters(vsi); } /** @@ -1590,7 +1578,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & 
BIT_ULL(i)) /* TC is enabled */ + if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } if (!numtc) { @@ -1619,13 +1607,14 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ - if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { + if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps; switch (vsi->type) { case I40E_VSI_MAIN: - qcount = min_t(int, pf->rss_size, num_tc_qps); + qcount = min_t(int, pf->alloc_rss_size, + num_tc_qps); break; #ifdef I40E_FCOE case I40E_VSI_FCOE: @@ -1851,13 +1840,12 @@ static void i40e_cleanup_add_list(struct list_head *add_list) /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI - * @grab_rtnl: whether RTNL needs to be grabbed * * Push any outstanding VSI filter changes through the AdminQ. * * Returns 0 or error value **/ -int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) +int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; @@ -1865,8 +1853,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; + i40e_status aq_ret = 0; bool err_cond = false; - i40e_status ret = 0; + int retval = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; @@ -1929,17 +1918,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } spin_unlock_bh(&vsi->mac_filter_list_lock); - if (err_cond) + if (err_cond) { i40e_cleanup_add_list(&tmp_add_list); + retval = -ENOMEM; + goto out; + } } /* Now process 'del_list' outside the lock */ if (!list_empty(&tmp_del_list)) { + int del_list_size; + filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); + del_list_size = filter_list_len * + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kzalloc(del_list_size, GFP_KERNEL); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); @@ -1948,7 +1942,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); - return -ENOMEM; + retval = -ENOMEM; + goto out; } list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { @@ -1966,18 +1961,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) /* flush a full buffer */ if (num_del == filter_list_len) { - ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, del_list, num_del, - NULL); + aq_ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, + del_list, + num_del, + NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; - memset(del_list, 0, sizeof(*del_list)); + memset(del_list, 0, del_list_size); - if (ret && aq_err != I40E_AQ_RC_ENOENT) + if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { + retval = -EIO; dev_err(&pf->pdev->dev, "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); + } } /* Release memory for MAC filter entries which were * synced up with HW. 
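In the i40e_sync_vsi_filters() rework above, MAC/VLAN deletions and additions are pushed to firmware in chunks no larger than one admin-queue buffer (filter_list_len = asq_buf_size / element size), the buffer is flushed whenever it fills, and the memset now clears the whole buffer rather than a single element. A minimal sketch of that batch-and-flush pattern, with a hypothetical flush callback (not from the patch):

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative sketch: collect items into a buffer sized to what the
 * firmware can accept in one command, flush every time it fills, and
 * flush once more at the end for the final partial batch.
 */
static int example_batch_commands(const u32 *items, int count, int batch_len,
				  int (*flush)(const u32 *buf, int n))
{
	u32 *buf;
	int i, n = 0, ret = 0;

	buf = kcalloc(batch_len, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		buf[n++] = items[i];
		if (n == batch_len) {			/* buffer full: flush now */
			ret = flush(buf, n);
			n = 0;
			if (ret)
				break;
			/* clear the whole buffer, not just one element */
			memset(buf, 0, batch_len * sizeof(*buf));
		}
	}
	if (!ret && n)					/* final partial batch */
		ret = flush(buf, n);

	kfree(buf);
	return ret;
}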
@@ -1987,15 +1986,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (num_del) { - ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, NULL); + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + del_list, num_del, + NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; - if (ret && aq_err != I40E_AQ_RC_ENOENT) + if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); } @@ -2004,13 +2004,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (!list_empty(&tmp_add_list)) { + int add_list_size; /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), - add_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_add_macvlan_element_data), - GFP_KERNEL); + add_list_size = filter_list_len * + sizeof(struct i40e_aqc_add_macvlan_element_data); + add_list = kzalloc(add_list_size, GFP_KERNEL); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); @@ -2019,7 +2020,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); - return -ENOMEM; + retval = -ENOMEM; + goto out; } list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { @@ -2040,15 +2042,15 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) /* flush a full buffer */ if (num_add == filter_list_len) { - ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, - NULL); + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; - if (ret) + if (aq_ret) break; - memset(add_list, 0, sizeof(*add_list)); + memset(add_list, 0, add_list_size); } /* Entries from tmp_add_list were cloned from MAC * filter list, hence clean those cloned entries @@ -2058,18 +2060,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (num_add) { - ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, NULL); + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; - if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { + if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { + retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, @@ -2087,16 +2090,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); - ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_multipromisc, - NULL); - if (ret) + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } } if ((changed_flags & IFF_PROMISC) 
|| promisc_forced_on) { bool cur_promisc; @@ -2112,44 +2118,50 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - if (grab_rtnl) - i40e_do_reset_safe(pf, - BIT(__I40E_PF_RESET_REQUESTED)); - else - i40e_do_reset(pf, - BIT(__I40E_PF_RESET_REQUESTED)); + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } } else { - ret = i40e_aq_set_vsi_unicast_promiscuous( + aq_ret = i40e_aq_set_vsi_unicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); - if (ret) + if (aq_ret) { + retval = + i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set unicast promisc failed, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); - ret = i40e_aq_set_vsi_multicast_promiscuous( + aq_ret, pf->hw.aq.asq_last_status); + } + aq_ret = i40e_aq_set_vsi_multicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); - if (ret) + if (aq_ret) { + retval = + i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multicast promisc failed, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + aq_ret, pf->hw.aq.asq_last_status); + } } - ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (ret) + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } } - +out: clear_bit(__I40E_CONFIG_BUSY, &vsi->state); - return 0; + return retval; } /** @@ -2166,8 +2178,15 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) - i40e_sync_vsi_filters(pf->vsi[v], true); + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + int ret = i40e_sync_vsi_filters(pf->vsi[v]); + + if (ret) { + /* come back and try again later */ + pf->flags |= I40E_FLAG_FILTER_SYNC; + break; + } + } } } @@ -2377,16 +2396,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) } } - /* Make sure to release before sync_vsi_filter because that - * function will lock/unlock as necessary - */ spin_unlock_bh(&vsi->mac_filter_list_lock); - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - return 0; - - return i40e_sync_vsi_filters(vsi, false); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -2459,16 +2475,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) } } - /* Make sure to release before sync_vsi_filter because that - * function with lock/unlock as necessary - */ spin_unlock_bh(&vsi->mac_filter_list_lock); - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - return 0; - - return i40e_sync_vsi_filters(vsi, false); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -2711,6 +2724,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) netif_set_xps_queue(ring->netdev, mask, ring->queue_index); free_cpumask_var(mask); } + + /* schedule our worker thread which will take care of + * 
applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); } /** @@ -4360,17 +4378,41 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + /* Bail out if interrupts are disabled because napi_poll + * execution in-progress or will get scheduled soon. + * napi_poll cleans TX and RX queues and updates 'next_to_clean'. + */ + if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) + return; + head = i40e_get_head(tx_ring); tx_pending = i40e_get_tx_pending(tx_ring); - /* Interrupts are disabled and TX pending is non-zero, - * trigger the SW interrupt (don't wait). Worst case - * there will be one extra interrupt which may result - * into not cleaning any queues because queues are cleaned. + /* HW is done executing descriptors, updated HEAD write back, + * but SW hasn't processed those descriptors. If interrupt is + * not generated from this point ON, it could result into + * dev_watchdog detecting timeout on those netdev_queue, + * hence proactively trigger SW interrupt. */ - if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) - i40e_force_wb(vsi, tx_ring->q_vector); + if (tx_pending) { + /* NAPI Poll didn't run and clear since it was set */ + if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, + &tx_ring->q_vector->hung_detected)) { + netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", + vsi->seid, q_idx, tx_pending, + tx_ring->next_to_clean, head, + tx_ring->next_to_use, + readl(tx_ring->tail)); + netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n", + vsi->seid, q_idx, val); + i40e_force_wb(vsi, tx_ring->q_vector); + } else { + /* First Chance - detected possible hung */ + set_bit(I40E_Q_VECTOR_HUNG_DETECT, + &tx_ring->q_vector->hung_detected); + } + } } /** @@ -4441,7 +4483,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= BIT_ULL(tc); + enabled_tc |= BIT(tc); break; } } @@ -4525,7 +4567,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) num_tc++; } return num_tc; @@ -4547,7 +4589,7 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) break; } @@ -4707,7 +4749,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. 
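The i40e_detect_recover_hung_queue() rework above turns the forced write-back into a two-strike scheme: with the interrupt masked it bails out (NAPI is running or about to run and will clean the ring), otherwise a queue with pending work is only marked suspect on the first pass and kicked with a software interrupt if it is still marked on the next pass. A condensed sketch of that logic, with hypothetical helpers (not from the patch; in the real driver the suspect bit lives in q_vector->hung_detected and is cleared by napi_poll):

#include <linux/bitops.h>
#include <linux/types.h>

static void example_check_hung(unsigned long *suspect, bool tx_pending,
			       bool irq_enabled, void (*force_wb)(void))
{
	/* Interrupt masked: NAPI will make progress on the ring shortly,
	 * so there is nothing to detect here.
	 */
	if (!irq_enabled)
		return;

	if (!tx_pending)
		return;

	if (test_and_clear_bit(0, suspect))
		force_wb();		/* second strike: kick the queue */
	else
		set_bit(0, suspect);	/* first strike: remember the queue */
}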
*/ - if (vsi->tc_config.enabled_tc & BIT_ULL(i)) + if (vsi->tc_config.enabled_tc & BIT(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, @@ -4769,7 +4811,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) bw_share[i] = 1; } @@ -4843,7 +4885,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) bw_data.tc_bw_share_credits[i] = 1; } @@ -5240,7 +5282,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) - enabled_tc |= BIT_ULL(i); + enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc) @@ -5305,6 +5347,9 @@ int i40e_open(struct net_device *netdev) #ifdef CONFIG_I40E_VXLAN vxlan_get_rx_port(netdev); #endif +#ifdef CONFIG_I40E_GENEVE + geneve_get_rx_port(netdev); +#endif return 0; } @@ -5738,7 +5783,7 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, **/ static void i40e_service_event_complete(struct i40e_pf *pf) { - BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); + WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); /* flush memory to make sure state is correct before next watchog */ smp_mb__before_atomic(); @@ -6013,6 +6058,9 @@ static void i40e_link_event(struct i40e_pf *pf) i40e_status status; bool new_link, old_link; + /* save off old link status information */ + pf->hw.phy.link_info_old = pf->hw.phy.link_info; + /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; @@ -6101,23 +6149,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf) rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); + reset_flags |= BIT(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); + reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); + reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); + reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED); + reset_flags |= BIT(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } @@ -6147,13 +6195,9 @@ unlock: static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_hw *hw = &pf->hw; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; - /* save off old link status information */ - hw->phy.link_info_old = hw->phy.link_info; - /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct * This completely ignores any state information @@ 
-6192,15 +6236,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { - dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; } if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { - dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { - dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) @@ -6209,15 +6256,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val = rd32(&pf->hw, pf->hw.aq.asq.len); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { - dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; } if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { - dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; } if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { - dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) @@ -6268,6 +6318,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: + case i40e_aqc_opc_oem_post_update: i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); break; default: @@ -6685,6 +6736,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) struct i40e_hw *hw = &pf->hw; u8 set_fc_aq_fail = 0; i40e_status ret; + u32 val; u32 v; /* Now we wait for GRST to settle out. @@ -6823,6 +6875,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } + /* Reconfigure hardware for allowing smaller MSS in the case + * of TSO, so that we avoid the MDD being fired and causing + * a reset in the case of small MSS+TSO. 
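The rebuild path in the next hunk also clamps the hardware's minimum TSO segment size. Reading the masks used there, the minimum-MSS field appears to occupy bits 25:16 of the register (mask 0x3FF0000) and 0x400000 is 64 << 16, i.e. a 64-byte floor; without the clamp, a small TSO MSS below the programmed minimum could fire the malicious-driver-detection (MDD) logic and cause a reset. A hedged sketch of the read-modify-write, using the literal values from the hunk (field layout inferred, not independently verified):

#include <linux/types.h>

static u32 example_clamp_min_mss(u32 reg)
{
	if ((reg & 0x3FF0000) > 0x400000) {	/* min-MSS field above 64 */
		reg &= ~0x3FF0000;		/* clear bits 25:16 */
		reg |= 0x400000;		/* 64 << 16 */
	}
	return reg;
}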
+ */ +#define I40E_REG_MSS 0x000E64DC +#define I40E_REG_MSS_MIN_MASK 0x3FF0000 +#define I40E_64BYTE_MSS 0x400000 + val = rd32(hw, I40E_REG_MSS); + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { + val &= ~I40E_REG_MSS_MIN_MASK; + val |= I40E_64BYTE_MSS; + wr32(hw, I40E_REG_MSS, val); + } + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); @@ -6984,30 +7050,30 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } -#ifdef CONFIG_I40E_VXLAN /** - * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ -static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; int i; - if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) + if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) return; - pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { - pf->pending_vxlan_bitmap &= ~BIT_ULL(i); - port = pf->vxlan_ports[i]; + if (pf->pending_udp_bitmap & BIT_ULL(i)) { + pf->pending_udp_bitmap &= ~BIT_ULL(i); + port = pf->udp_ports[i].index; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), - I40E_AQC_TUNNEL_TYPE_VXLAN, + pf->udp_ports[i].type, NULL, NULL); else ret = i40e_aq_del_udp_tunnel(hw, i, NULL); @@ -7020,13 +7086,13 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->vxlan_ports[i] = 0; + pf->udp_ports[i].index = 0; } } } +#endif } -#endif /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data @@ -7051,8 +7117,8 @@ static void i40e_service_task(struct work_struct *work) i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); -#ifdef CONFIG_I40E_VXLAN - i40e_sync_vxlan_filters_subtask(pf); +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) + i40e_sync_udp_filters_subtask(pf); #endif i40e_clean_adminq_subtask(pf); @@ -7282,6 +7348,23 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) } /** + * i40e_clear_rss_config_user - clear the user configured RSS hash keys + * and lookup table + * @vsi: Pointer to VSI structure + */ +static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + +/** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured **/ @@ -7318,6 +7401,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); i40e_vsi_free_arrays(vsi, true); + i40e_clear_rss_config_user(vsi); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) @@ -7780,7 +7864,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) * @vsi: vsi structure * @seed: RSS hash seed **/ -static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { struct i40e_aqc_get_set_rss_key_data rss_key; struct i40e_pf *pf = vsi->back; @@ -7833,43 +7918,57 @@ static int 
i40e_vsi_config_rss(struct i40e_vsi *vsi) { u8 seed[I40E_HKEY_ARRAY_SIZE]; struct i40e_pf *pf = vsi->back; + u8 *lut; + int ret; - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); - vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + return 0; - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) - return i40e_config_rss_aq(vsi, seed); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; - return 0; + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); + ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; } /** - * i40e_config_rss_reg - Prepare for RSS if used - * @pf: board private structure + * i40e_config_rss_reg - Configure RSS keys and lut by writing registers + * @vsi: Pointer to vsi structure * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure **/ -static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) +static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - u32 *seed_dw = (u32 *)seed; - u32 current_queue = 0; - u32 lut = 0; - int i, j; + u8 i; /* Fill out hash function seed */ - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + if (seed) { + u32 *seed_dw = (u32 *)seed; - for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (current_queue == vsi->rss_size) - current_queue = 0; - lut |= ((current_queue) << (8 * j)); - current_queue++; - } - wr32(&pf->hw, I40E_PFQF_HLUT(i), lut); + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + } + + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); } i40e_flush(hw); @@ -7877,18 +7976,101 @@ static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) } /** - * i40e_config_rss - Prepare for RSS if used + * i40e_get_rss_reg - Get the RSS keys and lut by reading registers + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the keys + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries + * + * Returns 0 on success, negative on failure + */ +static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); + } + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); + } + + return 0; +} + +/** + * i40e_config_rss - Configure RSS keys and lut + * @vsi: Pointer to VSI structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + */ +int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + + if 
(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(vsi, seed, lut, lut_size); + else + return i40e_config_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40e_get_rss - Get RSS keys and lut + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the keys + * @lut: Buffer to store the lookup table entries + * lut_size: Size of buffer to store the lookup table entries + * + * Returns 0 on success, negative on failure + */ +int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +{ + return i40e_get_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40e_fill_rss_lut - Fill the RSS lookup table with default values + * @pf: Pointer to board private structure + * @lut: Lookup table + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + */ +static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, + u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; +} + +/** + * i40e_pf_config_rss - Prepare for RSS if used * @pf: board private structure **/ -static int i40e_config_rss(struct i40e_pf *pf) +static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; + u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; - - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | @@ -7898,8 +8080,6 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); - vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); - /* Determine the RSS table size based on the hardware capabilities */ reg_val = rd32(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? @@ -7907,10 +8087,32 @@ static int i40e_config_rss(struct i40e_pf *pf) (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); wr32(hw, I40E_PFQF_CTL_0, reg_val); - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) - return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); + /* Determine the RSS size of the VSI */ + if (!vsi->rss_size) + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use user configured lut if there is one, otherwise use default */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else - return i40e_config_rss_reg(pf, seed); + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + + /* Use user configured hash key if there is one, otherwise + * use default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; } /** @@ -7935,13 +8137,28 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = new_rss_size; + pf->alloc_rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); - i40e_config_rss(pf); + + /* Discard the user configured hash keys and lut, if less + * queues are enabled. 
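The new i40e_fill_rss_lut() above makes the default spreading policy explicit: the RSS lookup table is just an array with one queue index per hash bucket, and filling it with i % rss_size distributes buckets round-robin over the enabled queues; a user-supplied table from ethtool (vsi->rss_lut_user) simply replaces this default. A standalone sketch (not part of the patch):

#include <linux/types.h>

/* A 128-entry table with 4 queues ends up as 0,1,2,3,0,1,2,3,... so each
 * queue owns an equal share of the hash buckets.
 */
static void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 num_queues)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_queues;	/* bucket i -> queue i mod n */
}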
+ */ + if (queue_count < vsi->rss_size) { + i40e_clear_rss_config_user(vsi); + dev_dbg(&pf->pdev->dev, + "discard user configured hash keys and lut\n"); + } + + /* Reset vsi->rss_size, as number of enabled queues changed */ + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + + i40e_pf_config_rss(pf); } - dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); - return pf->rss_size; + dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", + pf->alloc_rss_size, pf->rss_size_max); + return pf->alloc_rss_size; } /** @@ -8112,13 +8329,14 @@ static int i40e_sw_init(struct i40e_pf *pf) * maximum might end up larger than the available queues */ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); - pf->rss_size = 1; + pf->alloc_rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; - pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); + pf->alloc_rss_size = min_t(int, pf->rss_size_max, + num_online_cpus()); } /* MFP mode enabled */ @@ -8176,7 +8394,8 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_HW_ATR_EVICT_CAPABLE | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | - I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; + I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; @@ -8275,26 +8494,27 @@ static int i40e_set_features(struct net_device *netdev, return 0; } -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** - * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure * @port: The UDP port to look up * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ -static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->vxlan_ports[i] == port) + if (pf->udp_ports[i].index == port) return i; } return i; } +#endif /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev @@ -8304,6 +8524,7 @@ static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) static void i40e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { +#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8313,7 +8534,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, if (sa_family == AF_INET6) return; - idx = i40e_get_vxlan_port_idx(pf, port); + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { @@ -8323,7 +8544,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, } /* Now check if there is space to add the new port */ - next_idx = i40e_get_vxlan_port_idx(pf, 0); + next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", @@ -8332,9 +8553,11 @@ static void i40e_add_vxlan_port(struct net_device *netdev, } /* New port: add it and mark its index in the bitmap */ - pf->vxlan_ports[next_idx] = port; - 
pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + pf->pending_udp_bitmap |= BIT_ULL(next_idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; +#endif } /** @@ -8346,6 +8569,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, static void i40e_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { +#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8354,23 +8578,108 @@ static void i40e_del_vxlan_port(struct net_device *netdev, if (sa_family == AF_INET6) return; - idx = i40e_get_vxlan_port_idx(pf, port); + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { /* if port exists, set it to 0 (mark for deletion) * and make it pending */ - pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } else { netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } +#endif +} + +/** + * i40e_add_geneve_port - Get notifications about GENEVE ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that GENEVE is notifying us about + * @port: New UDP port number that GENEVE started listening to + **/ +static void i40e_add_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ +#if IS_ENABLED(CONFIG_GENEVE) + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 next_idx; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_udp_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "udp port %d already offloaded\n", + ntohs(port)); + return; + } + + /* Now check if there is space to add the new port */ + next_idx = i40e_get_udp_port_idx(pf, 0); + + if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", + ntohs(port)); + return; + } + + /* New port: add it and mark its index in the bitmap */ + pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + pf->pending_udp_bitmap |= BIT_ULL(next_idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); +#endif } +/** + * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that GENEVE is notifying us about + * @port: UDP port number that GENEVE stopped listening to + **/ +static void i40e_del_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ +#if IS_ENABLED(CONFIG_GENEVE) + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_udp_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= 
I40E_FLAG_UDP_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting geneve port %d\n", + ntohs(port)); + } else { + netdev_warn(netdev, "geneve port %d was not found, not deleting\n", + ntohs(port)); + } #endif +} + static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { @@ -8548,7 +8857,10 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, nlflags, 0, 0, filter_mask, NULL); } -#define I40E_MAX_TUNNEL_HDR_LEN 80 +/* Hardware supports L4 tunnel length of 128B (=2^7) which includes + * inner mac plus all inner ethertypes. + */ +#define I40E_MAX_TUNNEL_HDR_LEN 128 /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff @@ -8560,9 +8872,9 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, netdev_features_t features) { if (skb->encapsulation && - (skb_inner_mac_header(skb) - skb_transport_header(skb) > + ((skb_inner_network_header(skb) - skb_transport_header(skb)) > I40E_MAX_TUNNEL_HDR_LEN)) - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } @@ -8595,10 +8907,14 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = i40e_add_geneve_port, + .ndo_del_geneve_port = i40e_del_geneve_port, +#endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, @@ -8632,13 +8948,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np->vsi = vsi; netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -9051,7 +9368,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) f->is_vf, f->is_netdev); spin_unlock_bh(&vsi->mac_filter_list_lock); - i40e_sync_vsi_filters(vsi, false); + i40e_sync_vsi_filters(vsi); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); @@ -9213,6 +9530,44 @@ err_vsi: } /** + * i40e_macaddr_init - explicitly write the mac address filters. + * + * @vsi: pointer to the vsi. + * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. 
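The tunnel-port callbacks above (i40e_add/del_vxlan_port and the new i40e_add/del_geneve_port) all follow the same bookkeeping scheme: look the UDP port up in a small table, fill or zero its slot, set that slot's bit in a pending bitmap, and raise I40E_FLAG_UDP_FILTER_SYNC so the service task programs the firmware later. The following is a condensed sketch of that pattern only; udp_port_entry, MAX_UDP_PORTS, pending_bitmap and the helper names are illustrative stand-ins, not the driver's exact definitions, and the IPv6 early return plus the netdev_info/netdev_warn messages are omitted.

#include <linux/bits.h>
#include <linux/types.h>

#define MAX_UDP_PORTS	16	/* stand-in for I40E_MAX_PF_UDP_OFFLOAD_PORTS */

struct udp_port_entry {
	__be16 port;		/* 0 = free slot, or "marked for deletion" */
	u8 type;		/* VXLAN vs. GENEVE tunnel type for firmware */
};

static struct udp_port_entry udp_ports[MAX_UDP_PORTS];
static u64 pending_bitmap;	/* one bit per slot that still needs a sync */

/* Return the slot holding @port, or MAX_UDP_PORTS if it is not present. */
static u8 udp_port_find(__be16 port)
{
	u8 i;

	for (i = 0; i < MAX_UDP_PORTS; i++)
		if (udp_ports[i].port == port)
			return i;
	return MAX_UDP_PORTS;
}

static void udp_port_add(__be16 port, u8 type)
{
	u8 idx = udp_port_find(port);

	if (idx < MAX_UDP_PORTS)
		return;				/* already offloaded */
	idx = udp_port_find(0);			/* first free slot */
	if (idx == MAX_UDP_PORTS)
		return;				/* table is full */
	udp_ports[idx].port = port;
	udp_ports[idx].type = type;
	pending_bitmap |= BIT_ULL(idx);		/* service task syncs later */
}

static void udp_port_del(__be16 port)
{
	u8 idx = udp_port_find(port);

	if (idx < MAX_UDP_PORTS) {
		udp_ports[idx].port = 0;	/* zero marks the deletion */
		pending_bitmap |= BIT_ULL(idx);
	}
}

Keeping a single table with a per-entry tunnel type is what lets one sync routine serve both VXLAN and GENEVE instead of the previous VXLAN-only arrays.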
+ * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ + int ret; + struct i40e_aqc_add_macvlan_element_data element; + + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + macaddr, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Addr change for VSI failed: %d\n", ret); + return -EADDRNOTAVAIL; + } + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add filter failed err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); + } + return ret; +} + +/** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type @@ -9336,6 +9691,17 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: + /* Apply relevant filters if a platform-specific mac + * address was selected. + */ + if (!!(pf->flags & I40E_FLAG_PF_MAC)) { + ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); + if (ret) { + dev_warn(&pf->pdev->dev, + "could not set up macaddr; err %d\n", + ret); + } + } case I40E_VSI_VMDQ2: case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); @@ -9947,7 +10313,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) * the hash */ if ((pf->flags & I40E_FLAG_RSS_ENABLED)) - i40e_config_rss(pf); + i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_update_link_info(&pf->hw); @@ -9985,7 +10351,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { /* one qp for PF, no queues for anything else */ queues_left = 0; - pf->rss_size = pf->num_lan_qps = 1; + pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | @@ -10002,7 +10368,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE))) { /* one qp for PF */ - pf->rss_size = pf->num_lan_qps = 1; + pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | @@ -10072,8 +10438,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), - pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps, - pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); + pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, + pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, + queues_left); #ifdef I40E_FCOE dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif @@ -10111,55 +10478,86 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) } #define INFO_STRING_LEN 255 +#define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - char *buf, *string; + char *buf; + int i; - string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); - if (!string) { - dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!buf) return; - } - buf = string; - - buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); + i = 
snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV - buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); + i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", - pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs, - pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); + i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) - buf += sprintf(buf, "RSS "); + i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) - buf += sprintf(buf, "FD_ATR "); + i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { - buf += sprintf(buf, "FD_SB "); - buf += sprintf(buf, "NTUPLE "); + i += snprintf(&buf[i], REMAIN(i), " FD_SB"); + i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) - buf += sprintf(buf, "DCB "); + i += snprintf(&buf[i], REMAIN(i), " DCB"); #if IS_ENABLED(CONFIG_VXLAN) - buf += sprintf(buf, "VxLAN "); + i += snprintf(&buf[i], REMAIN(i), " VxLAN"); +#endif +#if IS_ENABLED(CONFIG_GENEVE) + i += snprintf(&buf[i], REMAIN(i), " Geneve"); #endif if (pf->flags & I40E_FLAG_PTP) - buf += sprintf(buf, "PTP "); + i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) - buf += sprintf(buf, "FCOE "); + i += snprintf(&buf[i], REMAIN(i), " FCOE"); #endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) - buf += sprintf(buf, "VEB "); + i += snprintf(&buf[i], REMAIN(i), " VEB"); else - buf += sprintf(buf, "VEPA "); + i += snprintf(&buf[i], REMAIN(i), " VEPA"); + + dev_info(&pf->pdev->dev, "%s\n", buf); + kfree(buf); + WARN_ON(i > INFO_STRING_LEN); +} + +/** + * i40e_get_platform_mac_addr - get platform-specific MAC address + * + * @pdev: PCI device information struct + * @pf: board private structure + * + * Look up the MAC address in Open Firmware on systems that support it, + * and use IDPROM on SPARC if no OF address is found. On return, the + * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value + * has been selected. 
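The i40e_print_features() rewrite above replaces open-ended sprintf() pointer bumping with an index plus REMAIN(i) = INFO_STRING_LEN - i, so each append is bounded by the space actually left. The subtlety is that snprintf() returns the length the string would have had, not what was written, so after truncation the index can exceed the buffer size; that is what the closing WARN_ON(i > INFO_STRING_LEN) catches (the kernel's scnprintf() variant returns the truncated length instead, which is why some code prefers it for this idiom). A minimal user-space illustration of the same pattern, with generic names rather than driver code:

#include <stdio.h>
#include <string.h>

#define LEN 32
#define REMAIN(x) (LEN - (x))	/* same shape as the driver's REMAIN() */

int main(void)
{
	char buf[LEN];
	int i;

	i = snprintf(buf, LEN, "Features:");
	i += snprintf(&buf[i], REMAIN(i), " RSS");
	i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN Geneve PTP VEB");

	/* No further appends once i may exceed LEN: REMAIN(i) would go
	 * negative (huge as a size_t), which is why the driver sizes its
	 * buffer generously and only warns after the fact.  buf itself is
	 * still NUL-terminated and at most LEN - 1 characters long.
	 */
	printf("len=%zu want=%d truncated=%s\n", strlen(buf), i,
	       i > LEN - 1 ? "yes" : "no");
	return 0;
}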
+ **/ +static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) +{ + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + u8 *mac_addr = pf->hw.mac.addr; - BUG_ON(buf > (string + INFO_STRING_LEN)); - dev_info(&pf->pdev->dev, "%s\n", string); - kfree(string); + pf->flags &= ~I40E_FLAG_PF_MAC; + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(mac_addr, addr); + pf->flags |= I40E_FLAG_PF_MAC; +#ifdef CONFIG_SPARC + } else { + ether_addr_copy(mac_addr, idprom->id_ethaddr); + pf->flags |= I40E_FLAG_PF_MAC; +#endif /* CONFIG_SPARC */ + } } /** @@ -10183,6 +10581,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 link_status; int err; u32 len; + u32 val; u32 i; u8 set_fc_aq_fail; @@ -10295,7 +10694,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + err = i40e_init_adminq(hw); + if (err) { + if (err == I40E_ERR_FIRMWARE_API_VERSION) + dev_info(&pdev->dev, + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + else + dev_info(&pdev->dev, + "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); + + goto err_pf_reset; + } /* provide nvm, fw, api versions */ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", @@ -10303,12 +10718,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.api_maj_ver, hw->aq.api_min_ver, i40e_nvm_version_str(hw)); - if (err) { - dev_info(&pdev->dev, - "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); - goto err_pf_reset; - } - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) dev_info(&pdev->dev, @@ -10361,6 +10770,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } i40e_get_mac_addr(hw, hw->mac.addr); + /* allow a platform config to override the HW addr */ + i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; @@ -10405,7 +10816,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) + if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; @@ -10487,6 +10898,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* Reconfigure hardware for allowing smaller MSS in the case + * of TSO, so that we avoid the MDD being fired and causing + * a reset in the case of small MSS+TSO. 
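Earlier in this hunk the admin-queue mutexes are initialized once in i40e_probe() (with the matching mutex_destroy() calls added to i40e_remove() below, and the i40evf side dropping its mutex_init() from i40evf_init_adminq() for the same reason). The motivation is that the AQ can be shut down and re-initialized, for example during reset or NVM-update recovery, and a lock must never be re-initialized while it may still be held. A lifetime sketch, assuming the driver's headers; example_probe/example_remove are illustrative wrappers, not driver functions:

/* Illustrative only: the real driver does this inside i40e_probe() and
 * i40e_remove(); i40e_init_adminq()/i40e_shutdown_adminq() may run again
 * in between without touching the mutexes.
 */
static int example_probe(struct i40e_hw *hw)
{
	mutex_init(&hw->aq.asq_mutex);		/* once per device lifetime */
	mutex_init(&hw->aq.arq_mutex);

	return i40e_init_adminq(hw);		/* safe to repeat on reset */
}

static void example_remove(struct i40e_hw *hw)
{
	i40e_shutdown_adminq(hw);

	mutex_destroy(&hw->aq.arq_mutex);	/* once, after last AQ use */
	mutex_destroy(&hw->aq.asq_mutex);
}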
+ */ + val = rd32(hw, I40E_REG_MSS); + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { + val &= ~I40E_REG_MSS_MIN_MASK; + val |= I40E_64BYTE_MSS; + wr32(hw, I40E_REG_MSS, val); + } + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); @@ -10697,7 +11119,6 @@ static void i40e_remove(struct pci_dev *pdev) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); - i40e_fdir_teardown(pf); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); @@ -10740,6 +11161,10 @@ static void i40e_remove(struct pci_dev *pdev) "Failed to destroy the Admin Queue resources: %d\n", ret_code); + /* destroy the locks only once, here */ + mutex_destroy(&hw->aq.arq_mutex); + mutex_destroy(&hw->aq.asq_mutex); + /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 635b3ac17877..720516b0e8ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "Filter deleted for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, fd_data->pctype, fd_data->fd_id); } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -322,7 +328,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * - * Always returns -EOPNOTSUPP + * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, @@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, } } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -506,9 +515,6 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; } - } else { - dev_info(&pdev->dev, - "FD filter programming failed due to incorrect filter parameters\n"); } } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) @@ -526,11 +532,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, struct i40e_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) - kfree(tx_buffer->raw_buf); - else - dev_kfree_skb_any(tx_buffer->skb); - + dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), @@ -542,6 +544,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } + + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); @@ -1374,7 +1380,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN traffic has an outer UDPv4 checksum we need to check + /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check * it in the driver, hardware does not do it for us. 
* Since L3L4P bit was set we assume a valid IHL value (>=5) * so the total length of IPv4 header is IHL*4 bytes @@ -1416,31 +1422,12 @@ checksum_fail: } /** - * i40e_rx_hash - returns the hash value from the Rx descriptor - * @ring: descriptor ring - * @rx_desc: specific descriptor - **/ -static inline u32 i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc) -{ - const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - - if ((ring->netdev->features & NETIF_F_RXHASH) && - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - else - return 0; -} - -/** - * i40e_ptype_to_hash - get a hash type + * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1458,6 +1445,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** + * i40e_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline void i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) +{ + u32 hash; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if (ring->netdev->features & NETIF_F_RXHASH) + return; + + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + } +} + +/** * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed @@ -1606,8 +1617,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1632,7 +1643,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } #endif - skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_desc->wb.qword1.status_error_len = 0; @@ -1736,8 +1746,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1864,7 +1873,6 @@ enable_int: q_vector->itr_countdown--; else q_vector->itr_countdown = ITR_COUNTDOWN_START; - } /** @@ -1892,12 +1900,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) return 0; } + /* Clear hung_detected bit */ + clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected); /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb |= ring->arm_wb; + arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1926,8 +1936,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { tx_only: - if (arm_wb) + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40e_force_wb(vsi, q_vector); + } return budget; } @@ -1993,7 +2005,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; - if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) { + if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) { /* snag network header to get L4 type and address */ hdr.network = skb_network_header(skb); @@ -2078,7 +2090,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) + if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) dtype_cmd |= ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & @@ -2187,14 +2199,12 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header - * @cd_type_cmd_tso_mss: ptr to u64 object - * @cd_tunneling: ptr to context descriptor bits + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss, - u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -2247,7 +2257,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information - * @cd_type_cmd_tso_mss: ptr to u64 object + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2313,7 +2323,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, oudph = udp_hdr(skb); oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; @@ -2807,6 +2817,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, int tsyn; int tso; + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -2826,8 +2839,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, - &cd_type_cmd_tso_mss, &cd_tunneling); + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 6779fb771d6a..3f081e25e097 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -163,7 +163,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FSO BIT(7) #define I40E_TX_FLAGS_TSYN BIT(8) #define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) #define 
I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -202,6 +202,7 @@ struct i40e_tx_queue_stats { u64 tx_busy; u64 tx_done_old; u64 tx_linearize; + u64 tx_force_wb; }; struct i40e_rx_queue_stats { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index ae879826084b..3226946bf3d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 44462b40f2d7..63e62f9aec6e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -290,8 +290,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, next_q = find_first_bit(&linklistmap, (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)); - vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; - qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; + vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); @@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); spin_lock_bh(&vsi->mac_filter_list_lock); - f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); - if (!f) - dev_info(&pf->pdev->dev, - "Could not allocate VF MAC addr\n"); + if (is_valid_ether_addr(vf->default_lan_addr.addr)) { + f = i40e_add_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not add MAC filter %pM for VF %d\n", + vf->default_lan_addr.addr, vf->vf_id); + } f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id ? 
vf->port_vlan_id : -1, true, false); @@ -565,7 +568,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) } /* program mac filter */ - ret = i40e_sync_vsi_filters(vsi, false); + ret = i40e_sync_vsi_filters(vsi); if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -1094,8 +1097,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_invalid_msgs++; - dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", - v_opcode, v_retval); + dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n", + vf->vf_id, v_opcode, v_retval); if (vf->num_invalid_msgs > I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { dev_err(&pf->pdev->dev, @@ -1623,7 +1626,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (!f) { dev_err(&pf->pdev->dev, - "Unable to add VF MAC filter\n"); + "Unable to add MAC filter %pM for VF %d\n", + al->list[i].addr, vf->vf_id); ret = I40E_ERR_PARAM; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; @@ -1632,8 +1636,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi, false)) - dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); error_param: /* send the response to the VF */ @@ -1669,8 +1675,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < al->num_elements; i++) { if (is_broadcast_ether_addr(al->list[i].addr) || is_zero_ether_addr(al->list[i].addr)) { - dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", - al->list[i].addr); + dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", + al->list[i].addr, vf->vf_id); ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } @@ -1680,13 +1686,19 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) - i40e_del_filter(vsi, al->list[i].addr, - I40E_VLAN_ANY, true, false); + if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + spin_unlock_bh(&vsi->mac_filter_list_lock); + goto error_param; + } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi, false)) - dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); error_param: /* send the response to the VF */ @@ -1740,8 +1752,8 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) dev_err(&pf->pdev->dev, - "Unable to add VF vlan filter %d, error %d\n", - vfl->vlan_id[i], ret); + "Unable to add VLAN filter %d for VF %d, error %d\n", + vfl->vlan_id[i], vf->vf_id, ret); } error_param: @@ -1792,8 +1804,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) dev_err(&pf->pdev->dev, - "Unable to delete VF vlan filter %d, error %d\n", - vfl->vlan_id[i], ret); + "Unable to delete VLAN filter %d for VF %d, error %d\n", + vfl->vlan_id[i], vf->vf_id, ret); } error_param: @@ -2066,15 +2078,15 @@ int i40e_ndo_set_vf_mac(struct 
net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, - "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } - if (!is_valid_ether_addr(mac)) { + if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, - "Invalid VF ethernet address\n"); + "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); ret = -EINVAL; goto error_param; } @@ -2085,9 +2097,10 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); + if (!is_zero_ether_addr(vf->default_lan_addr.addr)) + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); /* Delete all the filters for this VSI - we're going to kill it * anyway. @@ -2099,7 +2112,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ - if (i40e_sync_vsi_filters(vsi, false)) { + if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); ret = -EIO; goto error_param; @@ -2150,8 +2163,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_pvid; } @@ -2270,8 +2284,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error; } @@ -2344,8 +2359,9 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } @@ -2460,6 +2476,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", + vf_id); + ret = -EAGAIN; + goto out; + } if (enable == vf->spoofchk) goto out; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index fd123ca60761..3f65e39b3fe4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* initialize locks */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - /* destroy the locks */ - if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index fcb9ef34cc7a..f5b2b369dc7c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -227,6 +227,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1888,6 +1889,26 @@ struct i40e_aqc_nvm_config_data_immediate_field { I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ + struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2311,4 +2332,4 @@ struct i40e_aqc_debug_modify_internals { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); -#endif +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 72b1942a94aa..938783e0baac 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -44,7 +44,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index e6a39c9862e8..ca8b58c3d1f5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 47e9a90d6b10..7a00657dacda 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -51,11 +51,7 @@ static 
void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, struct i40e_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) - kfree(tx_buffer->raw_buf); - else - dev_kfree_skb_any(tx_buffer->skb); - + dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), @@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } + + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); @@ -127,17 +127,24 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of + * i40evf_get_tx_pending - how many Tx descriptors not processed + * @tx_ring: the ring of descriptors * - * Returns value of Tx ring head based on value stored - * in head write-back location + * Since there is no access to the ring head register + * in XL710, we need to use our local copies **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +u32 i40evf_get_tx_pending(struct i40e_ring *ring) { - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); - return le32_to_cpu(*(volatile __le32 *)head); + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; } #define WB_STRIDE 0x3 @@ -245,16 +252,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; - /* check to see if there are any non-cache aligned descriptors - * waiting to be written back, and kick the hardware to force - * them to be written back in case of napi polling - */ - if (budget && - !((i & WB_STRIDE) == WB_STRIDE) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), total_packets, total_bytes); @@ -414,7 +411,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) return false; } -/* +/** * i40evf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * @@ -889,31 +886,12 @@ checksum_fail: } /** - * i40e_rx_hash - returns the hash value from the Rx descriptor - * @ring: descriptor ring - * @rx_desc: specific descriptor - **/ -static inline u32 i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc) -{ - const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - - if ((ring->netdev->features & NETIF_F_RXHASH) && - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - else - return 0; -} - -/** - * i40e_ptype_to_hash - get a hash type + * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -931,6 +909,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** + * 
i40e_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline void i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) +{ + u32 hash; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if (ring->netdev->features & NETIF_F_RXHASH) + return; + + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + } +} + +/** * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed @@ -1071,8 +1073,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1090,7 +1092,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } #endif - skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_desc->wb.qword1.status_error_len = 0; @@ -1189,8 +1190,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1263,10 +1263,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, rx = i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { tx = i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); } + if (rx || tx) { /* get the higher of the two ITR adjustments and * use the same value for both ITR registers @@ -1302,7 +1304,6 @@ enable_int: q_vector->itr_countdown--; else q_vector->itr_countdown = ITR_COUNTDOWN_START; - } /** @@ -1335,7 +1336,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb |= ring->arm_wb; + arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1364,8 +1365,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { tx_only: - if (arm_wb) + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40evf_force_wb(vsi, q_vector); + } return budget; } @@ -1437,13 +1440,12 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header - * @cd_tunneling: ptr to context descriptor bits + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss, - u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -1555,7 +1557,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags |= I40E_TX_FLAGS_IPV6; } - if ((tx_ring->flags & 
I40E_TXR_FLAGS_OUTER_UDP_CSUM) && (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { @@ -1654,7 +1655,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } - /** +/** * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information @@ -1770,6 +1771,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 td_tag = 0; dma_addr_t dma; u16 gso_segs; + u16 desc_count = 0; + bool tail_bump = true; + bool do_rs = false; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; @@ -1810,6 +1814,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -1829,6 +1835,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -1843,35 +1851,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - /* Place RS bit on last descriptor of any packet that spans across the - * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. - */ -#define WB_STRIDE 0x3 - if (((i & WB_STRIDE) != WB_STRIDE) && - (first <= &tx_ring->tx_bi[i]) && - (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << - I40E_TXD_QW1_CMD_SHIFT); - } else { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << - I40E_TXD_QW1_CMD_SHIFT); - } - - netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index), - first->bytecount); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -1881,15 +1860,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* Algorithm to optimize tail and RS bit setting: + * if xmit_more is supported + * if xmit_more is true + * do not update tail and do not mark RS bit. + * if xmit_more is false and last xmit_more was false + * if every packet spanned less than 4 desc + * then set RS bit on 4th packet and update tail + * on every packet + * else + * update tail and set RS bit on every packet. + * if xmit_more is false and last_xmit_more was true + * update tail and set RS bit. + * + * Optimization: wmb to be issued only in case of tail update. + * Also optimize the Descriptor WB path for RS bit with the same + * algorithm. + * + * Note: If there are less than 4 packets + * pending and interrupts were disabled the service task will + * trigger a force WB. 
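The block comment above describes the tail-bump/RS-bit batching that i40evf_tx_map() implements immediately below. Read as a standalone decision step it looks roughly like the sketch that follows; this is illustrative only, since the in-tree code also maintains the I40E_TXR_FLAGS_LAST_XMIT_MORE_SET ring flag and folds the result straight into the descriptor command field.

#define WB_STRIDE 0x3	/* as defined earlier in this file */

/* Decide whether to bump the tail register and whether to request a
 * descriptor write-back (RS bit) for the packet just mapped.
 */
static bool tx_tail_and_rs(bool xmit_more, bool queue_stopped,
			   bool last_xmit_more, u8 *packet_stride,
			   u16 desc_count, bool *do_rs)
{
	*do_rs = false;

	if (xmit_more && !queue_stopped)
		return false;			/* batching: no tail, no RS */

	if (!xmit_more && !queue_stopped && !last_xmit_more &&
	    *packet_stride < WB_STRIDE && desc_count < WB_STRIDE) {
		(*packet_stride)++;		/* small packets: RS only on
						 * roughly every 4th one */
	} else {
		*packet_stride = 0;
		*do_rs = true;			/* request write-back now */
	}

	return true;				/* bump the tail register */
}

Deferring the tail write while xmit_more is set saves one MMIO and one wmb() per batched packet, and spacing out the RS bit reduces descriptor write-back traffic for streams of small packets.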
+ */ + if (skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) { + tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + tail_bump = false; + } else if (!skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)) && + (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && + (tx_ring->packet_stride < WB_STRIDE) && + (desc_count < WB_STRIDE)) { + tx_ring->packet_stride++; + } else { + tx_ring->packet_stride = 0; + tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + do_rs = true; + } + if (do_rs) + tx_ring->packet_stride = 0; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD : + I40E_TX_DESC_CMD_EOP) << + I40E_TXD_QW1_CMD_SHIFT); + /* notify HW of packet */ - if (!skb->xmit_more || - netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index))) - writel(i, tx_ring->tail); - else + if (!tail_bump) prefetchw(tx_desc + 1); + if (tail_bump) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, tx_ring->tail); + } + return; dma_error: @@ -1961,6 +1997,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u8 hdr_len = 0; int tso; + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -1980,8 +2019,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, - &cd_type_cmd_tso_mss, &cd_tunneling); + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2029,7 +2067,7 @@ out_drop: netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; + struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index ebc1bf77f036..e29bb3e86cfd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -201,6 +201,7 @@ struct i40e_tx_queue_stats { u64 tx_busy; u64 tx_done_old; u64 tx_linearize; + u64 tx_force_wb; }; struct i40e_rx_queue_stats { @@ -267,6 +268,8 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u8 packet_stride; +#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) @@ -321,4 +324,19 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); +u32 i40evf_get_tx_pending(struct i40e_ring *ring); + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: Tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + 
tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 9f7b279b9d9c..3b9d2037456c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 22fc3d49c4b9..be1b72b93888 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -67,6 +67,8 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; u16 qs_handle; + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -95,10 +97,10 @@ struct i40e_vsi { #define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define MAX_RX_QUEUES 8 -#define MAX_TX_QUEUES MAX_RX_QUEUES +#define MAX_QUEUES 16 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. @@ -142,9 +144,6 @@ struct i40e_q_vector { #define OTHER_VECTOR 1 #define NONQ_VECS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 4 -#define MAX_MSIX_COUNT 5 - #define MIN_MSIX_Q_VECTORS 1 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) @@ -190,19 +189,19 @@ struct i40evf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work init_task; - struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; int num_active_queues; /* TX */ - struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; + struct i40e_ring *tx_rings; u32 tx_timeout_count; struct list_head mac_filter_list; u32 tx_desc_count; /* RX */ - struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; + struct i40e_ring *rx_rings; u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; @@ -313,4 +312,8 @@ void i40evf_request_reset(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); +int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, + u16 lut_size); +int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, + u16 lut_size); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 4790437a50ac..a4c9feb589e7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -121,12 +121,12 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, data[i] = *(u64 *)p; } for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j]->stats.packets; - data[i++] = adapter->tx_rings[j]->stats.bytes; + data[i++] = 
adapter->tx_rings[j].stats.packets; + data[i++] = adapter->tx_rings[j].stats.bytes; } for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j]->stats.packets; - data[i++] = adapter->rx_rings[j]->stats.bytes; + data[i++] = adapter->rx_rings[j].stats.packets; + data[i++] = adapter->rx_rings[j].stats.bytes; } } @@ -351,7 +351,7 @@ static int i40evf_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { - q_vector = adapter->q_vector[i]; + q_vector = &adapter->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); @@ -634,25 +634,34 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - u32 hlut_val; - int i, j; + struct i40e_vsi *vsi = &adapter->vsi; + u8 *seed = NULL, *lut; + int ret; + u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; - if (indir) { - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; - } - } - return 0; + seed = key; + + lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); + if (ret) + goto out; + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) + indir[i] = (u32)lut[i]; + +out: + kfree(lut); + + return ret; } /** @@ -668,9 +677,9 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - u32 hlut_val; - int i, j; + struct i40e_vsi *vsi = &adapter->vsi; + u8 *seed = NULL; + u16 i; /* We do not allow change in unsupported parameters */ if (key || @@ -679,15 +688,29 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = indir[j++]; - hlut_val |= indir[j++] << 8; - hlut_val |= indir[j++] << 16; - hlut_val |= indir[j++] << 24; - wr32(hw, I40E_VFQF_HLUT(i), hlut_val); + if (key) { + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; + } + memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; + } + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; } - return 0; + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + + return i40evf_config_rss(vsi, seed, vsi->rss_lut_user, + I40EVF_HLUT_ARRAY_SIZE); } static const struct ethtool_ops i40evf_ethtool_ops = { diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d962164dfb0f..a0bbb6b9f5aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,15 @@ char i40evf_driver_name[] = "i40evf"; static const char 
i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.3.33" +#define DRV_KERN "-k" + +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 4 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) \ + DRV_KERN const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation."; @@ -259,7 +267,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) { struct i40e_hw *hw = &adapter->hw; int i; - uint32_t dyn_ctl; + u32 dyn_ctl; if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); @@ -307,10 +315,9 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40e_hw *hw = &adapter->hw; u32 val; - /* handle non-queue interrupts */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); - + /* handle non-queue interrupts, these reads clear the registers */ + val = rd32(hw, I40E_VFINT_ICR01); + val = rd32(hw, I40E_VFINT_ICR0_ENA1); val = rd32(hw, I40E_VFINT_DYN_CTL01) | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; @@ -348,8 +355,8 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) static void i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) { - struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; - struct i40e_ring *rx_ring = adapter->rx_rings[r_idx]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; @@ -369,8 +376,8 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) static void i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) { - struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; - struct i40e_ring *tx_ring = adapter->tx_rings[t_idx]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; @@ -465,7 +472,7 @@ static void i40evf_netpoll(struct net_device *netdev) return; for (i = 0; i < q_vectors; i++) - i40evf_msix_clean_rings(0, adapter->q_vector[i]); + i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); } #endif @@ -487,7 +494,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = adapter->q_vector[vector]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -532,7 +539,7 @@ free_queue_irqs: adapter->msix_entries[vector + NONQ_VECS].vector, NULL); free_irq(adapter->msix_entries[vector + NONQ_VECS].vector, - adapter->q_vector[vector]); + &adapter->q_vectors[vector]); } return err; } @@ -582,7 +589,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) irq_set_affinity_hint(adapter->msix_entries[i+1].vector, NULL); free_irq(adapter->msix_entries[i+1].vector, - adapter->q_vector[i]); + &adapter->q_vectors[i]); } } @@ -611,7 +618,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) int i; for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); + adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); } /** 
@@ -656,8 +663,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) } for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i]->rx_buf_len = rx_buf_len; + adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i].rx_buf_len = rx_buf_len; } } @@ -954,7 +961,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; napi_enable(napi); } @@ -971,7 +978,7 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; napi_disable(&q_vector->napi); } } @@ -992,7 +999,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = adapter->rx_rings[i]; + struct i40e_ring *ring = &adapter->rx_rings[i]; i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; @@ -1112,16 +1119,10 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) **/ static void i40evf_free_queues(struct i40evf_adapter *adapter) { - int i; - if (!adapter->vsi_res) return; - for (i = 0; i < adapter->num_active_queues; i++) { - if (adapter->tx_rings[i]) - kfree_rcu(adapter->tx_rings[i], rcu); - adapter->tx_rings[i] = NULL; - adapter->rx_rings[i] = NULL; - } + kfree(adapter->tx_rings); + kfree(adapter->rx_rings); } /** @@ -1136,13 +1137,20 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i; + adapter->tx_rings = kcalloc(adapter->num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + adapter->rx_rings = kcalloc(adapter->num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; - tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); - if (!tx_ring) - goto err_out; + tx_ring = &adapter->tx_rings[i]; tx_ring->queue_index = i; tx_ring->netdev = adapter->netdev; @@ -1150,14 +1158,12 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->count = adapter->tx_desc_count; if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; - adapter->tx_rings[i] = tx_ring; - rx_ring = &tx_ring[1]; + rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - adapter->rx_rings[i] = rx_ring; } return 0; @@ -1207,115 +1213,273 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) err = i40evf_acquire_msix_vectors(adapter, v_budget); out: - adapter->netdev->real_num_tx_queues = pairs; + netif_set_real_num_rx_queues(adapter->netdev, pairs); + netif_set_real_num_tx_queues(adapter->netdev, pairs); return err; } /** - * i40e_configure_rss_aq - Prepare for RSS using AQ commands + * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Return 0 on success, negative on 
failure **/ -static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { - struct i40e_aqc_get_set_rss_key_data rss_key; struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; - int ret = 0, i; - u8 *rss_lut; + int ret = 0; if (!vsi->id) - return; + return -EINVAL; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n", adapter->current_op); - return; + return -EBUSY; } - memset(&rss_key, 0, sizeof(rss_key)); - memcpy(&rss_key, seed, sizeof(rss_key)); + if (seed) { + struct i40e_aqc_get_set_rss_key_data *rss_key = + (struct i40e_aqc_get_set_rss_key_data *)seed; + ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } - rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL); - if (!rss_lut) - return; + if (lut) { + ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } - /* Populate the LUT with max no. PF queues in round robin fashion */ - for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++) - rss_lut[i] = i % adapter->num_active_queues; + return ret; +} - ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return; +/** + * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); } - ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut, - (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4); - if (ret) - dev_err(&adapter->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40EVF_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]); + } + i40e_flush(hw); + + return 0; } /** - * i40e_configure_rss_reg - Prepare for RSS if used - * @adapter: board private structure - * @seed: RSS hash seed + * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter, - const u8 *seed) +static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { + struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; - 
u32 *seed_dw = (u32 *)seed; - u32 cqueue = 0; - u32 lut = 0; - int i, j; + int ret = 0; - /* Fill out hash function seed */ - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); - - /* Populate the LUT with max no. PF queues in round robin fashion */ - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (cqueue == adapter->num_active_queues) - cqueue = 0; - lut |= ((cqueue) << (8 * j)); - cqueue++; + if (seed) { + ret = i40evf_aq_get_rss_key(hw, vsi->id, + (struct i40e_aqc_get_set_rss_key_data *)seed); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot get RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; } - wr32(hw, I40E_VFQF_HLUT(i), lut); } - i40e_flush(hw); + + if (lut) { + ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot get RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } + + return ret; +} + +/** + * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i)); + } + + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40EVF_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i)); + } + + return 0; +} + +/** + * i40evf_config_rss - Configure RSS keys and lut + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + + if (RSS_AQ(adapter)) + return i40evf_config_rss_aq(vsi, seed, lut, lut_size); + else + return i40evf_config_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40evf_get_rss - Get RSS keys and lut + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + + if (RSS_AQ(adapter)) + return i40evf_get_rss_aq(vsi, seed, lut, lut_size); + else + return i40evf_get_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40evf_fill_rss_lut - Fill the lut with default values + * @lut: Lookup table to be filled with + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + **/ +static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; } /** - * i40evf_configure_rss - Prepare for RSS + * i40evf_init_rss - Prepare for RSS * @adapter: board private structure + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss(struct i40evf_adapter *adapter) +static int 
i40evf_init_rss(struct i40evf_adapter *adapter) { + struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; u8 seed[I40EVF_HKEY_ARRAY_SIZE]; u64 hena; - - netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + u8 *lut; + int ret; /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ hena = I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - if (RSS_AQ(adapter)) - i40evf_configure_rss_aq(&adapter->vsi, seed); + lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use user configured lut if there is one, otherwise use default */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE); else - i40evf_configure_rss_reg(adapter, seed); + i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE, + adapter->num_active_queues); + + /* Use user configured hash key if there is one, otherwise + * user default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); + kfree(lut); + + return ret; } /** @@ -1327,21 +1491,22 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) **/ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) { - int q_idx, num_q_vectors; + int q_idx = 0, num_q_vectors; struct i40e_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), + GFP_KERNEL); + if (!adapter->q_vectors) + goto err_out; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) - goto err_out; + q_vector = &adapter->q_vectors[q_idx]; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, i40evf_napi_poll, NAPI_POLL_WEIGHT); - adapter->q_vector[q_idx] = q_vector; } return 0; @@ -1349,11 +1514,10 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) err_out: while (q_idx) { q_idx--; - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; netif_napi_del(&q_vector->napi); - kfree(q_vector); - adapter->q_vector[q_idx] = NULL; } + kfree(adapter->q_vectors); return -ENOMEM; } @@ -1374,13 +1538,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; - - adapter->q_vector[q_idx] = NULL; + struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); - kfree(q_vector); } + kfree(adapter->q_vectors); } /** @@ -1439,6 +1601,22 @@ err_set_interrupt: } /** + * i40evf_clear_rss_config_user - Clear user configurations of RSS + * @vsi: Pointer to VSI structure + **/ +static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + +/** * i40evf_watchdog_timer - Periodic call-back timer * @data: pointer to adapter disguised as unsigned long **/ @@ -1565,7 +1743,7 @@ static void i40evf_watchdog_task(struct work_struct *work) * PF, so we don't have to set current_op as we will * not get a response through the ARQ. 
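A side note on the default table built by i40evf_fill_rss_lut() above: it is a plain round-robin over the active queues. A standalone sketch, not driver code; the 64-entry table size and the 4-queue count are illustrative assumptions:

	#include <stdio.h>

	/* Same rule as i40evf_fill_rss_lut(): lut[i] = i % rss_size. */
	int main(void)
	{
		unsigned char lut[64];		/* example LUT size */
		unsigned int i, rss_size = 4;	/* example: 4 active queues */

		for (i = 0; i < sizeof(lut); i++)
			lut[i] = i % rss_size;

		for (i = 0; i < sizeof(lut); i++)
			printf("%u ", lut[i]);	/* prints 0 1 2 3 0 1 2 3 ... */
		printf("\n");
		return 0;
	}

With this fill, hash buckets spread evenly across the queues; if the user has installed a custom table (vsi->rss_lut_user), i40evf_init_rss() copies that table instead.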
*/ - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } @@ -1864,9 +2042,12 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; + if (!adapter->tx_rings) + return; + for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->tx_rings[i]->desc) - i40evf_free_tx_resources(adapter->tx_rings[i]); + if (adapter->tx_rings[i].desc) + i40evf_free_tx_resources(&adapter->tx_rings[i]); } /** @@ -1884,8 +2065,8 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { - adapter->tx_rings[i]->count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -1911,8 +2092,8 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i]->count = adapter->rx_desc_count; - err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -1932,9 +2113,12 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; + if (!adapter->rx_rings) + return; + for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->rx_rings[i]->desc) - i40evf_free_rx_resources(adapter->rx_rings[i]); + if (adapter->rx_rings[i].desc) + i40evf_free_rx_resources(&adapter->rx_rings[i]); } /** @@ -2137,7 +2321,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) netdev->features |= NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | @@ -2263,6 +2447,14 @@ static void i40evf_init_task(struct work_struct *work) if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { err = i40evf_send_vf_config_msg(adapter); goto err; + } else if (err == I40E_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. 
+ */ + i40evf_shutdown_adminq(hw); + dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); + return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", @@ -2313,7 +2505,7 @@ static void i40evf_init_task(struct work_struct *work) I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; if (!RSS_AQ(adapter)) - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -2334,7 +2526,6 @@ static void i40evf_init_task(struct work_struct *work) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - dev_info(&pdev->dev, "%s\n", i40evf_driver_string); adapter->state = __I40EVF_DOWN; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); @@ -2343,7 +2534,7 @@ static void i40evf_init_task(struct work_struct *work) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } else { - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); } return; restart: @@ -2438,8 +2629,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - MAX_TX_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -2476,6 +2666,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); @@ -2626,9 +2822,16 @@ static void i40evf_remove(struct pci_dev *pdev) flush_scheduled_work(); + /* Clear user configurations for RSS */ + i40evf_clear_rss_config_user(&adapter->vsi); + if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); + /* destroy the locks only once, here */ + mutex_destroy(&hw->aq.arq_mutex); + mutex_destroy(&hw->aq.asq_mutex); + iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 32e620e1eb5c..c1c526283757 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -157,7 +157,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | I40E_VIRTCHNL_VF_OFFLOAD_VLAN | - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) @@ -242,7 +244,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); - vqci = kzalloc(len, GFP_ATOMIC); + vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; @@ -255,19 +257,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) for (i = 0; i < pairs; i++) { vqpi->txq.vsi_id = vqci->vsi_id; vqpi->txq.queue_id = i; - vqpi->txq.ring_len = 
adapter->tx_rings[i]->count; - vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; + vqpi->txq.ring_len = adapter->tx_rings[i].count; + vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; vqpi->txq.headwb_enabled = 1; vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; - vqpi->rxq.ring_len = adapter->rx_rings[i]->count; - vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; + vqpi->rxq.ring_len = adapter->rx_rings[i].count; + vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; - vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; + vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; vqpi++; } @@ -353,14 +355,14 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_irq_map_info) + (adapter->num_msix_vectors * sizeof(struct i40e_virtchnl_vector_map)); - vimi = kzalloc(len, GFP_ATOMIC); + vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; vimi->num_vectors = adapter->num_msix_vectors; /* Queue vectors first */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = adapter->q_vector[v_idx]; + q_vector = adapter->q_vectors + v_idx; vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; vimi->vecmap[v_idx].txq_map = q_vector->ring_mask; @@ -391,6 +393,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) struct i40e_virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; struct i40evf_mac_filter *f; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -415,10 +418,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + more = true; } - veal = kzalloc(len, GFP_ATOMIC); + veal = kzalloc(len, GFP_KERNEL); if (!veal) return; @@ -431,7 +436,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)veal, len); kfree(veal); @@ -450,6 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) struct i40e_virtchnl_ether_addr_list *veal; struct i40evf_mac_filter *f, *ftmp; int len, i = 0, count = 0; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -474,9 +481,11 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + more = true; } - veal = kzalloc(len, GFP_ATOMIC); + veal = kzalloc(len, GFP_KERNEL); if (!veal) return; @@ -490,7 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; 
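The MAC filter messages above (and the VLAN variants that follow) now cap how many entries go into a single virtchnl message so the payload fits one admin-queue buffer; when the list is longer, the "more" flag keeps the aq_required bit set so the watchdog sends the remainder on a later pass. A minimal sketch of that sizing rule, illustrative only — the helper name is made up, and the buffer limit stands in for the patch's I40EVF_MAX_AQ_BUF_SIZE:

	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * Cap the per-message element count so header + count * element fits
	 * in one admin-queue buffer; *more tells the caller to leave its
	 * I40EVF_FLAG_AQ_* bit set and send the rest later.
	 */
	int cap_list_count(int requested, size_t hdr_len, size_t elem_len,
			   size_t max_buf, bool *more)
	{
		int max_elems = (int)((max_buf - hdr_len) / elem_len);

		*more = requested > max_elems;
		return *more ? max_elems : requested;
	}

The message length is then computed from the capped count rather than always being I40EVF_MAX_AQ_BUF_SIZE, matching the new len calculations in the hunks above.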
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)veal, len); kfree(veal); @@ -509,6 +519,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) struct i40e_virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct i40evf_vlan_filter *f; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -534,9 +545,11 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; } - vvfl = kzalloc(len, GFP_ATOMIC); + vvfl = kzalloc(len, GFP_KERNEL); if (!vvfl) return; @@ -549,7 +562,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -567,6 +581,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) struct i40e_virtchnl_vlan_filter_list *vvfl; struct i40evf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -592,9 +607,11 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; } - vvfl = kzalloc(len, GFP_ATOMIC); + vvfl = kzalloc(len, GFP_KERNEL); if (!vvfl) return; @@ -608,7 +625,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -724,9 +742,29 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, i40evf_stat_str(&adapter->hw, v_retval), - v_opcode); + switch (v_opcode) { + case I40E_VIRTCHNL_OP_ADD_VLAN: + dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", + v_retval, + i40evf_stat_str(&adapter->hw, v_retval), + v_opcode); + } } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 7a73510e547c..adb33e2a0137 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -45,8 +45,6 @@ static s32 
igb_get_cfg_done_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); -static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); -static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); static s32 igb_reset_hw_82575(struct e1000_hw *); static s32 igb_reset_hw_82580(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); @@ -205,13 +203,10 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) case e1000_82580: case e1000_i350: case e1000_i354: - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; - break; case e1000_i210: case e1000_i211: - phy->ops.read_reg = igb_read_phy_reg_gs40g; - phy->ops.write_reg = igb_write_phy_reg_gs40g; + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; break; default: phy->ops.read_reg = igb_read_phy_reg_igp; @@ -272,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) if (ret_val) goto out; } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; @@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) case I210_I_PHY_ID: phy->type = e1000_phy_i210; phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; phy->ops.get_phy_info = igb_get_phy_info_m88; phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; @@ -925,6 +926,8 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) if (phy->id == M88E1512_E_PHY_ID) ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); out: return ret_val; } @@ -2145,7 +2148,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ -static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; @@ -2169,7 +2172,7 @@ out: * * Writes data to MDI control register in the PHY at offset. 
**/ -static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index b1915043bc0c..c3c598c347a9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -927,7 +927,10 @@ /* Intel i347-AT4 Registers */ -#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ #define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ #define I347AT4_PAGE_SELECT 0x16 @@ -990,6 +993,7 @@ #define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ #define E1000_M88E1543_EEE_CTRL_1 0x0 #define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_FIBER_CTRL 0x0 #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 #define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 2003b3756ba2..4034207eb5cc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -441,6 +441,7 @@ struct e1000_phy_info { u16 cable_length; u16 max_cable_length; u16 min_cable_length; + u16 pair_length[4]; u8 mdix; diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 65d931669f81..8aa798737d4d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -861,10 +861,10 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) if (ret_val) nvm_word = E1000_INVM_DEFAULT_AL; tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { /* check current state directly from internal PHY */ - igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | - E1000_PHY_PLL_FREQ_REG), &phy_word); + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); if ((phy_word & E1000_PHY_PLL_UNCONF) != E1000_PHY_PLL_UNCONF) { ret_val = 0; @@ -896,7 +896,35 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) /* restore WUC register */ wr32(E1000_WUC, wuc); } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); /* restore MDICNFG setting */ wr32(E1000_MDICNFG, mdicnfg); return ret_val; } + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. 
+ **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 3442b6357d01..b2964a2a60b1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_init_nvm_params_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw); s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -84,7 +85,7 @@ enum E1000_INVM_STRUCTURE_TYPE { #define E1000_PCI_PMCSR_D3 0x03 #define E1000_MAX_PLL_TRIES 5 #define E1000_PHY_PLL_UNCONF 0xFF -#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_PAGE 0xFC #define E1000_PHY_PLL_FREQ_REG 0x000E #define E1000_INVM_DEFAULT_AL 0x202F #define E1000_INVM_AUTOLOAD 0x0A diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 23ec28f43f6d..5b54254aed4f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1717,59 +1717,76 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: case I210_I_PHY_ID: - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + - (I347AT4_PCDL + phy->addr), - &phy_data); + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); if (ret_val) - return ret_val; + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + - I347AT4_PCDC, &phy_data2); + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); if (ret_val) - return ret_val; + goto out; is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); - /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); - break; - case M88E1543_E_PHY_ID: - case M88E1512_E_PHY_ID: - case I347AT4_E_PHY_ID: - /* Remember the original page select and set it to 7 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); if (ret_val) goto out; - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + phy->pair_length[0] = phy_data / (is_cm ? 
100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); if (ret_val) goto out; - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), - &phy_data); + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); if (ret_val) goto out; - /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); if (ret_val) goto out; - is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + phy->pair_length[3] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; /* Reset the page selec to its original value */ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, @@ -2278,6 +2295,100 @@ out: } /** + * igb_initialize_M88E1543_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** * igb_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure * @@ -2494,66 +2605,6 @@ out: } /** - * igb_write_phy_reg_gs40g - Write GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to write to - * upper half is page to use. - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; - ret_val = igb_write_phy_reg_mdic(hw, offset, data); - -release: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * igb_read_phy_reg_gs40g - Read GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to read to - * upper half is page to use. - * @data: data to read at register offset - * - * Acquires semaphore, if necessary, then reads the data in the PHY register - * at the offset. Release any acquired semaphores before exiting. 
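With the GS40G page-shifted helpers being removed here, page handling is explicit at each call site: select the page through I347AT4_PAGE_SELECT, access the register, then restore the previous page. A rough sketch of that sequence using the phy ops seen in the diff — the function name is made up and error handling is trimmed to the essentials:

	static s32 read_paged_phy_reg(struct e1000_hw *hw, u16 page, u32 offset,
				      u16 *data)
	{
		struct e1000_phy_info *phy = &hw->phy;
		u16 saved_page;
		s32 ret_val;

		/* Remember the current page, then switch to the one we need. */
		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, &saved_page);
		if (ret_val)
			return ret_val;
		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, page);
		if (ret_val)
			return ret_val;

		ret_val = phy->ops.read_reg(hw, offset, data);

		/* Put the page select back the way we found it. */
		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, saved_page);
		return ret_val;
	}

This mirrors what igb_get_cable_length_m88_gen2() does around the per-pair length reads and what igb_pll_workaround_i210() does with E1000_PHY_PLL_FREQ_PAGE.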
- **/ -s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; - ret_val = igb_read_phy_reg_mdic(hw, offset, data); - -release: - hw->phy.ops.release(hw); - return ret_val; -} - -/** * igb_set_master_slave_mode - Setup PHY for Master/slave mode * @hw: pointer to the HW structure * diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 24d55edbb0e3..969a6ddafa3b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -62,6 +62,7 @@ void igb_power_up_phy_copper(struct e1000_hw *hw); void igb_power_down_phy_copper(struct e1000_hw *hw); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); @@ -71,8 +72,8 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); s32 igb_get_cable_length_82580(struct e1000_hw *hw); -s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_check_polarity_m88(struct e1000_hw *hw); /* IGP01E1000 Specific Registers */ @@ -143,17 +144,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define E1000_CABLE_LENGTH_UNDEFINED 0xFF -/* GS40G - I210 PHY defines */ -#define GS40G_PAGE_SELECT 0x16 -#define GS40G_PAGE_SHIFT 16 -#define GS40G_OFFSET_MASK 0xFFFF -#define GS40G_PAGE_2 0x20000 -#define GS40G_MAC_REG2 0x15 -#define GS40G_MAC_LB 0x4140 -#define GS40G_MAC_SPEED_1G 0X0006 -#define GS40G_COPPER_SPEC 0x0010 -#define GS40G_LINE_LB 0x4000 - /* SFP modules ID memory locations */ #define E1000_SFF_IDENTIFIER_OFFSET 0x00 #define E1000_SFF_IDENTIFIER_SFF 0x02 diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 4af2870e49f8..21d9d02885cb 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -66,6 +66,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ @@ -385,8 +386,7 @@ do { \ #define array_wr32(reg, offset, value) \ wr32((reg) + ((offset) << 2), (value)) -#define array_rd32(reg, offset) \ - (readl(hw->hw_addr + reg + ((offset) << 2))) +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) /* DMA Coalescing registers */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ diff --git a/drivers/net/ethernet/intel/igb/igb.h 
b/drivers/net/ethernet/intel/igb/igb.h index 1a2f1cc44b28..e3cb93bdb21a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -389,6 +389,8 @@ struct igb_adapter { u16 link_speed; u16 link_duplex; + u8 __iomem *io_addr; /* Mainly for iounmap use */ + struct work_struct reset_task; struct work_struct watchdog_task; bool fc_autoneg; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 2529bc625de4..1d329f1d047b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -127,10 +127,20 @@ static const struct igb_stats igb_gstrings_net_stats[] = { #define IGB_STATS_LEN \ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" }; #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) @@ -2002,7 +2012,7 @@ static void igb_diag_test(struct net_device *netdev, /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ - if (igb_link_test(adapter, &data[4])) + if (igb_link_test(adapter, &data[TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; if (if_running) @@ -2011,21 +2021,21 @@ static void igb_diag_test(struct net_device *netdev, else igb_reset(adapter); - if (igb_reg_test(adapter, &data[0])) + if (igb_reg_test(adapter, &data[TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); - if (igb_eeprom_test(adapter, &data[1])) + if (igb_eeprom_test(adapter, &data[TEST_EEP])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); - if (igb_intr_test(adapter, &data[2])) + if (igb_intr_test(adapter, &data[TEST_IRQ])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); /* power up link for loopback test */ igb_power_up_link(adapter); - if (igb_loopback_test(adapter, &data[3])) + if (igb_loopback_test(adapter, &data[TEST_LOOP])) eth_test->flags |= ETH_TEST_FL_FAILED; /* restore speed, duplex, autoneg settings */ @@ -2045,16 +2055,16 @@ static void igb_diag_test(struct net_device *netdev, dev_info(&adapter->pdev->dev, "online testing starting\n"); /* PHY is powered down when interface is down */ - if (if_running && igb_link_test(adapter, &data[4])) + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; else - data[4] = 0; + data[TEST_LINK] = 0; /* Online tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; clear_bit(__IGB_TESTING, &adapter->state); } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea7b09887245..31e5f3942839 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -946,7 +946,6 @@ static void igb_configure_msix(struct igb_adapter *adapter) static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = 
&adapter->hw; int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, @@ -959,7 +958,7 @@ static int igb_request_msix(struct igb_adapter *adapter) vector++; - q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); if (q_vector->rx.ring && q_vector->tx.ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, @@ -1230,7 +1229,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, q_vector->tx.work_limit = adapter->tx_work_limit; /* initialize ITR configuration */ - q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); q_vector->itr_val = IGB_START_ITR; /* initialize pointer to rings */ @@ -2294,9 +2293,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); err = -EIO; - hw->hw_addr = pci_iomap(pdev, 0, 0); - if (!hw->hw_addr) + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igb_netdev_ops; igb_set_ethtool_ops(netdev); @@ -2378,8 +2379,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CSUM; - netdev->features |= NETIF_F_SCTP_CSUM; + netdev->hw_features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_SCTP_CRC; } netdev->priv_flags |= IFF_UNICAST_FLT; @@ -2656,7 +2657,7 @@ err_sw_init: #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif - pci_iounmap(pdev, hw->hw_addr); + pci_iounmap(pdev, adapter->io_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@ -2823,7 +2824,7 @@ static void igb_remove(struct pci_dev *pdev) igb_clear_interrupt_scheme(adapter); - pci_iounmap(pdev, hw->hw_addr); + pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, @@ -2856,6 +2857,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter) if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; + /* Of the below we really only want the effect of getting + * IGB_FLAG_HAS_MSIX set (if available), without which + * igb_enable_sriov() has no effect. 
+ */ + igb_set_interrupt_capability(adapter, true); + igb_reset_interrupt_capability(adapter); + pci_sriov_set_totalvfs(pdev, 7); igb_enable_sriov(pdev, max_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 1d2174526a4c..4b9156cd8b93 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -139,6 +139,7 @@ enum ixgbe_tx_flags { #define IXGBE_X540_VF_DEVICE_ID 0x1515 struct vf_data_storage { + struct pci_dev *vfdev; unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; @@ -224,6 +225,8 @@ struct ixgbe_rx_queue_stats { u64 csum_err; }; +#define IXGBE_TS_HDR_LEN 8 + enum ixgbe_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, @@ -282,6 +285,8 @@ struct ixgbe_ring { u16 next_to_use; u16 next_to_clean; + unsigned long last_rx_timestamp; + union { u16 next_to_alloc; struct { @@ -312,7 +317,7 @@ enum ixgbe_ring_f_enum { }; #define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_RSS_INDICES_X550 63 #define IXGBE_MAX_VMDQ_INDICES 64 #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ #define IXGBE_MAX_FCOE_INDICES 8 @@ -587,9 +592,10 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) struct ixgbe_mac_addr { u8 addr[ETH_ALEN]; - u16 queue; + u16 pool; u16 state; /* bitmask */ }; + #define IXGBE_MAC_STATE_DEFAULT 0x1 #define IXGBE_MAC_STATE_MODIFIED 0x2 #define IXGBE_MAC_STATE_IN_USE 0x4 @@ -639,6 +645,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) #define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) +#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) +#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) @@ -656,6 +664,7 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_VXLAN #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #endif +#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) /* Tx fast path data */ int num_tx_queues; @@ -755,9 +764,12 @@ struct ixgbe_adapter { unsigned long last_rx_ptp_check; unsigned long last_rx_timestamp; spinlock_t tmreg_lock; - struct cyclecounter cc; - struct timecounter tc; + struct cyclecounter hw_cc; + struct timecounter hw_tc; u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp)(struct ixgbe_adapter *); /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); @@ -883,9 +895,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 queue); + const u8 *addr, u16 queue); int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 queue); + const u8 *addr, u16 queue); +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, struct ixgbe_ring *); @@ -968,12 +981,33 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); +void 
ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); +static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) { + ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb); + return; + } + + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + return; + + ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; +} + int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 65db69b862fb..d8a9fb8a59e2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -765,13 +765,14 @@ mac_reset_top: ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; @@ -879,11 +880,12 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vlvf_bypass: boolean flag - unused * * Turn on/off specified VLAN in the VLAN filter table. 
**/ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index a39afcf03e2c..fa8d4f40ac2a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -990,13 +990,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -1082,12 +1083,16 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index ce61b36b94f1..64045053e874 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1884,10 +1884,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for RAR 0 */ - hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); } + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; @@ -2454,6 +2455,17 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + /* Poll for bit to read as set */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) + break; + usleep_range(100, 120); + } + if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { + hw_dbg(hw, "GIO disable did not set - requesting resets\n"); + goto gio_disable_fail; + } + /* Exit if master requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || ixgbe_removed(hw->hw_addr)) @@ -2475,6 +2487,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * again to clear out any effects they may have had on our device. 
*/ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); +gio_disable_fail: hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) @@ -2987,43 +3000,44 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * return the VLVF index where this VLAN id should be placed * **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { - u32 bits = 0; - u32 first_empty_slot = 0; - s32 regindex; + s32 regindex, first_empty_slot; + u32 bits; /* short cut the special case */ if (vlan == 0) return 0; - /* - * Search for the vlan id in the VLVF entries. Save off the first empty - * slot found along the way - */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + + /* add VLAN enable bit for comparison */ + vlan |= IXGBE_VLVF_VIEN; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if (!bits && !(first_empty_slot)) + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) first_empty_slot = regindex; - else if ((bits & 0x0FFF) == vlan) - break; } - /* - * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan - * in the VLVF. Else use the first empty VLVF register for this - * vlan id. - */ - if (regindex >= IXGBE_VLVF_ENTRIES) { - if (first_empty_slot) - regindex = first_empty_slot; - else { - hw_dbg(hw, "No space in VLVF.\n"); - regindex = IXGBE_ERR_NO_SPACE; - } - } + /* If we are here then we didn't find the VLAN. Return first empty + * slot we found during our search, else error. + */ + if (!first_empty_slot) + hw_dbg(hw, "No space in VLVF.\n"); - return regindex; + return first_empty_slot ? : IXGBE_ERR_NO_SPACE; } /** @@ -3032,21 +3046,17 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFVFB * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. 
**/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { - s32 regindex; - u32 bitindex; - u32 vfta; - u32 bits; - u32 vt; - u32 targetbit; - bool vfta_changed = false; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; - if (vlan > 4095) + if ((vlan > 4095) || (vind > 63)) return IXGBE_ERR_PARAM; /* @@ -3061,22 +3071,16 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[11-5]: which register * bits[4-0]: which bit in the register */ - regindex = (vlan >> 5) & 0x7F; - bitindex = vlan & 0x1F; - targetbit = (1 << bitindex); - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - - if (vlan_on) { - if (!(vfta & targetbit)) { - vfta |= targetbit; - vfta_changed = true; - } - } else { - if ((vfta & targetbit)) { - vfta &= ~targetbit; - vfta_changed = true; - } - } + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; /* Part 2 * If VT Mode is set @@ -3086,85 +3090,67 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * Or !vlan_on * clear the pool bit and possibly the vind */ - vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - if (vt & IXGBE_VT_CTL_VT_ENABLE) { - s32 vlvf_index; - - vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); - if (vlvf_index < 0) - return vlvf_index; - - if (vlan_on) { - /* set the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits |= (1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits |= (1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - } - } else { - /* clear the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits &= ~(1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits &= ~(1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - } - } + if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) + goto vfta_update; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } - /* - * If there are still bits set in the VLVFB registers - * for the VLAN ID indicated we need to see if the - * caller is requesting that we clear the VFTA entry bit. - * If the caller has requested that we clear the VFTA - * entry bit but there are still pools/VFs using this VLAN - * ID entry then ignore the request. We're not worried - * about the case where we're turning the VFTA VLAN ID - * entry bit on, only when requested to turn it off as - * there may be multiple pools and/or VFs using the - * VLAN ID entry. In that case we cannot clear the - * VFTA bit until all pools/VFs using that VLAN ID have also - * been cleared. This will be indicated by "bits" being - * zero. 
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); + + /* set the pool bit */ + bits |= 1 << (vind % 32); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (vind % 32); + + if (!bits && + !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool */ - if (bits) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), - (IXGBE_VLVF_VIEN | vlan)); - if (!vlan_on) { - /* someone wants to clear the vfta entry - * but some pools/VFs are still using it. - * Ignore it. */ - vfta_changed = false; - } - } else { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); - } + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + /* disable VLVF and clear remaining bit from pool */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); + + return 0; } - if (vfta_changed) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); return 0; } @@ -3184,8 +3170,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index a0044e4a8b90..2b9563137fd8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -92,7 +92,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); + u32 vind, bool vlan_on, bool vlvf_bypass); s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index a507a6fe3624..02c7333a9c83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -139,6 +139,11 @@ s32 
ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ @@ -149,7 +154,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ - if (credit_max && (credit_max < min_credit)) + if (credit_max < min_credit) credit_max = min_credit; if (direction == DCB_TX_CONFIG) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 23277ab153b6..b5cc989a3d23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -223,13 +223,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) reg |= IXGBE_MFLCN_DPF; /* - * X540 supports per TC Rx priority flow control. So - * clear all TCs and only enable those that should be + * X540 & X550 supports per TC Rx priority flow control. + * So clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - if (hw->mac.type == ixgbe_mac_X540) + if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d681273bd39d..bea96b3bc90c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -151,47 +151,70 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +/* currently supported speeds for 10G */ +#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) + +static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) +{ + if (!ixgbe_isbackplane(hw->phy.media_type)) + return SUPPORTED_10000baseT_Full; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + return SUPPORTED_10000baseKX4_Full; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + return SUPPORTED_10000baseKR_Full; + default: + return SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full; + } +} + static int ixgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; - u32 link_speed = 0; bool autoneg = false; - bool link_up; hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) - ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->supported |= ixgbe_get_supported_10gtypes(hw); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ecmd->supported |= SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= SUPPORTED_100baseT_Full; + 
ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; + /* default advertised speed if phy.autoneg_advertised isn't set */ + ecmd->advertising = ecmd->supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { + ecmd->advertising = 0; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - ecmd->advertising |= ADVERTISED_2500baseX_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } } else { - /* default modes in case phy.autoneg_advertised isn't set */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->advertising = ADVERTISED_10000baseT_Full; @@ -229,6 +252,10 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { case ixgbe_sfp_type_da_cu: @@ -284,9 +311,8 @@ static int ixgbe_get_settings(struct net_device *netdev, break; } - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) { - switch (link_speed) { + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 631c603fc966..2a653ec954f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -77,7 +77,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!netdev) return 0; - if (xid >= IXGBE_FCOE_DDP_MAX) + if (xid >= netdev->fcoe_ddp_xid) return 0; adapter = netdev_priv(netdev); @@ -177,7 +177,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 0; adapter = netdev_priv(netdev); - if (xid >= IXGBE_FCOE_DDP_MAX) { + if (xid >= netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } @@ -517,6 +517,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { @@ -593,6 +594,8 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; first->tx_flags |= IXGBE_TX_FLAGS_TSO; + /* Hardware expects L4T to be RSV for FCoE TSO */ + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; } /* 
set flag indicating FCOE to ixgbe_tx_map call */ @@ -610,7 +613,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, - IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); + type_tucmd, mss_l4len_idx); return 0; } @@ -620,8 +623,7 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) struct ixgbe_fcoe_ddp_pool *ddp_pool; ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - if (ddp_pool->pool) - dma_pool_destroy(ddp_pool->pool); + dma_pool_destroy(ddp_pool->pool); ddp_pool->pool = NULL; } @@ -997,8 +999,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, return -EINVAL; /* Don't return information on unsupported devices */ - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X540) + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return -EINVAL; /* Manufacturer */ @@ -1044,6 +1045,10 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, snprintf(info->model, sizeof(info->model), "Intel 82599"); + } else if (hw->mac.type == ixgbe_mac_X550) { + snprintf(info->model, + sizeof(info->model), + "Intel X550"); } else { snprintf(info->model, sizeof(info->model), diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f3168bcc7d87..e771e764daa3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -844,7 +844,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); - napi_hash_add(&q_vector->napi); #ifdef CONFIG_NET_RX_BUSY_POLL /* initialize busy poll */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 47395ff5d908..c4003a88bbf6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -65,9 +65,6 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" -#ifdef CONFIG_IXGBE_VXLAN -#include <net/vxlan.h> -#endif char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -175,6 +172,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *ixgbe_wq; + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, @@ -316,7 +315,7 @@ static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) if (!test_bit(__IXGBE_DOWN, &adapter->state) && !test_bit(__IXGBE_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); + queue_work(ixgbe_wq, &adapter->service_task); } static void ixgbe_remove_adapter(struct ixgbe_hw *hw) @@ -1484,7 +1483,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { - ring->rx_stats.csum_err++; + skb->ip_summed = CHECKSUM_NONE; return; } /* If we checked the outer header let the stack know */ @@ -1635,6 +1634,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; + u32 flags = rx_ring->q_vector->adapter->flags; ixgbe_update_rsc_stats(rx_ring, skb); @@ -1642,8 +1642,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, 
rx_desc, skb); - if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) + ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -1659,6 +1659,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { + skb_mark_napi_id(skb, &q_vector->napi); if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); else @@ -2123,7 +2124,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } #endif /* IXGBE_FCOE */ - skb_mark_napi_id(skb, &q_vector->napi); ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ @@ -2741,7 +2741,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter, eicr); + ixgbe_ptp_check_pps_event(adapter); /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2757,7 +2757,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->tx) clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); - if (!ixgbe_qv_lock_napi(q_vector)) + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) return budget; /* attempt to distribute budget to each queue fairly, but don't allow @@ -2947,10 +2948,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter, eicr); + ixgbe_ptp_check_pps_event(adapter); /* would disable interrupts here but EIAM disabled it */ - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); /* * re-enable link(maybe) and non-queue interrupts, no flush. @@ -3315,8 +3316,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, } /** - * Return a number of entries in the RSS indirection table - * + * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries * @adapter: device handle * * - 82598/82599/X540: 128 @@ -3334,8 +3334,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) } /** - * Write the RETA table to HW - * + * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. @@ -3374,8 +3373,7 @@ void ixgbe_store_reta(struct ixgbe_adapter *adapter) } /** - * Write the RETA table to HW (for x550 devices in SRIOV mode) - * + * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
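The kernel-doc fixes just above name ixgbe_rss_indir_tbl_entries() and ixgbe_store_reta(), the pair that sizes and programs the RSS redirection table. As a rough, hedged illustration of the technique those comments describe — packing byte-sized queue indices into 32-bit register writes — here is a self-contained sketch in plain C; the four-entries-per-register layout and the write_reg() callback are assumptions made for illustration only, and the driver's actual register programming lives in ixgbe_store_reta()/ixgbe_store_vfreta() in ixgbe_main.c.

#include <stdint.h>

/*
 * Sketch: pack an RSS indirection table of byte-sized queue indices
 * into 32-bit values, four entries per value, and hand each completed
 * value to a caller-supplied write callback.  The layout is assumed
 * for illustration, not taken from the driver.
 */
static void store_reta_sketch(const uint8_t *indir_tbl, uint32_t num_entries,
			      void (*write_reg)(uint32_t index, uint32_t value))
{
	uint32_t i, reta = 0;

	for (i = 0; i < num_entries; i++) {
		/* accumulate one byte per entry, entry 0 in bits 7:0 */
		reta |= (uint32_t)indir_tbl[i] << ((i & 3) * 8);

		/* every fourth entry completes one register value */
		if ((i & 3) == 3) {
			write_reg(i >> 2, reta);
			reta = 0;
		}
	}
}

With the 128 entries the comment lists for 82598/82599/X540, this works out to 32 register writes.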
@@ -3621,6 +3619,9 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); + /* Force flushing of IXGBE_RDLEN to prevent MDD */ + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); @@ -3704,6 +3705,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + /* * Set up VF register offsets for selected VT Mode, * i.e. 32 or 64 VFs for SR-IOV @@ -3901,12 +3905,56 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); set_bit(vid, adapter->active_vlans); return 0; } +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (idx = IXGBE_VLVF_ENTRIES; --idx;) { + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 bits, word; + int idx; + + idx = ixgbe_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + word = idx * 2 + (VMDQ_P(0) / 32); + bits = ~(1 << (VMDQ_P(0)) % 32); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* Disable the filter so this falls into the default pool. 
*/ + if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); + } +} + static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { @@ -3914,7 +3962,11 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + else + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); + clear_bit(vid, adapter->active_vlans); return 0; @@ -3992,6 +4044,129 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) } } +static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + default: + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + /* fall through */ + case ixgbe_mac_82598EB: + /* legacy case, we can just disable VLAN filtering */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); + u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); + + vlvfb |= 1 << (VMDQ_P(0) % 32); + IXGBE_WRITE_REG(hw, reg_offset, vlvfb); + } + + /* Set all bits in the VLAN filter table array */ + for (i = hw->mac.vft_size; i--;) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); +} + +#define VFTA_BLOCK_SIZE 8 +static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits; + + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern outselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + word = i * 2 + VMDQ_P(0) / 32; + bits = ~(1 << (VMDQ_P(0) % 32)); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); + } + + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); + } +} + +static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case 
ixgbe_mac_X550EM_x: + default: + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + /* fall through */ + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + + for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) + ixgbe_scrub_vfta(adapter, i); +} + static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { u16 vid; @@ -4034,124 +4209,156 @@ static int ixgbe_write_mc_addr_list(struct net_device *netdev) #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, - adapter->mac_table[i].queue, + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, IXGBE_RAH_AV); else hw->mac.ops.clear_rar(hw, i); - - adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); } } -#endif +#endif static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { - if (adapter->mac_table[i].state & - IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, - adapter->mac_table[i].addr, - adapter->mac_table[i].queue, - IXGBE_RAH_AV); - else - hw->mac.ops.clear_rar(hw, i); - adapter->mac_table[i].state &= - ~(IXGBE_MAC_STATE_MODIFIED); - } + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) + continue; + + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); } } static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) { - int i; + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; + int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - eth_zero_addr(adapter->mac_table[i].addr); - adapter->mac_table[i].queue = 0; + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; } + ixgbe_sync_mac_table(adapter); } -static int ixgbe_available_rars(struct ixgbe_adapter *adapter) +static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i, count = 0; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state == 0) - count++; + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* do not count default RAR as available */ + if 
(mac_table->state & IXGBE_MAC_STATE_DEFAULT) + continue; + + /* only count unused and addresses that belong to us */ + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { + if (mac_table->pool != pool) + continue; + } + + count++; } + return count; } /* this function destroys the first RAR entry */ -static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, - u8 *addr) +static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].queue = VMDQ_P(0); - adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | - IXGBE_MAC_STATE_IN_USE); - hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].queue, + memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); + mac_table->pool = VMDQ_P(0); + + mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; + + hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, IXGBE_RAH_AV); } -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; if (is_zero_ether_addr(addr)) return -EINVAL; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) continue; - adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | - IXGBE_MAC_STATE_IN_USE); - ether_addr_copy(adapter->mac_table[i].addr, addr); - adapter->mac_table[i].queue = queue; + + ether_addr_copy(mac_table->addr, addr); + mac_table->pool = pool; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED | + IXGBE_MAC_STATE_IN_USE; + ixgbe_sync_mac_table(adapter); + return i; } + return -ENOMEM; } -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) { - /* search table for addr, if found, set to 0 and sync */ - int i; + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; + int i; if (is_zero_ether_addr(addr)) return -EINVAL; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (ether_addr_equal(addr, adapter->mac_table[i].addr) && - adapter->mac_table[i].queue == queue) { - adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - eth_zero_addr(adapter->mac_table[i].addr); - adapter->mac_table[i].queue = 0; - ixgbe_sync_mac_table(adapter); - return 0; - } + /* search table for addr, if found clear IN_USE flag and sync */ + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* we can only delete an entry if it is in use */ + if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) + continue; + /* we only care about entries that belong to the given pool */ + if (mac_table->pool != pool) + continue; + /* we only care about a specific MAC address */ + if (!ether_addr_equal(addr, mac_table->addr)) + continue; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + + ixgbe_sync_mac_table(adapter); + + return 0; } + return -ENOMEM; } /** @@ -4169,7 +4376,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) int count = 0; /* return ENOMEM 
indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) + if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) return -ENOMEM; if (!netdev_uc_empty(netdev)) { @@ -4183,6 +4390,25 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) return count; } +static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4197,12 +4423,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; - u32 vlnctrl; int count; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); /* set all bits that we expect to always be set */ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ @@ -4212,25 +4436,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; - /* Only disable hardware filter vlans in promiscuous mode - * if SR-IOV and VMDQ are disabled - otherwise ensure - * that hardware VLAN filters remain enabled. 
- */ - if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED)) - vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + ixgbe_vlan_promisc_enable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } - vlnctrl |= IXGBE_VLNCTRL_VFE; hw->addr_ctrl.user_set_promisc = false; + ixgbe_vlan_promisc_disable(adapter); } /* @@ -4238,8 +4455,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0)); - if (count < 0) { + if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { fctrl |= IXGBE_FCTRL_UPE; vmolr |= IXGBE_VMOLR_ROPE; } @@ -4275,7 +4491,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev) /* NOTE: VLAN filtering is disabled by setting PROMISC */ } - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) @@ -5042,7 +5257,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int err; - u8 old_addr[ETH_ALEN]; if (ixgbe_removed(hw->hw_addr)) return; @@ -5078,10 +5292,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - /* do not flush user set addresses */ - memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + + /* flush entries out of MAC table */ ixgbe_flush_sw_mac_table(adapter); - ixgbe_mac_set_default_filter(adapter, old_addr); + __dev_uc_unsync(netdev, NULL); + + /* do not flush user set addresses */ + ixgbe_mac_set_default_filter(adapter); /* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) @@ -5331,6 +5548,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); + if (!adapter->mac_table) + return -ENOMEM; /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { @@ -6616,10 +6835,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct pci_dev *vfdev; + unsigned int vf; u32 gpc; - int pos; - unsigned short vf_id; if (!(netif_carrier_ok(adapter->netdev))) return; @@ -6636,26 +6853,17 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) if (!pdev) return; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return; - - /* get the device ID for the VF */ - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); - /* check status reg for all VFs owned by this PF */ - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { - u16 status_reg; - - pci_read_config_word(vfdev, PCI_STATUS, &status_reg); - if (status_reg & PCI_STATUS_REC_MASTER_ABORT) - /* issue VFLR */ - ixgbe_issue_vf_flr(adapter, vfdev); - } + for (vf = 0; vf < adapter->num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + u16 status_reg; - vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + if (!vfdev) + continue; + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg != IXGBE_FAILED_READ_CFG_WORD && + status_reg & PCI_STATUS_REC_MASTER_ABORT) + ixgbe_issue_vf_flr(adapter, vfdev); } } @@ -7024,6 +7232,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct tcphdr *tcphdr; u8 *raw; } 
transport_hdr; + __be16 frag_off; if (skb->encapsulation) { network_hdr.raw = skb_inner_network_header(skb); @@ -7047,13 +7256,17 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, case 6: vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; l4_hdr = network_hdr.ipv6->nexthdr; + if (likely((transport_hdr.raw - network_hdr.raw) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but version=%d\n", - network_hdr.ipv4->version); - } + break; } switch (l4_hdr) { @@ -7074,16 +7287,18 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, version=%d, l4 proto=%x\n", + network_hdr.ipv4->version, l4_hdr); } - break; + skb_checksum_help(skb); + goto no_csum; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } +no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; @@ -7358,7 +7573,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - if (skb->encapsulation) { + if (!skb->encapsulation) { + th = tcp_hdr(skb); + } else { #ifdef CONFIG_IXGBE_VXLAN struct ixgbe_adapter *adapter = q_vector->adapter; @@ -7377,14 +7594,34 @@ static void ixgbe_atr(struct ixgbe_ring *ring, #else return; #endif /* CONFIG_IXGBE_VXLAN */ - } else { - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) + } + + /* Currently only IPv4/IPv6 with TCP is supported */ + switch (hdr.ipv4->version) { + case IPVERSION: + if (hdr.ipv4->protocol != IPPROTO_TCP) return; - th = tcp_hdr(skb); + break; + case 6: + if (likely((unsigned char *)th - hdr.network == + sizeof(struct ipv6hdr))) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + } else { + __be16 frag_off; + u8 l4_hdr; + + ipv6_skip_exthdr(skb, hdr.network - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + return; + if (l4_hdr != IPPROTO_TCP) + return; + } + break; + default: + return; } /* skip this packet since it is invalid or the socket is closing */ @@ -7419,10 +7656,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring, common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (first->protocol == htons(ETH_P_IP)) { + switch (hdr.ipv4->version) { + case IPVERSION: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - } else { + break; + case 6: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ hdr.ipv6->saddr.s6_addr32[1] ^ @@ -7432,6 +7671,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.ipv6->daddr.s6_addr32[1] ^ hdr.ipv6->daddr.s6_addr32[2] ^ hdr.ipv6->daddr.s6_addr32[3]; + break; + default: + break; } #ifdef CONFIG_IXGBE_VXLAN @@ -7659,17 +7901,16 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; - int ret; if (!is_valid_ether_addr(addr->sa_data)) 
return -EADDRNOTAVAIL; - ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); - return ret > 0 ? 0 : ret; + ixgbe_mac_set_default_filter(adapter); + + return 0; } static int @@ -7920,6 +8161,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) */ if (netif_running(dev)) ixgbe_close(dev); + else + ixgbe_reset(adapter); + ixgbe_clear_interrupt_scheme(adapter); #ifdef CONFIG_IXGBE_DCB @@ -8152,7 +8396,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 pool = VMDQ_P(0); + + if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) return -ENOMEM; } @@ -8384,7 +8631,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_ALL_CSUM; + return features & ~NETIF_F_CSUM_MASK; return features; } @@ -8781,8 +9028,8 @@ skip_sriov: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->features |= NETIF_F_SCTP_CSUM; - netdev->hw_features |= NETIF_F_SCTP_CSUM | + netdev->features |= NETIF_F_SCTP_CRC; + netdev->hw_features |= NETIF_F_SCTP_CRC | NETIF_F_NTUPLE; break; default: @@ -8798,8 +9045,7 @@ skip_sriov: netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; - netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -8808,9 +9054,7 @@ skip_sriov: switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->hw_enc_features |= NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM; break; default: break; @@ -8870,7 +9114,7 @@ skip_sriov: goto err_sw_init; } - ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + ixgbe_mac_set_default_filter(adapter); setup_timer(&adapter->service_timer, &ixgbe_service_timer, (unsigned long) adapter); @@ -9325,6 +9569,12 @@ static int __init ixgbe_init_module(void) pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright); + ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); + if (!ixgbe_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); + return -ENOMEM; + } + ixgbe_dbg_init(); ret = pci_register_driver(&ixgbe_driver); @@ -9356,6 +9606,10 @@ static void __exit ixgbe_exit_module(void) pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); + if (ixgbe_wq) { + destroy_workqueue(ixgbe_wq); + ixgbe_wq = NULL; + } } #ifdef CONFIG_IXGBE_DCA diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fb8673d63806..db0731e05401 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -2393,6 +2393,9 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return 0; + if (!on && ixgbe_mng_present(hw)) + return 0; + status = 
hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index e5ba04025e2b..ef1504d41890 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,6 +27,7 @@ *******************************************************************************/ #include "ixgbe.h" #include <linux/ptp_classify.h> +#include <linux/clocksource.h> /* * The 82599 and the X540 do not have true 64bit nanosecond scale @@ -93,7 +94,6 @@ #define IXGBE_INCVAL_SHIFT_82599 7 #define IXGBE_INCPER_SHIFT_82599 24 -#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL #define IXGBE_OVERFLOW_PERIOD (HZ * 30) #define IXGBE_PTP_TX_TIMEOUT (HZ * 15) @@ -104,8 +104,68 @@ */ #define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL +/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL + * which contain measurements of seconds and nanoseconds respectively. This + * matches the standard linux representation of time in the kernel. In addition, + * the X550 also has a SYSTIMER register which represents residue, or + * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA + * register is used, but it is unlike the X540 and 82599 devices. TIMINCA + * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the + * high bit representing whether the adjustent is positive or negative. Every + * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range + * of 12 to 13 nanoseconds adjustment. Unlike the 82599 and X540 devices, the + * X550's clock for purposes of SYSTIME generation is constant and not dependent + * on the link speed. + * + * SYSTIMEH SYSTIMEL SYSTIMER + * +--------------+ +--------------+ +-------------+ + * X550 | 32 | | 32 | | 32 | + * *--------------+ +--------------+ +-------------+ + * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ + * + * This results in a full 96 bits to represent the clock, with 32 bits for + * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under + * 1 second) and an additional 32 bits to measure sub nanosecond adjustments for + * underflow of adjustments. + * + * The 32 bits of seconds for the X550 overflows every + * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. + * + * In order to adjust the clock frequency for the X550, the TIMINCA register is + * provided. This register represents a + or minus nearly 0.5 ns adjustment to + * the base frequency. It is measured in 2^-32 ns units, with the high bit being + * the sign bit. This register enables software to calculate frequency + * adjustments and apply them directly to the clock rate. + * + * The math for converting ppb into TIMINCA values is fairly straightforward. + * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL + * + * This assumes that ppb is never high enough to create a value bigger than + * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this + * value is also simple. 
+ * Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL + * + * For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is + * 12.5 nanoseconds. This means that the Max ppb is 39999999 + * Note: We subtract one in order to ensure no overflow, because the TIMINCA + * register can only hold slightly under 0.5 nanoseconds. + * + * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns + * into 2^-32 units, which is + * + * 12.5 * 2^32 = C80000000 + * + * Some revisions of hardware have a faster base frequency than the registers + * were defined for. To fix this, we use a timecounter structure with the + * proper mult and shift to convert the cycles into nanoseconds of time. + */ +#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL +#define INCVALUE_MASK 0x7FFFFFFF +#define ISGN 0x80000000 +#define MAX_TIMADJ 0x7FFFFFFF + /** - * ixgbe_ptp_setup_sdp + * ixgbe_ptp_setup_sdp_x540 * @hw: the hardware private structure * * this function enables or disables the clock out feature on SDP0 for @@ -116,83 +176,116 @@ * aligns the start of the PPS signal to that value. The shift is * necessary because it can change based on the link speed. */ -static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int shift = adapter->cc.shift; + int shift = adapter->hw_cc.shift; u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; u64 ns = 0, clock_edge = 0; - if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && - (hw->mac.type == ixgbe_mac_X540)) { + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); - /* disable the pin first */ - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - IXGBE_WRITE_FLUSH(hw); + if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) + return; - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* - * enable the SDP0 pin as output, and connected to the - * native function for Timesync (ClockOut) - */ - esdp |= (IXGBE_ESDP_SDP0_DIR | - IXGBE_ESDP_SDP0_NATIVE); + /* enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE; - /* - * enable the Clock Out feature on SDP0, and allow - * interrupts to occur when the pin changes - */ - tsauxc = (IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT); + /* enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT; - /* clock period (or pulse length) */ - clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); - clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); + /* clock period (or pulse length) */ + clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); + clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); - /* - * Account for the cyclecounter wrap-around value by - * using the converted ns value of the current time to - * check for when the next aligned second would occur. - */ - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - ns = timecounter_cyc2time(&adapter->tc, clock_edge); + /* Account for the cyclecounter wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur. 
+ */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); - div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); - clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); + clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); - /* specify the initial clock start time */ - trgttiml = (u32)clock_edge; - trgttimh = (u32)(clock_edge >> 32); + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - } else { - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - } + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); IXGBE_WRITE_FLUSH(hw); } /** - * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) + * ixgbe_ptp_read_X550 - read cycle counter value + * @hw_cc: cyclecounter structure + * + * This function reads SYSTIME registers. It is called by the cyclecounter + * structure to convert from internal representation into nanoseconds. We need + * this for X550 since some skews do not have expected clock frequency and + * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of + * "cycles", rather than seconds and nanoseconds. + */ +static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) +{ + struct ixgbe_adapter *adapter = + container_of(hw_cc, struct ixgbe_adapter, hw_cc); + struct ixgbe_hw *hw = &adapter->hw; + struct timespec64 ts; + + /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. + * Some revisions of hardware run at a higher frequency and so the + * cycles are not guaranteed to be nanoseconds. The timespec64 created + * here is used for its math/conversions but does not necessarily + * represent nominal time. + * + * It should be noted that this cyclecounter will overflow at a + * non-bitmask field since we have to convert our billions of cycles + * into an actual cycles count. This results in some possible weird + * situations at high cycle counter stamps. However given that 32 bits + * of "seconds" is ~138 years this isn't a problem. Even at the + * increased frequency of some revisions, this is still ~103 years. + * Since the SYSTIME values start at 0 and we never write them, it is + * highly unlikely for the cyclecounter to overflow in practice. 
+ */ + IXGBE_READ_REG(hw, IXGBE_SYSTIMR); + ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); + + return (u64)timespec64_to_ns(&ts); +} + +/** + * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure * * this function reads the cyclecounter registers and is called by the * cyclecounter structure used to construct a ns counter from the * arbitrary fixed point registers */ -static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) +static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) { struct ixgbe_adapter *adapter = - container_of(cc, struct ixgbe_adapter, cc); + container_of(cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; u64 stamp = 0; @@ -203,20 +296,79 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) } /** - * ixgbe_ptp_adjfreq + * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + struct timespec64 systime; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + switch (adapter->hw.mac.type) { + /* X550 and later hardware supposedly represent time using a seconds + * and nanoseconds counter, instead of raw 64bits nanoseconds. We need + * to convert the timestamp into cycles before it can be fed to the + * cyclecounter. We need an actual cyclecounter because some revisions + * of hardware run at a higher frequency and thus the counter does + * not represent seconds/nanoseconds. Instead it can be thought of as + * cycles and billions of cycles. + */ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* Upper 32 bits represent billions of cycles, lower 32 bits + * represent cycles. However, we use timespec64_to_ns for the + * correct math even though the units haven't been corrected + * yet. + */ + systime.tv_sec = timestamp >> 32; + systime.tv_nsec = timestamp & 0xFFFFFFFF; + + timestamp = timespec64_to_ns(&systime); + break; + default: + break; + } + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * ixgbe_ptp_adjfreq_82599 * @ptp: the ptp clock structure * @ppb: parts per billion adjustment from base * * adjust the frequency of the ptp cycle counter by the * indicated ppb from the base frequency. 
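Before the adjustment callbacks, it is worth spelling out how the raw X550 stamp handled by ixgbe_ptp_read_X550() and ixgbe_ptp_convert_to_hwtstamp() above folds into a single cycle count: the upper 32 bits hold "billions of cycles" and the lower 32 bits hold "cycles", so the fold is a scaled add, which is what the timespec64_to_ns() step does for it. A minimal sketch with a hypothetical helper name:

#include <stdint.h>

/* Fold an X550-style raw stamp (upper 32 bits: billions of cycles,
 * lower 32 bits: cycles) into one cycle count, mirroring the
 * timespec64_to_ns() conversion used by the driver. */
static uint64_t x550_stamp_to_cycles(uint64_t raw)
{
	uint64_t billions = raw >> 32;
	uint64_t cycles = raw & 0xFFFFFFFFULL;

	return billions * 1000000000ULL + cycles;
}

/* Example: raw = 0x000000023B9AC9FF folds to 2 * 1e9 + 999999999 =
 * 2999999999 cycles; the timecounter then turns cycles into ns using
 * the mult/shift chosen in ixgbe_ptp_start_cyclecounter(). */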
*/ -static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; - u64 freq; - u32 diff, incval; + u64 freq, incval; + u32 diff; int neg_adj = 0; if (ppb < 0) { @@ -235,12 +387,16 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) switch (hw->mac.type) { case ixgbe_mac_X540: - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + if (incval > 0xFFFFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); break; case ixgbe_mac_82599EB: + if (incval > 0x00FFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (1 << IXGBE_INCPER_SHIFT_82599) | - incval); + ((u32)incval & 0x00FFFFFFUL)); break; default: break; @@ -250,6 +406,43 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } /** + * ixgbe_ptp_adjfreq_X550 + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the SYSTIME registers by the indicated ppb from base + * frequency + */ +static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + int neg_adj = 0; + u64 rate = IXGBE_X550_BASE_PERIOD; + u32 inca; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate *= ppb; + rate = div_u64(rate, 1000000000ULL); + + /* warn if rate is too large */ + if (rate >= INCVALUE_MASK) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); + + return 0; +} + +/** * ixgbe_ptp_adjtime * @ptp: the ptp clock structure * @delta: offset to adjust the cycle counter by @@ -263,10 +456,11 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_adjtime(&adapter->tc, delta); + timecounter_adjtime(&adapter->hw_tc, delta); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); return 0; } @@ -283,11 +477,11 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; unsigned long flags; + u64 ns; spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->tc); + ns = timecounter_read(&adapter->hw_tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); *ts = ns_to_timespec64(ns); @@ -308,17 +502,16 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; unsigned long flags; - - ns = timespec64_to_ns(ts); + u64 ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_init(&adapter->tc, &adapter->cc, ns); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); return 0; } @@ -343,33 +536,26 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, * event when the clock SDP 
triggers. Clear mask when PPS is * disabled */ - if (rq->type == PTP_CLK_REQ_PPS) { - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - if (on) - adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; - else - adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - - ixgbe_ptp_setup_sdp(adapter); - return 0; - default: - break; - } - } + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) + return -ENOTSUPP; + + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - return -ENOTSUPP; + adapter->ptp_setup_sdp(adapter); + return 0; } /** * ixgbe_ptp_check_pps_event * @adapter: the private adapter structure - * @eicr: the interrupt cause register value * * This function is called by the interrupt routine when checking for * interrupts. It will check and handle a pps event. */ -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; @@ -425,7 +611,9 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + struct ixgbe_ring *rx_ring; unsigned long rx_event; + int n; /* if we don't have a valid timestamp in the registers, just update the * timeout counter and exit @@ -437,19 +625,43 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) /* determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; - if (time_after(adapter->last_rx_timestamp, rx_event)) - rx_event = adapter->last_rx_timestamp; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } /* only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5*HZ)) { + if (time_is_before_jiffies(rx_event + 5 * HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; e_warn(drv, "clearing RX Timestamp hang\n"); } } /** + * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
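The PTP_CLK_REQ_PPS request handled by ixgbe_ptp_feature_enable() above is normally driven from user space through the PTP clock character device. A hedged sketch of that usage (device path and error handling are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	/* /dev/ptp0 stands in for whichever clock ixgbe registered;
	 * look under /sys/class/ptp to find the right index. */
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	/* This lands in ixgbe_ptp_feature_enable() with
	 * rq->type == PTP_CLK_REQ_PPS; only the X540 path wires up
	 * ptp_setup_sdp, so other MACs return -ENOTSUPP. */
	if (ioctl(fd, PTP_ENABLE_PPS, 1))
		perror("PTP_ENABLE_PPS");

	close(fd);
	return 0;
}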
+ */ +static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_READ_REG(hw, IXGBE_TXSTMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: the private adapter struct * @@ -461,23 +673,15 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; - u64 regval = 0, ns; - unsigned long flags; + u64 regval = 0; regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->tc, regval); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ns_to_ktime(ns); + ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + ixgbe_ptp_clear_tx_timestamp(adapter); } /** @@ -497,38 +701,85 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) IXGBE_PTP_TX_TIMEOUT); u32 tsynctxctl; - if (timeout) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - e_warn(drv, "clearing Tx Timestamp hang\n"); + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ixgbe_ptp_clear_tx_timestamp(adapter); return; } + /* stop polling once we have a valid timestamp */ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { ixgbe_ptp_tx_hwtstamp(adapter); - else + return; + } + + if (timeout) { + ixgbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang\n"); + } else { /* reschedule to keep checking if it's not available yet */ schedule_work(&adapter->ptp_tx_work); + } } /** - * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp - * @adapter: pointer to adapter struct + * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer + * @q_vector: structure containing interrupt and ring information + * @skb: the packet + * + * This function will be called by the Rx routine of the timestamp for this + * packet is stored in the buffer. The value is stored in little endian format + * starting at the end of the packet data. + */ +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + __le64 regval; + + /* copy the bits out of the skb, and then trim the skb length */ + skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, + IXGBE_TS_HDR_LEN); + __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); + + /* The timestamp is recorded in little endian format, and is stored at + * the end of the packet. 
+ * + * DWORD: N N + 1 N + 2 + * Field: End of Packet SYSTIMH SYSTIML + */ + ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + le64_to_cpu(regval)); +} + +/** + * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { - struct ixgbe_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps *shhwtstamps; - u64 regval = 0, ns; + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u64 regval = 0; u32 tsyncrxctl; - unsigned long flags; + + /* we cannot process timestamps on a ring without a q_vector */ + if (!q_vector || !q_vector->adapter) + return; + + adapter = q_vector->adapter; + hw = &adapter->hw; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) @@ -537,17 +788,7 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->tc, regval); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - - /* Update the last_rx_timestamp timer in order to enable watchdog check - * for error case of latched timestamp on a dropped packet. - */ - adapter->last_rx_timestamp = jiffies; + ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) @@ -610,14 +851,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -631,9 +878,21 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: + /* The X550 controller is capable of timestamping all packets, + * which allows it to accept any filter. 
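A stand-alone sketch of the "timestamp appended to the packet" layout handled by ixgbe_ptp_rx_pktstamp() above: the last IXGBE_TS_HDR_LEN bytes of the frame carry the little-endian SYSTIM value, so extraction is a copy from the tail followed by a trim. The 8-byte length and helper name below are assumptions for illustration.

#include <stddef.h>
#include <stdint.h>

#define TS_HDR_LEN 8	/* assumed size of the appended SYSTIM value */

/* Pull a little-endian 64-bit timestamp off the end of a received
 * frame and shrink the frame, mirroring the skb_copy_bits() +
 * __pskb_trim() pair in ixgbe_ptp_rx_pktstamp(). */
static uint64_t pull_trailing_tstamp(const uint8_t *frame, size_t *len)
{
	const uint8_t *p = frame + *len - TS_HDR_LEN;
	uint64_t val = 0;
	int i;

	/* assemble the little-endian value byte by byte, equivalent to
	 * le64_to_cpu() on the copied bits */
	for (i = 0; i < TS_HDR_LEN; i++)
		val |= (uint64_t)p[i] << (8 * i);

	*len -= TS_HDR_LEN;	/* trim the frame, like __pskb_trim() */
	return val;
}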
+ */ + if (hw->mac.type >= ixgbe_mac_X550) { + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + break; + } + /* fall through */ default: /* * register RXMTRL must be set in order to do V1 packets, @@ -641,16 +900,46 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } if (hw->mac.type == ixgbe_mac_82598EB) { + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); if (tsync_rx_ctl | tsync_tx_ctl) return -ERANGE; return 0; } + /* Per-packet timestamping only works if the filter is set to all + * packets. Since this is desired, always timestamp all packets as long + * as any Rx filter was configured. + */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* enable timestamping all packets only if at least some + * packets were requested. Otherwise, play nice and disable + * timestamping + */ + if (config->rx_filter == HWTSTAMP_FILTER_NONE) + break; + + tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | + IXGBE_TSYNCRXCTL_TYPE_ALL | + IXGBE_TSYNCRXCTL_TSIP_UT_EN; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + is_l2 = true; + break; + default: + break; + } + /* define ethertype filter for timestamping L2 packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), @@ -678,8 +967,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); /* clear TX/RX time stamp registers, just to be sure */ - regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); - regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + ixgbe_ptp_clear_tx_timestamp(adapter); + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); return 0; } @@ -712,23 +1001,9 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) -EFAULT : 0; } -/** - * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw - * @adapter: pointer to the adapter structure - * - * This function should be called to set the proper values for the TIMINCA - * register and tell the cyclecounter structure what the tick rate of SYSTIME - * is. It does not directly modify SYSTIME registers or the timecounter - * structure. It should be called whenever a new TIMINCA value is necessary, - * such as during initialization or when the link speed changes. 
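The HWTSTAMP_FILTER_ALL path above is what user space reaches when it requests all-packet timestamping through the standard SIOCSHWTSTAMP ioctl serviced by ixgbe_ptp_set_ts_config(). A minimal sketch of that request (interface name is illustrative; CAP_NET_ADMIN is required):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_rx_tstamp_all(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));

	cfg.tx_type = HWTSTAMP_TX_ON;
	/* Accepted as-is by X550/X550EM_x; per the code above, 82599
	 * and X540 return -ERANGE for this filter. */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* On success cfg.rx_filter holds the filter the driver chose. */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}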
- */ -void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, + u32 *shift, u32 *incval) { - struct ixgbe_hw *hw = &adapter->hw; - u32 incval = 0; - u32 shift = 0; - unsigned long flags; - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added @@ -745,36 +1020,98 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) */ switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: - incval = IXGBE_INCVAL_100; - shift = IXGBE_INCVAL_SHIFT_100; + *shift = IXGBE_INCVAL_SHIFT_100; + *incval = IXGBE_INCVAL_100; break; case IXGBE_LINK_SPEED_1GB_FULL: - incval = IXGBE_INCVAL_1GB; - shift = IXGBE_INCVAL_SHIFT_1GB; + *shift = IXGBE_INCVAL_SHIFT_1GB; + *incval = IXGBE_INCVAL_1GB; break; case IXGBE_LINK_SPEED_10GB_FULL: default: - incval = IXGBE_INCVAL_10GB; - shift = IXGBE_INCVAL_SHIFT_10GB; + *shift = IXGBE_INCVAL_SHIFT_10GB; + *incval = IXGBE_INCVAL_10GB; break; } +} - /** - * Modify the calculated values to fit within the correct - * number of bits specified by the hardware. The 82599 doesn't - * have the same space as the X540, so bitshift the calculated - * values to fit. +/** + * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct cyclecounter cc; + unsigned long flags; + u32 incval = 0; + u32 tsauxc = 0; + u32 fuse0 = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + switch (hw->mac.type) { + case ixgbe_mac_X550EM_x: + /* SYSTIME assumes X550EM_x board frequency is 300Mhz, and is + * designed to represent seconds and nanoseconds when this is + * the case. However, some revisions of hardware have a 400Mhz + * clock and we have to compensate for this frequency + * variation using corrected mult and shift values. 
+ */ + fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); + if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { + cc.mult = 3; + cc.shift = 2; + } + /* fallthrough */ + case ixgbe_mac_X550: + cc.read = ixgbe_ptp_read_X550; + + /* enable SYSTIME counter */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); + + IXGBE_WRITE_FLUSH(hw); + break; case ixgbe_mac_X540: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); break; case ixgbe_mac_82599EB: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); incval >>= IXGBE_INCVAL_SHIFT_82599; - shift -= IXGBE_INCVAL_SHIFT_82599; + cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | - incval); + (1 << IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ @@ -787,13 +1124,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) /* need lock to prevent incorrect read while modifying cyclecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); - - memset(&adapter->cc, 0, sizeof(adapter->cc)); - adapter->cc.read = ixgbe_ptp_read; - adapter->cc.mask = CYCLECOUNTER_MASK(64); - adapter->cc.shift = shift; - adapter->cc.mult = 1; - + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); } @@ -814,29 +1145,27 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; - /* set SYSTIME registers to 0 just in case */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); - IXGBE_WRITE_FLUSH(hw); - /* reset the hardware timestamping mode */ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + /* 82598 does not support PTP */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + ixgbe_ptp_start_cyclecounter(adapter); spin_lock_irqsave(&adapter->tmreg_lock, flags); - - /* reset the ns time counter */ - timecounter_init(&adapter->tc, &adapter->cc, + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ktime_to_ns(ktime_get_real())); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - /* - * Now that the shift has been calculated and the systime + adapter->last_overflow_check = jiffies; + + /* Now that the shift has been calculated and the systime * registers reset, (re-)enable the Clock out feature */ - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); } /** @@ -845,11 +1174,11 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) * * This function performs setup of the user entry point function table and * initializes the PTP clock device, which is used to access the clock-like - * features of the PTP core. It will be called by ixgbe_ptp_init, only if - * there isn't already a clock device (such as after a suspend/resume cycle, - * where the clock device wasn't destroyed). + * features of the PTP core. It will be called by ixgbe_ptp_init, and may + * reuse a previously initialized clock (such as during a suspend/resume + * cycle). 
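The mult/shift pair selected above is how the timecounter turns raw SYSTIME counts into nanoseconds, essentially ns = (cycles * mult) >> shift. For the 400 MHz X550EM_x parts the patch picks mult = 3, shift = 2, a 3/4 scaling of the raw count; a simplified worked sketch (the real timecounter also tracks the fractional remainder):

#include <stdint.h>
#include <stdio.h>

/* timecounter-style conversion: ns = (cycles * mult) >> shift */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* 300 MHz parts: mult = 1, shift = 0, raw counts are already ns */
	printf("%llu\n", (unsigned long long)cycles_to_ns(4000, 1, 0));
	/* 400 MHz parts: mult = 3, shift = 2 scales by 3/4, so 4000 raw
	 * units become 3000 ns */
	printf("%llu\n", (unsigned long long)cycles_to_ns(4000, 3, 2));
	return 0;
}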
*/ -static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) +static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; long err; @@ -869,11 +1198,12 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; break; case ixgbe_mac_82599EB: snprintf(adapter->ptp_caps.name, @@ -885,14 +1215,31 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 30000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = NULL; break; default: adapter->ptp_clock = NULL; + adapter->ptp_setup_sdp = NULL; return -EOPNOTSUPP; } @@ -961,18 +1308,13 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) return; - /* since this might be called in suspend, we don't clear the state, - * but simply reset the auxiliary PPS signal control register - */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0); + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); /* ensure that we cancel any pending PTP Tx work item in progress */ cancel_work_sync(&adapter->ptp_tx_work); - if (adapter->ptp_tx_skb) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - } + ixgbe_ptp_clear_tx_timestamp(adapter); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index fcd8b27a0ccb..8025a3f93598 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
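Once ixgbe_ptp_create_clock() has registered the clock with the caps shown above, the adjfreq callbacks are exercised through clock_adjtime() on the dynamic POSIX clock exposed by the PTP core. A hedged sketch of that path; the fd-to-clockid encoding is the usual dynamic-clock convention and the device path is illustrative:

#include <fcntl.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>

/* conventional mapping from a /dev/ptpN fd to a dynamic clockid */
#define FD_TO_CLOCKID(fd) ((clockid_t)((((unsigned int)~(fd)) << 3) | 3))

int main(void)
{
	int fd = open("/dev/ptp0", O_RDWR);
	struct timex tx = { 0 };
	long ppb = 1000;

	if (fd < 0)
		return 1;

	/* ADJ_FREQUENCY takes parts-per-million with a 16-bit binary
	 * fraction; the kernel converts this to the ppb value handed to
	 * the driver's adjfreq callback (adjfreq_82599 or adjfreq_X550). */
	tx.modes = ADJ_FREQUENCY;
	tx.freq = (ppb * 65536) / 1000;

	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		return 1;

	close(fd);
	return 0;
}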
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -130,6 +130,38 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) return -ENOMEM; } +/** + * ixgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + pci_dev_get(vfdev); + adapter->vfinfo[vf].vfdev = vfdev; + ++vf; + } +} + /* Note this function is called when the user wants to enable SR-IOV * VFs using the now deprecated module parameter */ @@ -170,8 +202,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) } } - if (!__ixgbe_enable_sriov(adapter)) + if (!__ixgbe_enable_sriov(adapter)) { + ixgbe_get_vfs(adapter); return; + } /* If we have gotten to this point then there is no memory available * to manage the VF devices - print message and bail. @@ -184,6 +218,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) #endif /* #ifdef CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { + unsigned int num_vfs = adapter->num_vfs, vf; struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; @@ -192,6 +227,16 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + adapter->vfinfo[vf].vfdev = NULL; + pci_dev_put(vfdev); + } + /* free VF control structures */ kfree(adapter->vfinfo); adapter->vfinfo = NULL; @@ -289,6 +334,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) e_dev_warn("Failed to enable PCI sriov: %d\n", err); return err; } + ixgbe_get_vfs(adapter); ixgbe_sriov_reinit(adapter); return num_vfs; @@ -406,11 +452,34 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { - /* VLAN 0 is a special case, don't allow it to be removed */ - if (!vid && !add) - return 0; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); + if (err) + return err; + } + + err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); - return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
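The VLVF/VFTA bookkeeping referred to above is plain bit indexing: the VFTA is a bitmap indexed by VLAN id, and each VLVF entry has a 64-bit pool bitmap split across two VLVFB words indexed by pool (VF) number. A small sketch of the index math used by the clearing code that follows (helper names are hypothetical):

#include <stdint.h>

/* VFTA: one bit per VLAN id, 32 ids per 32-bit register */
static void vfta_index(uint32_t vid, uint32_t *reg, uint32_t *mask)
{
	*reg = vid / 32;
	*mask = 1u << (vid % 32);
}

/* VLVFB: each VLVF entry i owns two 32-bit pool words; the pool/VF
 * number selects the word and the bit within it, exactly as
 * word = i * 2 + vf / 32 and mask = 1 << (vf % 32) below */
static void vlvfb_index(uint32_t entry, uint32_t vf,
			uint32_t *word, uint32_t *mask)
{
	*word = entry * 2 + vf / 32;
	*mask = 1u << (vf % 32);
}

/* Example: VLVF entry 5, VF 40 -> word 11, bit 8; VLAN id 100 ->
 * VFTA register 3, bit 4. */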
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + + return err; } static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) @@ -516,13 +585,75 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); } + +static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i; + + /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ + for (i = IXGBE_VLVF_ENTRIES; i--;) { + u32 bits[2], vlvfb, vid, vfta, vlvf; + u32 word = i * 2 + vf / 32; + u32 mask = 1 << (vf % 32); + + vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* if our bit isn't set we can skip it */ + if (!(vlvfb & mask)) + continue; + + /* clear our bit from vlvfb */ + vlvfb ^= mask; + + /* create 64b mask to chedk to see if we should clear VLVF */ + bits[word % 2] = vlvfb; + bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); + + /* if promisc is enabled, PF will be present, leave VFTA */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) { + bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32)); + + if (bits[0] || bits[1]) + goto update_vlvfb; + goto update_vlvf; + } + + /* if other pools are present, just remove ourselves */ + if (bits[0] || bits[1]) + goto update_vlvfb; + + /* if we cannot determine VLAN just remove ourselves */ + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + if (!vlvf) + goto update_vlvfb; + + vid = vlvf & VLAN_VID_MASK; + mask = 1 << (vid % 32); + + /* clear bit from VFTA */ + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); + if (vfta & mask) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); +update_vlvf: + /* clear POOL selection enable */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); +update_vlvfb: + /* clear pool bits */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); + } +} + static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; u8 num_tcs = netdev_get_num_tc(adapter->netdev); - /* add PF assigned VLAN or VLAN 0 */ + /* remove VLAN filters beloning to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); /* reset offloads to defaults */ @@ -768,40 +899,14 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } -static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) -{ - u32 vlvf; - s32 regindex; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* Search for the vlan id in the VLVF entries */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if ((vlvf & VLAN_VID_MASK) == vlan) - break; - } - - /* Return a negative value if not found */ - if (regindex >= IXGBE_VLVF_ENTRIES) - regindex = -1; - - return regindex; -} - static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { + u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + u8 tcs = netdev_get_num_tc(adapter->netdev); struct ixgbe_hw *hw = &adapter->hw; - int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); int err; - s32 reg_ndx; - u32 vlvf; - u32 bits; - u8 tcs = 
netdev_get_num_tc(adapter->netdev); if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -811,54 +916,23 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, return -1; } - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - - /* in case of promiscuous mode any VLAN filter set for a VF must - * also have the PF pool added to it. - */ - if (add && adapter->netdev->flags & IFF_PROMISC) - err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (!err && adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + if (err) + return err; - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. - */ - if (!add && adapter->netdev->flags & IFF_PROMISC) { - reg_ndx = ixgbe_find_vlvf_entry(hw, vid); - if (reg_ndx < 0) - return err; - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ - if (VMDQ_P(0) < 32) { - bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - bits &= ~(1 << VMDQ_P(0)); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - bits &= ~(1 << (VMDQ_P(0) - 32)); - bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - } + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. - */ - if ((vlvf & VLAN_VID_MASK) == vid && - !test_bit(vid, adapter->active_vlans) && !bits) - ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); - } + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; - return err; + return 0; } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -1239,6 +1313,9 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, if (err) goto out; + /* Revoke tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, false, 0, vf); + ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); if (adapter->vfinfo[vf].spoofchk_enabled) @@ -1272,6 +1349,8 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); + /* Restore tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 995f03107eac..bf7367a08716 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1020,6 +1020,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ #define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ #define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ #define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ #define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ #define 
IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ @@ -1036,6 +1037,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ #define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ #define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ /* Diagnostic Registers */ #define IXGBE_RDSTATCTL 0x02C20 @@ -1345,7 +1347,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ #define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* global fault msg */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ #define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ /* autoneg vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 @@ -1353,6 +1358,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ #define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /*int dev fault enable */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ @@ -2209,6 +2215,7 @@ enum { #define IXGBE_TSAUXC_EN_CLK 0x00000004 #define IXGBE_TSAUXC_SYNCLK 0x00000008 #define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ @@ -2218,8 +2225,12 @@ enum { #define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 #define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 #define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ + +#define IXGBE_TSIM_TXTS 0x00000002 #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 @@ -2332,6 +2343,7 @@ enum { #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ #define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ #define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ #define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ #define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ @@ -2768,6 +2780,7 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ #define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define 
IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ @@ -3288,7 +3301,7 @@ struct ixgbe_mac_operations { s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); s32 (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); @@ -3508,7 +3521,7 @@ struct ixgbe_info { #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV1 BIT(6) +#define IXGBE_FUSES0_REV_MASK (3 << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index c1d4584f6469..2358c1b7d586 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -57,8 +57,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) struct ixgbe_phy_info *phy = &hw->phy; /* set_phy_power was set by default to NULL */ - if (!ixgbe_mng_present(hw)) - phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; @@ -110,13 +109,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -154,12 +154,16 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ebe0ac950b14..87aca3f7c3de 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -26,6 +26,8 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); + static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -85,79 +87,6 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) } /** - * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register - * @hw: pointer to hardware structure - * @reg: the register to check - * - * Performs a diagnostic on a register in the CS4227 chip. Returns an error - * if it is not operating correctly. - * This function assumes that the caller has acquired the proper semaphore. 
- */ -static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg) -{ - s32 status; - u32 retry; - u16 reg_val; - - reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1; - status = ixgbe_write_cs4227(hw, reg, reg_val); - if (status) - return status; - for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - msleep(IXGBE_CS4227_CHECK_DELAY); - reg_val = 0xFFFF; - ixgbe_read_cs4227(hw, reg, ®_val); - if (!reg_val) - break; - } - if (reg_val) { - hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg); - return status; - } - - return 0; -} - -/** - * ixgbe_get_cs4227_status - Return CS4227 status - * @hw: pointer to hardware structure - * - * Performs a diagnostic on the CS4227 chip. Returns an error if it is - * not operating correctly. - * This function assumes that the caller has acquired the proper semaphore. - */ -static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw) -{ - s32 status; - u16 value = 0; - - /* Exit if the diagnostic has already been performed. */ - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); - if (status) - return status; - if (value == IXGBE_CS4227_RESET_COMPLETE) - return 0; - - /* Check port 0. */ - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB); - if (status) - return status; - - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB); - if (status) - return status; - - /* Check port 1. */ - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB + - (1 << 12)); - if (status) - return status; - - return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB + - (1 << 12)); -} - -/** * ixgbe_read_pe - Read register from port expander * @hw: pointer to hardware structure * @reg: register number to read @@ -326,13 +255,6 @@ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) return; } - /* Is the CS4227 working correctly? */ - status = ixgbe_get_cs4227_status(hw); - if (status) { - hw_err(hw, "CS4227 status failed: %d", status); - goto out; - } - /* Record completion for next time. */ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_COMPLETE); @@ -1257,31 +1179,71 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, if (status) return status; - /* Configure CS4227 LINE side to 10G SR. */ - slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); - value = IXGBE_CS4227_SPEED_10G; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); - - /* Configure CS4227 for HOST connection rate then type. */ - slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); - value = speed & IXGBE_LINK_SPEED_10GB_FULL ? - IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* Configure CS4227 LINE side to 10G SR. */ + slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); + value = IXGBE_CS4227_SPEED_10G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; - slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); - if (setup_linear) - value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; - else + slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + + /* Configure CS4227 for HOST connection rate then type. 
*/ + slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); + value = speed & IXGBE_LINK_SPEED_10GB_FULL ? + IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; - /* If internal link mode is XFI, then setup XFI internal link. */ - if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) + slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + + /* Setup XFI internal link. */ status = ixgbe_setup_ixfi_x550em(hw, &speed); + if (status) { + hw_dbg(hw, "setup_ixfi failed with %d\n", status); + return status; + } + } else { + /* Configure internal PHY for KR/KX. */ + status = ixgbe_setup_kr_speed_x550em(hw, speed); + if (status) { + hw_dbg(hw, "setup_kr_speed failed with %d\n", status); + return status; + } + /* Configure CS4227 LINE side to proper mode. */ + slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + } + + return 0; + +i2c_err: + hw_dbg(hw, "combined i2c access failed with %d\n", status); return status; } @@ -1482,7 +1444,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; - /* High temperature failure alarm triggered */ + /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); @@ -1496,6 +1458,21 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + /* if device fault was due to high temp alarm handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + } /* Vendor alarm 2 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, @@ -1549,14 +1526,15 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) if (status) return status; - /* Enables high temperature failure alarm */ + /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); if (status) return status; - reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN; + reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, @@ -1765,6 +1743,12 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; + if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } + 
/* If link is not up, then there is no setup necessary so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) @@ -1873,10 +1857,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) u32 save_autoneg; bool link_up; - /* SW LPLU not required on later HW revisions. */ - if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))) - return 0; - /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) return 0; @@ -1969,7 +1949,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; - ixgbe_link_speed speed; s32 ret_val; hw->mac.ops.set_lan_id(hw); @@ -1982,13 +1961,6 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) * to determine internal PHY mode. */ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - - /* If internal PHY mode is KR, then initialize KR link */ - if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); - } } /* Identify the PHY or SFP module */ @@ -2020,18 +1992,13 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now. */ - if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - phy->ops.setup_internal_link = - ixgbe_setup_internal_phy_t_x550em; - } else { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); - } + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; /* setup SW LPLU only for first revision */ - if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, - IXGBE_FUSES0_GROUP(0)))) + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & + IXGBE_FUSES0_REV_MASK)) phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; @@ -2176,13 +2143,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d3e5f5b37999..c48aef613b0a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -774,7 +774,7 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_10K_ITR; + tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ec3147279621..68ec7daa04fd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -326,8 +326,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 -#define IXGBE_10K_ITR 400 -#define IXGBE_8K_ITR 500 +#define IXGBE_12K_ITR 336 /* Helper macros to switch between ints/sec and what the register uses. 
* And yes, it's the same math going both ways. The lowest value diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 592ff237d692..3558f019b631 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -59,7 +59,7 @@ static const char ixgbevf_driver_string[] = #define DRV_VERSION "2.12.1-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2012 Intel Corporation."; + "Copyright (c) 2009 - 2015 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -96,12 +96,14 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +static struct workqueue_struct *ixgbevf_wq; + static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) { if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); + queue_work(ixgbevf_wq, &adapter->service_task); } static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) @@ -1014,6 +1016,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ixgbevf_for_each_ring(ring, q_vector->tx) clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + if (budget <= 0) + return budget; #ifdef CONFIG_NET_RX_BUSY_POLL if (!ixgbevf_qv_lock_napi(q_vector)) return budget; @@ -1043,7 +1047,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) return budget; /* all work done, exit the polling mode */ napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting & 1) + if (adapter->rx_itr_setting == 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state)) @@ -1138,7 +1142,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) if (q_vector->tx.ring && !q_vector->rx.ring) { /* Tx only vector */ if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; + q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { @@ -1196,7 +1200,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) - * 100-1249MB/s bulk (8000 ints/s) + * 100-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? 
*/ timepassed_us = q_vector->itr >> 2; @@ -1246,8 +1250,9 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) new_itr = IXGBE_20K_ITR; break; case bulk_latency: + new_itr = IXGBE_12K_ITR; + break; default: - new_itr = IXGBE_8K_ITR; break; } @@ -1288,7 +1293,7 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -1332,7 +1337,6 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) int txr_remaining = adapter->num_tx_queues; int i, j; int rqpv, tqpv; - int err = 0; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; @@ -1345,7 +1349,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) for (; txr_idx < txr_remaining; v_start++, txr_idx++) map_vector_to_txq(adapter, v_start, txr_idx); - goto out; + return 0; } /* If we don't have enough vectors for a 1-to-1 @@ -1370,8 +1374,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) } } -out: - return err; + return 0; } /** @@ -1469,9 +1472,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) **/ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) { - int err = 0; - - err = ixgbevf_request_msix_irqs(adapter); + int err = ixgbevf_request_msix_irqs(adapter); if (err) hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); @@ -1830,7 +1831,7 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int err = -EOPNOTSUPP; + int err; spin_lock_bh(&adapter->mbx_lock); @@ -2046,7 +2047,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; - int err = 0, idx = 0; + int err, idx = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2260,10 +2261,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) } if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, - netdev->addr_len); + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } adapter->last_reset = jiffies; @@ -2421,7 +2420,7 @@ err_allocation: static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int err = 0; + int err; int vector, v_budget; /* It's easy to be greedy for MSI-X vectors, but it really @@ -2439,26 +2438,21 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) */ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) { - err = -ENOMEM; - goto out; - } + if (!adapter->msix_entries) + return -ENOMEM; for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; err = ixgbevf_acquire_msix_vectors(adapter, v_budget); if (err) - goto out; + return err; err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) - goto out; - - err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + return err; -out: - return err; + return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); } /** @@ -2483,9 +2477,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) q_vector->v_idx = 
q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); -#ifdef CONFIG_NET_RX_BUSY_POLL - napi_hash_add(&q_vector->napi); -#endif adapter->q_vector[q_idx] = q_vector; } @@ -2662,13 +2653,14 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, hw->mac.addr); } if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address\n"); eth_hw_addr_random(netdev); - memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); + ether_addr_copy(hw->mac.addr, netdev->dev_addr); + ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); } /* Enable dynamic interrupt throttling rates */ @@ -3355,6 +3347,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; + __be16 frag_off; switch (first->protocol) { case htons(ETH_P_IP): @@ -3365,13 +3358,16 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; + if (likely(skb_network_header_len(skb) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); - } break; } @@ -3393,16 +3389,18 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, l3 proto=%x, l4 proto=%x\n", + first->protocol, l4_hdr); } - break; + skb_checksum_help(skb); + goto no_csum; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } +no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; @@ -3698,8 +3696,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + ether_addr_copy(hw->mac.addr, addr->sa_data); spin_lock_bh(&adapter->mbx_lock); @@ -4248,15 +4246,17 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { - int ret; - pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); pr_info("%s\n", ixgbevf_copyright); + ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); + if (!ixgbevf_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); + return -ENOMEM; + } - ret = pci_register_driver(&ixgbevf_driver); - return ret; + return pci_register_driver(&ixgbevf_driver); } module_init(ixgbevf_init_module); @@ -4270,6 +4270,10 @@ module_init(ixgbevf_init_module); static void __exit ixgbevf_exit_module(void) { pci_unregister_driver(&ixgbevf_driver); + if (ixgbevf_wq) { + destroy_workqueue(ixgbevf_wq); + ixgbevf_wq = NULL; + } } #ifdef DEBUG diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 427f3605cbfc..61a98f4c5746 100644 
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -117,7 +117,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - ether_addr_copy(hw->mac.perm_addr, addr); + if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + ether_addr_copy(hw->mac.perm_addr, addr); + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 060dd3922974..b1de7afd4116 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2753,7 +2753,7 @@ static netdev_features_t jme_fix_features(struct net_device *netdev, netdev_features_t features) { if (netdev->mtu > 1900) - features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); + features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK); return features; } diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 581928c068f2..b630ef1e9646 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -375,22 +375,16 @@ static int ltq_etop_mdio_probe(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; + struct phy_device *phydev; - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (priv->mii_bus->phy_map[phy_addr]) { - phydev = priv->mii_bus->phy_map[phy_addr]; - break; - } - } + phydev = phy_find_first(priv->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &ltq_etop_mdio_link, priv->pldata->mii_mode); if (IS_ERR(phydev)) { @@ -408,9 +402,7 @@ ltq_etop_mdio_probe(struct net_device *dev) phydev->advertising = phydev->supported; priv->phydev = phydev; - pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, - dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; } @@ -435,18 +427,9 @@ ltq_etop_mdio_init(struct net_device *dev) priv->mii_bus->name = "ltq_mii"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", priv->pdev->name, priv->pdev->id); - priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; ++i) - priv->mii_bus->irq[i] = PHY_POLL; - if (mdiobus_register(priv->mii_bus)) { err = -ENXIO; - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; } if (ltq_etop_mdio_probe(dev)) { @@ -457,8 +440,6 @@ ltq_etop_mdio_init(struct net_device *dev) err_out_unregister_bus: mdiobus_unregister(priv->mii_bus); -err_out_free_mdio_irq: - kfree(priv->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(priv->mii_bus); err_out: @@ -472,7 +453,6 @@ ltq_etop_mdio_cleanup(struct net_device *dev) phy_disconnect(priv->phydev); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4182290fdbcf..a0c03834a2f7 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -3133,7 +3133,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (!mp->phy) err = -ENODEV; else - phy_addr_set(mp, mp->phy->addr); + phy_addr_set(mp, mp->phy->mdio.addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy =
phy_scan(mp, pd->phy_addr); @@ -3257,25 +3257,20 @@ static struct platform_driver mv643xx_eth_driver = { }, }; +static struct platform_driver * const drivers[] = { + &mv643xx_eth_shared_driver, + &mv643xx_eth_driver, +}; + static int __init mv643xx_eth_init_module(void) { - int rc; - - rc = platform_driver_register(&mv643xx_eth_shared_driver); - if (!rc) { - rc = platform_driver_register(&mv643xx_eth_driver); - if (rc) - platform_driver_unregister(&mv643xx_eth_shared_driver); - } - - return rc; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(mv643xx_eth_init_module); static void __exit mv643xx_eth_cleanup_module(void) { - platform_driver_unregister(&mv643xx_eth_driver); - platform_driver_unregister(&mv643xx_eth_shared_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(mv643xx_eth_cleanup_module); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index fc2fb25343f4..8982c882af1b 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -187,7 +187,7 @@ static int orion_mdio_probe(struct platform_device *pdev) struct resource *r; struct mii_bus *bus; struct orion_mdio_dev *dev; - int i, ret; + int ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -207,14 +207,6 @@ static int orion_mdio_probe(struct platform_device *pdev) dev_name(&pdev->dev)); bus->parent = &pdev->dev; - bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!bus->irq) - return -ENOMEM; - - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->irq[i] = PHY_POLL; - dev = bus->priv; dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!dev->regs) { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ed622fa29dfa..fabc8df40392 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -110,9 +110,17 @@ #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 +#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) +#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) -/* Exception Interrupt Port/Queue Cause register */ +/* Exception Interrupt Port/Queue Cause register + * + * Their behavior depend of the mapping done using the PCPX2Q + * registers. 
For a given CPU if the bit associated to a queue is not + * set, then for the register a read from this CPU will always return + * 0 and a write won't do anything + */ #define MVNETA_INTR_NEW_CAUSE 0x25a0 #define MVNETA_INTR_NEW_MASK 0x25a4 @@ -254,6 +262,11 @@ #define MVNETA_TX_MTU_MAX 0x3ffff +/* The RSS lookup table actually has 256 entries but we do not use + * them yet + */ +#define MVNETA_RSS_LU_TABLE_SIZE 1 + /* TSO header size */ #define TSO_HEADER_SIZE 128 @@ -356,6 +369,7 @@ struct mvneta_port { struct mvneta_tx_queue *txqs; struct net_device *dev; struct notifier_block cpu_notifier; + int rxq_def; /* Core clock */ struct clk *clk; @@ -371,9 +385,11 @@ struct mvneta_port { unsigned int duplex; unsigned int speed; unsigned int tx_csum_limit; - int use_inband_status:1; + unsigned int use_inband_status:1; u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; + + u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the @@ -499,6 +515,9 @@ struct mvneta_tx_queue { /* DMA address of TSO headers */ dma_addr_t tso_hdrs_phys; + + /* Affinity mask for CPUs*/ + cpumask_t affinity_mask; }; struct mvneta_rx_queue { @@ -819,7 +838,13 @@ static void mvneta_port_up(struct mvneta_port *pp) mvreg_write(pp, MVNETA_TXQ_CMD, q_map); /* Enable all initialized RXQs. */ - mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + if (rxq->descs != NULL) + q_map |= (1 << queue); + } + mvreg_write(pp, MVNETA_RXQ_CMD, q_map); } /* Stop the Ethernet port activity */ @@ -973,6 +998,44 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); } +static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) +{ + u32 val; + + if (enable) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | + MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_AN_FLOW_CTRL_EN); + val |= MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + val |= MVNETA_GMAC2_INBAND_AN_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + } else { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + } +} + /* This method sets defaults to the NETA port: * Clears interrupt Cause and Mask registers. * Clears all MAC tables. @@ -987,6 +1050,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) int cpu; int queue; u32 val; + int max_cpu = num_present_cpus(); /* Clear all Cause registers */ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); @@ -1002,13 +1066,33 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Enable MBUS Retry bit16 */ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); - /* Set CPU queue access map - all CPUs have access to all RX - * queues and to all TX queues + /* Set CPU queue access map. 
CPUs are assigned to the RX and + * TX queues modulo their number. If there is only one TX + * queue then it is assigned to the CPU associated to the + * default RX queue. */ - for_each_present_cpu(cpu) - mvreg_write(pp, MVNETA_CPU_MAP(cpu), - (MVNETA_CPU_RXQ_ACCESS_ALL_MASK | - MVNETA_CPU_TXQ_ACCESS_ALL_MASK)); + for_each_present_cpu(cpu) { + int rxq_map = 0, txq_map = 0; + int rxq, txq; + + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + + for (txq = 0; txq < txq_number; txq++) + if ((txq % max_cpu) == cpu) + txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); + + /* With only one TX queue we configure a special case + * which will allow to get all the irq on a single + * CPU + */ + if (txq_number == 1) + txq_map = (cpu == pp->rxq_def) ? + MVNETA_CPU_TXQ_ACCESS(1) : 0; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + } /* Reset RX and TX DMAs */ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); @@ -1029,7 +1113,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) mvreg_write(pp, MVNETA_ACC_MODE, val); /* Update val of portCfg register accordingly with all RxQueue types */ - val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def); + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); mvreg_write(pp, MVNETA_PORT_CONFIG, val); val = 0; @@ -1058,26 +1142,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) val &= ~MVNETA_PHY_POLLING_ENABLE; mvreg_write(pp, MVNETA_UNIT_CONTROL, val); - if (pp->use_inband_status) { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_AN_FLOW_CTRL_EN); - val |= MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); - } else { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN); - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - } - + mvneta_set_autoneg(pp, pp->use_inband_status); mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2082,19 +2147,19 @@ static void mvneta_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_PROMISC) { /* Accept all: Multicast + Unicast */ mvneta_rx_unicast_promisc_set(pp, 1); - mvneta_set_ucast_table(pp, rxq_def); - mvneta_set_special_mcast_table(pp, rxq_def); - mvneta_set_other_mcast_table(pp, rxq_def); + mvneta_set_ucast_table(pp, pp->rxq_def); + mvneta_set_special_mcast_table(pp, pp->rxq_def); + mvneta_set_other_mcast_table(pp, pp->rxq_def); } else { /* Accept single Unicast */ mvneta_rx_unicast_promisc_set(pp, 0); mvneta_set_ucast_table(pp, -1); - mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); + mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); if (dev->flags & IFF_ALLMULTI) { /* Accept all multicast */ - mvneta_set_special_mcast_table(pp, rxq_def); - mvneta_set_other_mcast_table(pp, rxq_def); + mvneta_set_special_mcast_table(pp, pp->rxq_def); + mvneta_set_other_mcast_table(pp, pp->rxq_def); } else { /* Accept only initialized multicast */ mvneta_set_special_mcast_table(pp, -1); @@ -2103,7 +2168,7 @@ static void mvneta_set_rx_mode(struct net_device *dev) if (!netdev_mc_empty(dev)) { netdev_for_each_mc_addr(ha, dev) { mvneta_mcast_addr_set(pp, ha->addr, - rxq_def); + 
pp->rxq_def); } } } @@ -2154,6 +2219,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget) { int rx_done = 0; u32 cause_rx_tx; + int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); @@ -2185,8 +2251,15 @@ static int mvneta_poll(struct napi_struct *napi, int budget) /* For the case where the last mvneta_poll did not process all * RX packets */ + rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); + cause_rx_tx |= port->cause_rx_tx; - rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); + + if (rx_queue) { + rx_queue = rx_queue - 1; + rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); + } + budget -= rx_done; if (budget > 0) { @@ -2303,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, static int mvneta_txq_init(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + int cpu; + txq->size = pp->tx_ring_size; /* A queue must always have room for at least one skb. @@ -2355,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta_port *pp, } mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); + /* Setup XPS mapping */ + if (txq_number > 1) + cpu = txq->id % num_present_cpus(); + else + cpu = pp->rxq_def % num_present_cpus(); + cpumask_set_cpu(cpu, &txq->affinity_mask); + netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); + return 0; } @@ -2399,19 +2482,27 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp) /* Cleanup all Rx queues */ static void mvneta_cleanup_rxqs(struct mvneta_port *pp) { - mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]); + int queue; + + for (queue = 0; queue < txq_number; queue++) + mvneta_rxq_deinit(pp, &pp->rxqs[queue]); } /* Init all Rx queues */ static int mvneta_setup_rxqs(struct mvneta_port *pp) { - int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]); - if (err) { - netdev_err(pp->dev, "%s: can't create rxq=%d\n", - __func__, rxq_def); - mvneta_cleanup_rxqs(pp); - return err; + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); + + if (err) { + netdev_err(pp->dev, "%s: can't create rxq=%d\n", + __func__, queue); + mvneta_cleanup_rxqs(pp); + return err; + } } return 0; @@ -2435,6 +2526,31 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } +static void mvneta_percpu_unmask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are unmasked, but actually only the ones + * maped to this CPU will be unmasked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK_ALL | + MVNETA_TX_INTR_MASK_ALL | + MVNETA_MISCINTR_INTR_MASK); +} + +static void mvneta_percpu_mask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are masked, but actually only the ones + * maped to this CPU will be masked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); +} + static void mvneta_start_dev(struct mvneta_port *pp) { unsigned int cpu; @@ -2452,11 +2568,10 @@ static void mvneta_start_dev(struct mvneta_port *pp) napi_enable(&port->napi); } - /* Unmask interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | - MVNETA_TX_INTR_MASK(txq_number) | - MVNETA_MISCINTR_INTR_MASK); + /* Unmask interrupts. 
It has to be done from each CPU */ + for_each_online_cpu(cpu) + smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, + pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | @@ -2615,7 +2730,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) mvneta_mac_addr_set(pp, dev->dev_addr, -1); /* Set new addr in hw */ - mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); + mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); eth_commit_mac_addr_change(dev, addr); return 0; @@ -2732,22 +2847,45 @@ static void mvneta_percpu_disable(void *arg) static void mvneta_percpu_elect(struct mvneta_port *pp) { - int online_cpu_idx, cpu, i = 0; + int online_cpu_idx, max_cpu, cpu, i = 0; - online_cpu_idx = rxq_def % num_online_cpus(); + online_cpu_idx = pp->rxq_def % num_online_cpus(); + max_cpu = num_present_cpus(); for_each_online_cpu(cpu) { + int rxq_map = 0, txq_map = 0; + int rxq; + + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + if (i == online_cpu_idx) - /* Enable per-CPU interrupt on the one CPU we - * just elected + /* Map the default receive queue queue to the + * elected CPU */ - smp_call_function_single(cpu, mvneta_percpu_enable, - pp, true); + rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); + + /* We update the TX queue map only if we have one + * queue. In this case we associate the TX queue to + * the CPU bound to the default RX queue + */ + if (txq_number == 1) + txq_map = (i == online_cpu_idx) ? + MVNETA_CPU_TXQ_ACCESS(1) : 0; else - /* Disable per-CPU interrupt on all the other CPU */ - smp_call_function_single(cpu, mvneta_percpu_disable, - pp, true); + txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & + MVNETA_CPU_TXQ_ACCESS_ALL_MASK; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + + /* Update the interrupt mask on each CPU according the + * new mapping + */ + smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, + pp, true); i++; + } }; @@ -2782,12 +2920,22 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); napi_enable(&port->napi); + + /* Enable per-CPU interrupts on the CPU that is + * brought up. + */ + smp_call_function_single(cpu, mvneta_percpu_enable, + pp, true); + /* Enable per-CPU interrupt on the one CPU we care * about. */ mvneta_percpu_elect(pp); - /* Unmask all ethernet port interrupts */ + /* Unmask all ethernet port interrupts, as this + * notifier is called for each CPU then the CPU to + * Queue mapping is applied + */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number) | @@ -2838,7 +2986,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, static int mvneta_open(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int ret; + int ret, cpu; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + @@ -2868,8 +3016,13 @@ static int mvneta_open(struct net_device *dev) */ mvneta_percpu_disable(pp); - /* Elect a CPU to handle our RX queue interrupt */ - mvneta_percpu_elect(pp); + /* Enable per-CPU interrupt on all the CPU to handle our RX + * queue interrupts + */ + for_each_online_cpu(cpu) + smp_call_function_single(cpu, mvneta_percpu_enable, + pp, true); + /* Register a CPU notifier to handle the case where our CPU * might be taken offline. 
@@ -2943,10 +3096,43 @@ int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mvneta_port *pp = netdev_priv(dev); + struct phy_device *phydev = pp->phy_dev; - if (!pp->phy_dev) + if (!phydev) return -ENODEV; + if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { + u32 val; + + mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); + + if (cmd->autoneg == AUTONEG_DISABLE) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX); + + if (phydev->duplex) + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (phydev->speed == SPEED_1000) + val |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (phydev->speed == SPEED_100) + val |= MVNETA_GMAC_CONFIG_MII_SPEED; + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); + netdev_info(pp->dev, "autoneg status set to %i\n", + pp->use_inband_status); + + if (netif_running(dev)) { + mvneta_port_down(pp); + mvneta_port_up(pp); + } + } + return phy_ethtool_sset(pp->phy_dev, cmd); } @@ -3098,6 +3284,106 @@ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } +static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return MVNETA_RSS_LU_TABLE_SIZE; +} + +static int mvneta_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = rxq_number; + return 0; + case ETHTOOL_GRXFH: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int mvneta_config_rss(struct mvneta_port *pp) +{ + int cpu; + u32 val; + + netif_tx_stop_all_queues(pp->dev); + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, mvneta_percpu_mask_interrupt, + pp, true); + + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); + + napi_synchronize(&pcpu_port->napi); + napi_disable(&pcpu_port->napi); + } + + pp->rxq_def = pp->indir[0]; + + /* Update unicast mapping */ + mvneta_set_rx_mode(pp->dev); + + /* Update val of portCfg register accordingly with all RxQueue types */ + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + /* Update the elected CPU matching the new rxq_def */ + mvneta_percpu_elect(pp); + + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); + + napi_enable(&pcpu_port->napi); + } + + netif_tx_start_all_queues(pp->dev); + + return 0; +} + +static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvneta_port *pp = netdev_priv(dev); + /* We require at least one supported parameter to be changed + * and no change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); + + return mvneta_config_rss(pp); +} + +static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + memcpy(indir, 
pp->indir, MVNETA_RSS_LU_TABLE_SIZE); + + return 0; +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3122,6 +3408,10 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .get_strings = mvneta_ethtool_get_strings, .get_ethtool_stats = mvneta_ethtool_get_stats, .get_sset_count = mvneta_ethtool_get_sset_count, + .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, + .get_rxnfc = mvneta_ethtool_get_rxnfc, + .get_rxfh = mvneta_ethtool_get_rxfh, + .set_rxfh = mvneta_ethtool_set_rxfh, }; /* Initialize hw */ @@ -3230,9 +3520,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) return -EINVAL; } - if (pp->use_inband_status) - ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE; - /* Cancel Port Reset */ ctrl &= ~MVNETA_GMAC2_PORT_RESET; mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); @@ -3314,6 +3601,10 @@ static int mvneta_probe(struct platform_device *pdev) strcmp(managed, "in-band-status") == 0); pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; + pp->rxq_def = rxq_def; + + pp->indir[0] = rxq_def; + pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); @@ -3423,7 +3714,7 @@ static int mvneta_probe(struct platform_device *pdev) mvneta_fixed_link_update(pp, phy); - put_device(&phy->dev); + put_device(&phy->mdio.dev); } return 0; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index d9884fd15b45..a4beccf1fd46 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, } /* Free all buffers from the pool */ -static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool) { int i; for (i = 0; i < bm_pool->buf_num; i++) { + dma_addr_t buf_phys_addr; u32 vaddr; /* Get buffer virtual address (indirect access) */ - mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + buf_phys_addr = mvpp2_read(priv, + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); + + dma_unmap_single(dev, buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + if (!vaddr) break; dev_kfree_skb_any((struct sk_buff *)vaddr); @@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev, { u32 val; - mvpp2_bm_bufs_free(priv, bm_pool); + mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); if (bm_pool->buf_num) { WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); return 0; @@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, MVPP2_BM_LONG_BUF_NUM : MVPP2_BM_SHORT_BUF_NUM; else - mvpp2_bm_bufs_free(port->priv, new_pool); + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool); new_pool->pkt_size = pkt_size; @@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) int pkt_size = MVPP2_RX_PKT_SIZE(mtu); /* Update BM pool with new buffer size */ - mvpp2_bm_bufs_free(port->priv, port_pool); + mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); if (port_pool->buf_num) { WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); return -EIO; @@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, mvpp2_txq_inc_get(txq_pcpu); - if (!skb) - continue; - dma_unmap_single(port->dev->dev.parent, buf_phys_addr, skb_headlen(skb), DMA_TO_DEVICE); + if (!skb) + continue; 
dev_kfree_skb_any(skb); } } @@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; - int rx_received, rx_filled, i; + int rx_received; + int rx_done = 0; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; @@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, if (rx_todo > rx_received) rx_todo = rx_received; - rx_filled = 0; - for (i = 0; i < rx_todo; i++) { + while (rx_done < rx_todo) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); struct mvpp2_bm_pool *bm_pool; struct sk_buff *skb; + dma_addr_t phys_addr; u32 bm, rx_status; int pool, rx_bytes, err; - rx_filled++; + rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; + phys_addr = rx_desc->buf_phys_addr; bm = mvpp2_bm_cookie_build(rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); @@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, * comprised by the RX descriptor. */ if (rx_status & MVPP2_RXD_ERR_SUMMARY) { + err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); + /* Return the buffer to the pool */ mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, rx_desc->buf_cookie); continue; @@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, skb = (struct sk_buff *)rx_desc->buf_cookie; + err = mvpp2_rx_refill(port, bm_pool, bm, 0); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + goto err_drop_frame; + } + + dma_unmap_single(dev->dev.parent, phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + rcvd_pkts++; rcvd_bytes += rx_bytes; atomic_inc(&bm_pool->in_use); @@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, mvpp2_rx_csum(port, rx_status, skb); napi_gro_receive(&port->napi, skb); - - err = mvpp2_rx_refill(port, bm_pool, bm, 0); - if (err) { - netdev_err(port->dev, "failed to refill BM pools\n"); - rx_filled--; - } } if (rcvd_pkts) { @@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, /* Update Rx queue management counters */ wmb(); - mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); + mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); return rx_todo; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5606a043063e..ec0a22119e09 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4380,7 +4380,7 @@ static netdev_features_t sky2_fix_features(struct net_device *dev, */ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { netdev_info(dev, "checksum offload not possible with jumbo frames\n"); - features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM); + features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK); } /* Some hardware requires receive checksum for RSS to work. */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 8a083d73efdb..038f9ce391e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -242,6 +242,13 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) unsigned long flags; u64 ns, zero = 0; + /* mlx4_en_init_timestamp is called for each netdev. + * mdev->ptp_clock is common for all ports, skip initialization if + * was done for other port. 
+ */ + if (mdev->ptp_clock) + return; + rwlock_init(&mdev->clock_lock); memset(&mdev->cycles, 0, sizeof(mdev->cycles)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index eb8a4988de63..af975a2b74c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -155,13 +155,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; - if (cq->is_tx) { - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, - NAPI_POLL_WEIGHT); - } else { + if (cq->is_tx) + netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, + NAPI_POLL_WEIGHT); + else netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); - napi_hash_add(&cq->napi); - } napi_enable(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ddb5541882f5..dd84cabb2a51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -337,11 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return bitmap_iterator_count(&it) + (priv->tx_ring_num * 2) + -#ifdef CONFIG_NET_RX_BUSY_POLL - (priv->rx_ring_num * 5); -#else (priv->rx_ring_num * 2); -#endif case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; @@ -408,11 +404,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < priv->rx_ring_num; i++) { data[index++] = priv->rx_ring[i]->packets; data[index++] = priv->rx_ring[i]->bytes; -#ifdef CONFIG_NET_RX_BUSY_POLL - data[index++] = priv->rx_ring[i]->yields; - data[index++] = priv->rx_ring[i]->misses; - data[index++] = priv->rx_ring[i]->cleaned; -#endif } spin_unlock_bh(&priv->stats_lock); @@ -486,14 +477,6 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_packets", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_bytes", i); -#ifdef CONFIG_NET_RX_BUSY_POLL - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_napi_yield", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_misses", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_cleaned", i); -#endif } break; case ETH_SS_PRIV_FLAGS: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 005f910ec955..e0ec280a7fa1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -232,9 +232,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) if (mdev->pndev[i]) mlx4_en_destroy_netdev(mdev->pndev[i]); - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) - mlx4_en_remove_timestamp(mdev); - flush_workqueue(mdev->workqueue); destroy_workqueue(mdev->workqueue); (void) mlx4_mr_free(dev, &mdev->mr); @@ -320,10 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) mdev->port_cnt++; - /* Initialize time stamp mechanism */ - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) - mlx4_en_init_timestamp(mdev); - /* Set default number of RX rings*/ mlx4_en_set_num_rx_rings(mdev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 886e1bc86374..0c7e3f69a73b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c 
@@ -69,34 +69,6 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int mlx4_en_low_latency_recv(struct napi_struct *napi) -{ - struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); - struct net_device *dev = cq->dev; - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; - int done; - - if (!priv->port_up) - return LL_FLUSH_FAILED; - - if (!mlx4_en_cq_lock_poll(cq)) - return LL_FLUSH_BUSY; - - done = mlx4_en_process_rx_cq(dev, cq, 4); - if (likely(done)) - rx_ring->cleaned += done; - else - rx_ring->misses++; - - mlx4_en_cq_unlock_poll(cq); - - return done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -1561,8 +1533,6 @@ int mlx4_en_start_port(struct net_device *dev) for (i = 0; i < priv->rx_ring_num; i++) { cq = priv->rx_cq[i]; - mlx4_en_cq_init_lock(cq); - err = mlx4_en_init_affinity_hint(priv, i); if (err) { en_err(priv, "Failed preparing IRQ affinity hint\n"); @@ -1859,13 +1829,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) for (i = 0; i < priv->rx_ring_num; i++) { struct mlx4_en_cq *cq = priv->rx_cq[i]; - local_bh_disable(); - while (!mlx4_en_cq_lock_napi(cq)) { - pr_info("CQ %d locked\n", i); - mdelay(1); - } - local_bh_enable(); - napi_synchronize(&cq->napi); mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); @@ -2072,6 +2035,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev) /* flush any pending task for this netdev */ flush_workqueue(mdev->workqueue); + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_remove_timestamp(mdev); + /* Detach the netdev so tasks would not attempt to access it */ mutex_lock(&mdev->state_lock); mdev->pndev[priv->port] = NULL; @@ -2504,9 +2470,6 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = mlx4_en_low_latency_recv, -#endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, #ifdef CONFIG_MLX4_EN_VXLAN .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, @@ -3058,9 +3021,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); + /* Initialize time stamp mechanism */ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) - queue_delayed_work(mdev->workqueue, &priv->service_task, - SERVICE_TASK_DELAY); + mlx4_en_init_timestamp(mdev); + + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, mdev->profile.prof[priv->port].rx_ppp, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e7a5000aa12c..41440b2b20a3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -873,10 +873,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * - TCP/IP (v4) * - without IP options * - not an IP fragment - * - no LLS polling in progress */ - if (!mlx4_en_cq_busy_polling(cq) && - (dev->features & NETIF_F_GRO)) { + if (dev->features & NETIF_F_GRO) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) goto next; @@ -927,7 +925,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, 
cq->ring); - skb_mark_napi_id(gro_skb, &cq->napi); if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { timestamp = mlx4_en_get_cqe_ts(cqe); @@ -990,13 +987,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud timestamp); } - skb_mark_napi_id(skb, &cq->napi); - - if (!mlx4_en_cq_busy_polling(cq)) - napi_gro_receive(&cq->napi, skb); - else - netif_receive_skb(skb); - + napi_gro_receive(&cq->napi, skb); next: for (nr = 0; nr < priv->num_frags; nr++) mlx4_en_free_frag(priv, frags, nr); @@ -1038,13 +1029,8 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) struct mlx4_en_priv *priv = netdev_priv(dev); int done; - if (!mlx4_en_cq_lock_napi(cq)) - return budget; - done = mlx4_en_process_rx_cq(dev, cq, budget); - mlx4_en_cq_unlock_napi(cq); - /* If we used up all the quota - we're probably not done yet... */ if (done == budget) { const struct cpumask *aff; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 603d1c3d3b2e..4696053165f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -151,6 +151,17 @@ void mlx4_gen_slave_eqe(struct work_struct *work) eqe = next_slave_event_eqe(slave_eq)) { slave = eqe->slave_id; + if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE && + eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN && + mlx4_is_bonded(dev)) { + struct mlx4_port_cap port_cap; + + if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state) + goto consume; + + if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state) + goto consume; + } /* All active slaves need to receive the event */ if (slave == ALL_SLAVES) { for (i = 0; i <= dev->persist->num_vfs; i++) { @@ -174,6 +185,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work) mlx4_warn(dev, "Failed to generate event for slave %d\n", slave); } +consume: ++slave_eq->cons; } } @@ -594,7 +606,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; for (i = 0; i < dev->persist->num_vfs + 1; i++) { - if (!test_bit(i, slaves_port.slaves)) + int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); + + if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) continue; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { if (i == mlx4_master_func_num(dev)) @@ -606,7 +620,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eqe->event.port_change.port = cpu_to_be32( (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) - | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + | (reported_port << 28)); mlx4_slave_event(dev, i, eqe); } } else { /* IB port */ @@ -636,7 +650,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) for (i = 0; i < dev->persist->num_vfs + 1; i++) { - if (!test_bit(i, slaves_port.slaves)) + int reported_port = mlx4_is_bonded(dev) ? 
1 : mlx4_phys_to_slave_port(dev, i, port); + + if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) continue; if (i == mlx4_master_func_num(dev)) continue; @@ -645,7 +661,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eqe->event.port_change.port = cpu_to_be32( (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) - | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + | (reported_port << 28)); mlx4_slave_event(dev, i, eqe); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 90db94e83fde..2c2baab9d880 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1104,6 +1104,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c goto out; MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + port_cap->link_state = (field & 0x80) >> 7; port_cap->supported_port_types = field & 3; port_cap->suggested_type = (field >> 3) & 1; port_cap->default_sense = (field >> 4) & 1; @@ -1310,6 +1311,15 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, port_type |= MLX4_PORT_LINK_UP_MASK; else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) port_type &= ~MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) { + int other_port = (port == 1) ? 2 : 1; + struct mlx4_port_cap port_cap; + + err = mlx4_QUERY_PORT(dev, other_port, &port_cap); + if (err) + goto out; + port_type |= (port_cap.link_state << 7); + } MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); @@ -1325,7 +1335,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, short_field, QUERY_PORT_CUR_MAX_PKEY_OFFSET); } - +out: return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 08de5555c2f4..7ea258af636a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -44,6 +44,7 @@ struct mlx4_mod_stat_cfg { }; struct mlx4_port_cap { + u8 link_state; u8 supported_port_types; u8 suggested_type; u8 default_sense; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 31c491e02e69..f1b6d219e445 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1221,6 +1221,76 @@ err_set_port: return err ? 
err : count; } +/* bond for multi-function device */ +#define MAX_MF_BOND_ALLOWED_SLAVES 63 +static int mlx4_mf_bond(struct mlx4_dev *dev) +{ + int err = 0; + struct mlx4_slaves_pport slaves_port1; + struct mlx4_slaves_pport slaves_port2; + DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); + + slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); + slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); + bitmap_and(slaves_port_1_2, + slaves_port1.slaves, slaves_port2.slaves, + dev->persist->num_vfs + 1); + + /* only single port vfs are allowed */ + if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) { + mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); + return -EINVAL; + } + + /* limit on maximum allowed VFs */ + if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + + bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > + MAX_MF_BOND_ALLOWED_SLAVES) + return -EINVAL; + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); + return -EINVAL; + } + + err = mlx4_bond_mac_table(dev); + if (err) + return err; + err = mlx4_bond_vlan_table(dev); + if (err) + goto err1; + err = mlx4_bond_fs_rules(dev); + if (err) + goto err2; + + return 0; +err2: + (void)mlx4_unbond_vlan_table(dev); +err1: + (void)mlx4_unbond_mac_table(dev); + return err; +} + +static int mlx4_mf_unbond(struct mlx4_dev *dev) +{ + int ret, ret1; + + ret = mlx4_unbond_fs_rules(dev); + if (ret) + mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); + ret1 = mlx4_unbond_mac_table(dev); + if (ret1) { + mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); + ret = ret1; + } + ret1 = mlx4_unbond_vlan_table(dev); + if (ret1) { + mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1); + ret = ret1; + } + return ret; +} + int mlx4_bond(struct mlx4_dev *dev) { int ret = 0; @@ -1228,16 +1298,23 @@ int mlx4_bond(struct mlx4_dev *dev) mutex_lock(&priv->bond_mutex); - if (!mlx4_is_bonded(dev)) + if (!mlx4_is_bonded(dev)) { ret = mlx4_do_bond(dev, true); - else - ret = 0; + if (ret) + mlx4_err(dev, "Failed to bond device: %d\n", ret); + if (!ret && mlx4_is_master(dev)) { + ret = mlx4_mf_bond(dev); + if (ret) { + mlx4_err(dev, "bond for multifunction failed\n"); + mlx4_do_bond(dev, false); + } + } + } mutex_unlock(&priv->bond_mutex); - if (ret) - mlx4_err(dev, "Failed to bond device: %d\n", ret); - else + if (!ret) mlx4_dbg(dev, "Device is bonded\n"); + return ret; } EXPORT_SYMBOL_GPL(mlx4_bond); @@ -1249,14 +1326,24 @@ int mlx4_unbond(struct mlx4_dev *dev) mutex_lock(&priv->bond_mutex); - if (mlx4_is_bonded(dev)) + if (mlx4_is_bonded(dev)) { + int ret2 = 0; + ret = mlx4_do_bond(dev, false); + if (ret) + mlx4_err(dev, "Failed to unbond device: %d\n", ret); + if (mlx4_is_master(dev)) + ret2 = mlx4_mf_unbond(dev); + if (ret2) { + mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2); + ret = ret2; + } + } mutex_unlock(&priv->bond_mutex); - if (ret) - mlx4_err(dev, "Failed to unbond device: %d\n", ret); - else + if (!ret) mlx4_dbg(dev, "Device is unbonded\n"); + return ret; } EXPORT_SYMBOL_GPL(mlx4_unbond); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e1cf9036af22..2404c22ad2b2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -736,6 +736,7 @@ struct mlx4_catas_err { struct mlx4_mac_table { __be64 entries[MLX4_MAX_MAC_NUM]; int 
refs[MLX4_MAX_MAC_NUM]; + bool is_dup[MLX4_MAX_MAC_NUM]; struct mutex mutex; int total; int max; @@ -758,6 +759,7 @@ struct mlx4_roce_gid_table { struct mlx4_vlan_table { __be32 entries[MLX4_MAX_VLAN_NUM]; int refs[MLX4_MAX_VLAN_NUM]; + int is_dup[MLX4_MAX_VLAN_NUM]; struct mutex mutex; int total; int max; @@ -1225,6 +1227,10 @@ void mlx4_init_roce_gid_table(struct mlx4_dev *dev, struct mlx4_roce_gid_table *table); void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); +int mlx4_bond_vlan_table(struct mlx4_dev *dev); +int mlx4_unbond_vlan_table(struct mlx4_dev *dev); +int mlx4_bond_mac_table(struct mlx4_dev *dev); +int mlx4_unbond_mac_table(struct mlx4_dev *dev); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); /* resource tracker functions*/ @@ -1385,6 +1391,8 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); int mlx4_config_mad_demux(struct mlx4_dev *dev); int mlx4_do_bond(struct mlx4_dev *dev, bool enable); +int mlx4_bond_fs_rules(struct mlx4_dev *dev); +int mlx4_unbond_fs_rules(struct mlx4_dev *dev); enum mlx4_zone_flags { MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c41f15102ae0..35de7d2e6b34 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -320,11 +320,6 @@ struct mlx4_en_rx_ring { void *rx_info; unsigned long bytes; unsigned long packets; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned long yields; - unsigned long misses; - unsigned long cleaned; -#endif unsigned long csum_ok; unsigned long csum_none; unsigned long csum_complete; @@ -347,18 +342,6 @@ struct mlx4_en_cq { struct mlx4_cqe *buf; #define MLX4_EN_OPCODE_ERROR 0x1e -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define MLX4_EN_CQ_STATE_IDLE 0 -#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ -#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */ -#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL) -#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */ -#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */ -#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) -#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) - spinlock_t poll_lock; /* protects from LLS/napi conflicts */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ struct irq_desc *irq_desc; }; @@ -622,115 +605,6 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) return buf + idx * cqe_sz; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) -{ - spin_lock_init(&cq->poll_lock); - cq->state = MLX4_EN_CQ_STATE_IDLE; -} - -/* called from the device poll rutine to get ownership of a cq */ -static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) -{ - int rc = true; - spin_lock(&cq->poll_lock); - if (cq->state & MLX4_CQ_LOCKED) { - WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); - cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; - rc = false; - } else - /* we don't care if someone yielded */ - cq->state = MLX4_EN_CQ_STATE_NAPI; - spin_unlock(&cq->poll_lock); - return rc; -} - -/* returns true is someone tried to get the cq while napi had it */ -static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) -{ - int rc = false; - 
spin_lock(&cq->poll_lock); - WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL | - MLX4_EN_CQ_STATE_NAPI_YIELD)); - - if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) - rc = true; - cq->state = MLX4_EN_CQ_STATE_IDLE; - spin_unlock(&cq->poll_lock); - return rc; -} - -/* called from mlx4_en_low_latency_poll() */ -static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) -{ - int rc = true; - spin_lock_bh(&cq->poll_lock); - if ((cq->state & MLX4_CQ_LOCKED)) { - struct net_device *dev = cq->dev; - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; - - cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; - rc = false; - rx_ring->yields++; - } else - /* preserve yield marks */ - cq->state |= MLX4_EN_CQ_STATE_POLL; - spin_unlock_bh(&cq->poll_lock); - return rc; -} - -/* returns true if someone tried to get the cq while it was locked */ -static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) -{ - int rc = false; - spin_lock_bh(&cq->poll_lock); - WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI)); - - if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) - rc = true; - cq->state = MLX4_EN_CQ_STATE_IDLE; - spin_unlock_bh(&cq->poll_lock); - return rc; -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) -{ - WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); - return cq->state & CQ_USER_PEND; -} -#else -static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) -{ -} - -static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) -{ - return true; -} - -static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) -{ - return false; -} - -static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) -{ - return false; -} - -static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) -{ - return false; -} - -static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) void mlx4_en_update_loopback_state(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index c2b21313dba7..f2550425c251 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -61,6 +61,7 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { table->entries[i] = 0; table->refs[i] = 0; + table->is_dup[i] = false; } table->max = 1 << dev->caps.log_num_macs; table->total = 0; @@ -74,6 +75,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { table->entries[i] = 0; table->refs[i] = 0; + table->is_dup[i] = false; } table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; table->total = 0; @@ -159,21 +161,94 @@ int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) } EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); +static bool mlx4_need_mf_bond(struct mlx4_dev *dev) +{ + int i, num_eth_ports = 0; + + if (!mlx4_is_mfunc(dev)) + return false; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + ++num_eth_ports; + + return (num_eth_ports == 2) ? 
true : false; +} + int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; int i, err = 0; int free = -1; + int free_for_dup = -1; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 2 : 1; + struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table; + bool need_mf_bond = mlx4_need_mf_bond(dev); + bool can_mf_bond = true; + + mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n", + (unsigned long long)mac, port, + dup ? "with" : "without"); + + if (need_mf_bond) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } + + if (need_mf_bond) { + int index_at_port = -1; + int index_at_dup_port = -1; - mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", - (unsigned long long) mac, port); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))) + index_at_port = i; + if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i])))) + index_at_dup_port = i; + } + + /* check that same mac is not in the tables at different indices */ + if ((index_at_port != index_at_dup_port) && + (index_at_port >= 0) && + (index_at_dup_port >= 0)) + can_mf_bond = false; + + /* If the mac is already in the primary table, the slot must be + * available in the duplicate table as well. + */ + if (index_at_port >= 0 && index_at_dup_port < 0 && + dup_table->refs[index_at_port]) { + can_mf_bond = false; + } + /* If the mac is already in the duplicate table, check that the + * corresponding index is not occupied in the primary table, or + * the primary table already contains the mac at the same index. + * Otherwise, you cannot bond (primary contains a different mac + * at that index). 
+ */ + if (index_at_dup_port >= 0) { + if (!table->refs[index_at_dup_port] || + ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port])))) + free_for_dup = index_at_dup_port; + else + can_mf_bond = false; + } + } - mutex_lock(&table->mutex); for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { if (!table->refs[i]) { if (free < 0) free = i; + if (free_for_dup < 0 && need_mf_bond && can_mf_bond) { + if (!dup_table->refs[i]) + free_for_dup = i; + } continue; } @@ -182,10 +257,30 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) /* MAC already registered, increment ref count */ err = i; ++table->refs[i]; + if (dup) { + u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]); + + if (dup_mac != mac || !dup_table->is_dup[i]) { + mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n", + mac, dup_port, i); + } + } goto out; } } + if (need_mf_bond && (free_for_dup < 0)) { + if (dup) { + mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n"); + mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n"); + dup = false; + } + can_mf_bond = false; + } + + if (need_mf_bond && can_mf_bond) + free = free_for_dup; + mlx4_dbg(dev, "Free MAC index is %d\n", free); if (table->total == table->max) { @@ -205,10 +300,35 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) goto out; } table->refs[free] = 1; - err = free; + table->is_dup[free] = false; ++table->total; + if (dup) { + dup_table->refs[free] = 0; + dup_table->is_dup[free] = true; + dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac); + dup_table->is_dup[free] = false; + dup_table->entries[free] = 0; + goto out; + } + ++dup_table->total; + } + err = free; out: - mutex_unlock(&table->mutex); + if (need_mf_bond) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } return err; } EXPORT_SYMBOL_GPL(__mlx4_register_mac); @@ -255,6 +375,9 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) struct mlx4_port_info *info; struct mlx4_mac_table *table; int index; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 
2 : 1; + struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table; if (port < 1 || port > dev->caps.num_ports) { mlx4_warn(dev, "invalid port number (%d), aborting...\n", port); @@ -262,22 +385,59 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) } info = &mlx4_priv(dev)->port[port]; table = &info->mac_table; - mutex_lock(&table->mutex); + + if (dup) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } + index = find_index(dev, table, mac); if (validate_index(dev, table, index)) goto out; - if (--table->refs[index]) { + + if (--table->refs[index] || table->is_dup[index]) { mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n", index); + if (!table->refs[index]) + dup_table->is_dup[index] = false; goto out; } table->entries[index] = 0; - mlx4_set_port_mac_table(dev, port, table->entries); + if (mlx4_set_port_mac_table(dev, port, table->entries)) + mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port); --table->total; + + if (dup) { + dup_table->is_dup[index] = false; + if (dup_table->refs[index]) + goto out; + dup_table->entries[index] = 0; + if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries)) + mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port); + + --table->total; + } out: - mutex_unlock(&table->mutex); + if (dup) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } } EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); @@ -311,9 +471,22 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) struct mlx4_mac_table *table = &info->mac_table; int index = qpn - info->base_qpn; int err = 0; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 
2 : 1; + struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table; /* CX1 doesn't support multi-functions */ - mutex_lock(&table->mutex); + if (dup) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } err = validate_index(dev, table, index); if (err) @@ -326,9 +499,30 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); table->entries[index] = 0; + } else { + if (dup) { + dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n", + (unsigned long long)new_mac); + dup_table->entries[index] = 0; + } + } } out: - mutex_unlock(&table->mutex); + if (dup) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } return err; } EXPORT_SYMBOL_GPL(__mlx4_replace_mac); @@ -380,8 +574,28 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; int i, err = 0; int free = -1; - - mutex_lock(&table->mutex); + int free_for_dup = -1; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 2 : 1; + struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table; + bool need_mf_bond = mlx4_need_mf_bond(dev); + bool can_mf_bond = true; + + mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n", + vlan, port, + dup ? "with" : "without"); + + if (need_mf_bond) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } if (table->total == table->max) { /* No free vlan entries */ @@ -389,22 +603,85 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, goto out; } + if (need_mf_bond) { + int index_at_port = -1; + int index_at_dup_port = -1; + + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { + if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) + index_at_port = i; + if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))) + index_at_dup_port = i; + } + /* check that same vlan is not in the tables at different indices */ + if ((index_at_port != index_at_dup_port) && + (index_at_port >= 0) && + (index_at_dup_port >= 0)) + can_mf_bond = false; + + /* If the vlan is already in the primary table, the slot must be + * available in the duplicate table as well. + */ + if (index_at_port >= 0 && index_at_dup_port < 0 && + dup_table->refs[index_at_port]) { + can_mf_bond = false; + } + /* If the vlan is already in the duplicate table, check that the + * corresponding index is not occupied in the primary table, or + * the primary table already contains the vlan at the same index. + * Otherwise, you cannot bond (primary contains a different vlan + * at that index). 
+ */ + if (index_at_dup_port >= 0) { + if (!table->refs[index_at_dup_port] || + (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port])))) + free_for_dup = index_at_dup_port; + else + can_mf_bond = false; + } + } + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { - if (free < 0 && (table->refs[i] == 0)) { - free = i; - continue; + if (!table->refs[i]) { + if (free < 0) + free = i; + if (free_for_dup < 0 && need_mf_bond && can_mf_bond) { + if (!dup_table->refs[i]) + free_for_dup = i; + } } - if (table->refs[i] && + if ((table->refs[i] || table->is_dup[i]) && (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) { /* Vlan already registered, increase references count */ + mlx4_dbg(dev, "vlan %u is already registered.\n", vlan); *index = i; ++table->refs[i]; + if (dup) { + u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]); + + if (dup_vlan != vlan || !dup_table->is_dup[i]) { + mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n", + vlan, dup_port, i); + } + } goto out; } } + if (need_mf_bond && (free_for_dup < 0)) { + if (dup) { + mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n"); + mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n"); + dup = false; + } + can_mf_bond = false; + } + + if (need_mf_bond && can_mf_bond) + free = free_for_dup; + if (free < 0) { err = -ENOMEM; goto out; @@ -412,6 +689,7 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, /* Register new VLAN */ table->refs[free] = 1; + table->is_dup[free] = false; table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); err = mlx4_set_port_vlan_table(dev, port, table->entries); @@ -421,11 +699,35 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, table->entries[free] = 0; goto out; } + ++table->total; + if (dup) { + dup_table->refs[free] = 0; + dup_table->is_dup[free] = true; + dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); + + err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan); + dup_table->is_dup[free] = false; + dup_table->entries[free] = 0; + goto out; + } + ++dup_table->total; + } *index = free; - ++table->total; out: - mutex_unlock(&table->mutex); + if (need_mf_bond) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } return err; } @@ -455,8 +757,22 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; int index; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 
2 : 1; + struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table; + + if (dup) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } - mutex_lock(&table->mutex); if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); goto out; @@ -467,16 +783,38 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) goto out; } - if (--table->refs[index]) { + if (--table->refs[index] || table->is_dup[index]) { mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", table->refs[index], index); + if (!table->refs[index]) + dup_table->is_dup[index] = false; goto out; } table->entries[index] = 0; - mlx4_set_port_vlan_table(dev, port, table->entries); + if (mlx4_set_port_vlan_table(dev, port, table->entries)) + mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port); --table->total; + if (dup) { + dup_table->is_dup[index] = false; + if (dup_table->refs[index]) + goto out; + dup_table->entries[index] = 0; + if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries)) + mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port); + --dup_table->total; + } out: - mutex_unlock(&table->mutex); + if (dup) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } } void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) @@ -495,6 +833,220 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) } EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); +int mlx4_bond_mac_table(struct mlx4_dev *dev) +{ + struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table; + struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table; + int ret = 0; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if ((t1->entries[i] != t2->entries[i]) && + t1->entries[i] && t2->entries[i]) { + mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (t1->entries[i] && !t2->entries[i]) { + t2->entries[i] = t1->entries[i]; + t2->is_dup[i] = true; + update2 = true; + } else if (!t1->entries[i] && t2->entries[i]) { + t1->entries[i] = t2->entries[i]; + t1->is_dup[i] = true; + update1 = true; + } else if (t1->entries[i] && t2->entries[i]) { + t1->is_dup[i] = true; + t2->is_dup[i] = true; + } + } + + if (update1) { + ret = mlx4_set_port_mac_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret); + } + if (!ret && update2) { + ret = mlx4_set_port_mac_table(dev, 2, t2->entries); + if (ret) + mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret); + } + + if (ret) + mlx4_warn(dev, "failed to create mirror MAC tables\n"); +unlock: + mutex_unlock(&t2->mutex); + mutex_unlock(&t1->mutex); + return ret; +} + +int mlx4_unbond_mac_table(struct mlx4_dev *dev) +{ + struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table; + struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table; + int ret = 0; + int ret1; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + 
mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (t1->entries[i] != t2->entries[i]) { + mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n"); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!t1->entries[i]) + continue; + t1->is_dup[i] = false; + if (!t1->refs[i]) { + t1->entries[i] = 0; + update1 = true; + } + t2->is_dup[i] = false; + if (!t2->refs[i]) { + t2->entries[i] = 0; + update2 = true; + } + } + + if (update1) { + ret = mlx4_set_port_mac_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret); + } + if (update2) { + ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries); + if (ret1) { + mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1); + ret = ret1; + } + } +unlock: + mutex_unlock(&t2->mutex); + mutex_unlock(&t1->mutex); + return ret; +} + +int mlx4_bond_vlan_table(struct mlx4_dev *dev) +{ + struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table; + struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table; + int ret = 0; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if ((t1->entries[i] != t2->entries[i]) && + t1->entries[i] && t2->entries[i]) { + mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if (t1->entries[i] && !t2->entries[i]) { + t2->entries[i] = t1->entries[i]; + t2->is_dup[i] = true; + update2 = true; + } else if (!t1->entries[i] && t2->entries[i]) { + t1->entries[i] = t2->entries[i]; + t1->is_dup[i] = true; + update1 = true; + } else if (t1->entries[i] && t2->entries[i]) { + t1->is_dup[i] = true; + t2->is_dup[i] = true; + } + } + + if (update1) { + ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret); + } + if (!ret && update2) { + ret = mlx4_set_port_vlan_table(dev, 2, t2->entries); + if (ret) + mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret); + } + + if (ret) + mlx4_warn(dev, "failed to create mirror VLAN tables\n"); +unlock: + mutex_unlock(&t2->mutex); + mutex_unlock(&t1->mutex); + return ret; +} + +int mlx4_unbond_vlan_table(struct mlx4_dev *dev) +{ + struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table; + struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table; + int ret = 0; + int ret1; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if (t1->entries[i] != t2->entries[i]) { + mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n"); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if (!t1->entries[i]) + continue; + t1->is_dup[i] = false; + if (!t1->refs[i]) { + t1->entries[i] = 0; + update1 = true; + } + t2->is_dup[i] = false; + if (!t2->refs[i]) { + t2->entries[i] = 0; + update2 = true; + } + } + + if (update1) { + ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret); + } + if (update2) { + ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries); + if (ret1) { + mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1); + ret = ret1; + } + } +unlock: + mutex_unlock(&t2->mutex); + 
mutex_unlock(&t1->mutex); + return ret; +} + int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) { struct mlx4_cmd_mailbox *inmailbox, *outmailbox; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 6fec3e993d02..b46dbe29ef6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -222,6 +222,13 @@ enum res_fs_rule_states { struct res_fs_rule { struct res_common com; int qpn; + /* VF DMFS mbox with port flipped */ + void *mirr_mbox; + /* > 0 --> apply mirror when getting into HA mode */ + /* = 0 --> un-apply mirror when getting out of HA mode */ + u32 mirr_mbox_size; + struct list_head mirr_list; + u64 mirr_rule_id; }; static void *res_tracker_lookup(struct rb_root *root, u64 res_id) @@ -4284,6 +4291,22 @@ err_mac: return err; } +static u32 qp_attach_mbox_size(void *mbox) +{ + u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl); + struct _rule_hw *rule_header; + + rule_header = (struct _rule_hw *)(mbox + size); + + while (rule_header->size) { + size += rule_header->size * sizeof(u32); + rule_header += 1; + } + return size; +} + +static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule); + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -4300,15 +4323,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_net_trans_rule_hw_ctrl *ctrl; struct _rule_hw *rule_header; int header_id; + struct res_fs_rule *rrule; + u32 mbox_size; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) return -EOPNOTSUPP; ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; - ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); - if (ctrl->port <= 0) + err = mlx4_slave_convert_port(dev, slave, ctrl->port); + if (err <= 0) return -EINVAL; + ctrl->port = err; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { @@ -4328,7 +4354,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, case MLX4_NET_TRANS_RULE_ID_ETH: if (validate_eth_header_mac(slave, rule_header, rlist)) { err = -EINVAL; - goto err_put; + goto err_put_qp; } break; case MLX4_NET_TRANS_RULE_ID_IB: @@ -4339,7 +4365,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); if (add_eth_header(dev, slave, inbox, rlist, header_id)) { err = -EINVAL; - goto err_put; + goto err_put_qp; } vhcr->in_modifier += sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; @@ -4347,7 +4373,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, default: pr_err("Corrupted mailbox\n"); err = -EINVAL; - goto err_put; + goto err_put_qp; } execute: @@ -4356,23 +4382,69 @@ execute: MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - goto err_put; + goto err_put_qp; + err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { mlx4_err(dev, "Fail to add flow steering resources\n"); - /* detach rule*/ + goto err_detach; + } + + err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule); + if (err) + goto err_detach; + + mbox_size = qp_attach_mbox_size(inbox->buf); + rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL); + if (!rrule->mirr_mbox) { + err = -ENOMEM; + goto err_put_rule; + } + rrule->mirr_mbox_size = 
mbox_size; + rrule->mirr_rule_id = 0; + memcpy(rrule->mirr_mbox, inbox->buf, mbox_size); + + /* set different port */ + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox; + if (ctrl->port == 1) + ctrl->port = 2; + else + ctrl->port = 1; + + if (mlx4_is_bonded(dev)) + mlx4_do_mirror_rule(dev, rrule); + + atomic_inc(&rqp->ref_count); + +err_put_rule: + put_res(dev, slave, vhcr->out_param, RES_FS_RULE); +err_detach: + /* detach rule on error */ + if (err) mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); - goto err_put; - } - atomic_inc(&rqp->ref_count); -err_put: +err_put_qp: put_res(dev, slave, qpn, RES_QP); return err; } +static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) +{ + int err; + + err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0); + if (err) { + mlx4_err(dev, "Fail to remove flow steering resources\n"); + return err; + } + + mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + return 0; +} + int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -4382,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, int err; struct res_qp *rqp; struct res_fs_rule *rrule; + u64 mirr_reg_id; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) @@ -4390,12 +4463,30 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); if (err) return err; + + if (!rrule->mirr_mbox) { + mlx4_err(dev, "Mirror rules cannot be removed explicitly\n"); + put_res(dev, slave, vhcr->in_param, RES_FS_RULE); + return -EINVAL; + } + mirr_reg_id = rrule->mirr_rule_id; + kfree(rrule->mirr_mbox); + /* Release the rule form busy state before removal */ put_res(dev, slave, vhcr->in_param, RES_FS_RULE); err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); if (err) return err; + if (mirr_reg_id && mlx4_is_bonded(dev)) { + err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule); + if (err) { + mlx4_err(dev, "Fail to get resource of mirror rule\n"); + } else { + put_res(dev, slave, mirr_reg_id, RES_FS_RULE); + mlx4_undo_mirror_rule(dev, rrule); + } + } err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { mlx4_err(dev, "Fail to remove flow steering resources\n"); @@ -4833,6 +4924,91 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) spin_unlock_irq(mlx4_tlock(dev)); } +static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + struct res_fs_rule *mirr_rule; + u64 reg_id; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + if (!fs_rule->mirr_mbox) { + mlx4_err(dev, "rule mirroring mailbox is null\n"); + return -EINVAL; + } + memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); + err = mlx4_cmd_imm(dev, mailbox->dma, ®_id, fs_rule->mirr_mbox_size >> 2, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + mlx4_free_cmd_mailbox(dev, mailbox); + + if (err) + goto err; + + err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn); + if (err) + goto err_detach; + + err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule); + if (err) + goto err_rem; + + fs_rule->mirr_rule_id = reg_id; + 
mirr_rule->mirr_rule_id = 0; + mirr_rule->mirr_mbox_size = 0; + mirr_rule->mirr_mbox = NULL; + put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE); + + return 0; +err_rem: + rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0); +err_detach: + mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +err: + return err; +} + +static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct rb_root *root = &tracker->res_tree[RES_FS_RULE]; + struct rb_node *p; + struct res_fs_rule *fs_rule; + int err = 0; + LIST_HEAD(mirr_list); + + for (p = rb_first(root); p; p = rb_next(p)) { + fs_rule = rb_entry(p, struct res_fs_rule, com.node); + if ((bond && fs_rule->mirr_mbox_size) || + (!bond && !fs_rule->mirr_mbox_size)) + list_add_tail(&fs_rule->mirr_list, &mirr_list); + } + + list_for_each_entry(fs_rule, &mirr_list, mirr_list) { + if (bond) + err += mlx4_do_mirror_rule(dev, fs_rule); + else + err += mlx4_undo_mirror_rule(dev, fs_rule); + } + return err; +} + +int mlx4_bond_fs_rules(struct mlx4_dev *dev) +{ + return mlx4_mirror_fs_rules(dev, true); +} + +int mlx4_unbond_fs_rules(struct mlx4_dev *dev) +{ + return mlx4_mirror_fs_rules(dev, false); +} + static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 158c88c69ef9..c503ea05e742 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -13,6 +13,7 @@ config MLX5_CORE config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + select PTP_1588_CLOCK default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. 
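Editor's note on the mlx4 HA-bonding hunks above (an illustration, not part of the patch): the duplicate MAC/VLAN table code always takes the two per-port table mutexes in a fixed global order (port 1's mutex first, whichever port the caller is operating on), so concurrent register/unregister calls on the two ports cannot deadlock, and the unlock paths release in the reverse order. The sketch below only restates that convention; the type and helper names (example_port_table, example_dup_lock) are hypothetical and do not appear in the patch.

#include <linux/mutex.h>

struct example_port_table {
	struct mutex mutex;
	/* entries[], refs[], is_dup[] ... as in struct mlx4_mac_table above */
};

/* Hypothetical helper mirroring the pattern used by __mlx4_register_mac(),
 * __mlx4_unregister_mac(), __mlx4_replace_mac() and the VLAN variants:
 * 'table' belongs to the calling port, 'dup_table' to the other port, and
 * port 1's mutex is always acquired first so the two orders never cross.
 */
static void example_dup_lock(struct example_port_table *table,
			     struct example_port_table *dup_table, u8 port)
{
	if (port == 1) {
		mutex_lock(&table->mutex);	/* port 1's table ... */
		mutex_lock(&dup_table->mutex);	/* ... then port 2's */
	} else {
		mutex_lock(&dup_table->mutex);	/* dup_table is port 1 here */
		mutex_lock(&table->mutex);
	}
}

The flow-steering side follows the same idea at a higher level: the ATTACH wrapper keeps a private copy of each attach mailbox with its port field flipped (1 to 2, or 2 to 1), and mlx4_bond_fs_rules()/mlx4_unbond_fs_rules() replay or remove those copies when the device enters or leaves bonded mode, so every VF steering rule is present on both physical ports while HA is active.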
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 26a68b8af2c5..01c0256effb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \ - en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ + en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ + en_txrx.o en_clock.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 22e72bf1ae48..9ea49a893323 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -32,6 +32,9 @@ #include <linux/if_vlan.h> #include <linux/etherdevice.h> +#include <linux/timecounter.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/cq.h> @@ -64,6 +67,8 @@ #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 +#define MLX5E_NUM_MAIN_GROUPS 9 + static const char vport_strings[][ETH_GSTRING_LEN] = { /* vport statistics */ "rx_packets", @@ -282,6 +287,19 @@ struct mlx5e_params { u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; }; +struct mlx5e_tstamp { + rwlock_t lock; + struct cyclecounter cycles; + struct timecounter clock; + struct hwtstamp_config hwtstamp_config; + u32 nominal_c_mult; + unsigned long overflow_period; + struct delayed_work overflow_work; + struct mlx5_core_dev *mdev; + struct ptp_clock *ptp; + struct ptp_clock_info ptp_info; +}; + enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, }; @@ -313,6 +331,7 @@ struct mlx5e_rq { struct device *pdev; struct net_device *netdev; + struct mlx5e_tstamp *tstamp; struct mlx5e_rq_stats stats; struct mlx5e_cq cq; @@ -326,14 +345,12 @@ struct mlx5e_rq { struct mlx5e_priv *priv; } ____cacheline_aligned_in_smp; -struct mlx5e_tx_skb_cb { +struct mlx5e_tx_wqe_info { u32 num_bytes; u8 num_wqebbs; u8 num_dma; }; -#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) - enum mlx5e_dma_map_type { MLX5E_DMA_MAP_SINGLE, MLX5E_DMA_MAP_PAGE @@ -369,6 +386,7 @@ struct mlx5e_sq { /* pointers to per packet info: write@xmit, read@completion */ struct sk_buff **skb; struct mlx5e_sq_dma *dma_fifo; + struct mlx5e_tx_wqe_info *wqe_info; /* read only */ struct mlx5_wq_cyc wq; @@ -381,6 +399,7 @@ struct mlx5e_sq { u16 max_inline; u16 edge; struct device *pdev; + struct mlx5e_tstamp *tstamp; __be32 mkey_be; unsigned long state; @@ -442,7 +461,7 @@ enum mlx5e_rqt_ix { struct mlx5e_eth_addr_info { u8 addr[ETH_ALEN + 2]; u32 tt_vec; - u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */ + struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT]; }; #define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) @@ -465,15 +484,23 @@ enum { }; struct mlx5e_vlan_db { - u32 active_vlans_ft_ix[VLAN_N_VID]; - u32 untagged_rule_ft_ix; - u32 any_vlan_rule_ft_ix; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; + struct mlx5_flow_rule *untagged_rule; + struct mlx5_flow_rule *any_vlan_rule; bool filter_disabled; }; struct mlx5e_flow_table { - void *vlan; - void *main; 
+ int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; +}; + +struct mlx5e_flow_tables { + struct mlx5_flow_namespace *ns; + struct mlx5e_flow_table vlan; + struct mlx5e_flow_table main; }; struct mlx5e_priv { @@ -496,7 +523,7 @@ struct mlx5e_priv { u32 rqtn[MLX5E_NUM_RQT]; u32 tirn[MLX5E_NUM_TT]; - struct mlx5e_flow_table ft; + struct mlx5e_flow_tables fts; struct mlx5e_eth_addr_db eth_addr; struct mlx5e_vlan_db vlan; @@ -509,6 +536,7 @@ struct mlx5e_priv { struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; + struct mlx5e_tstamp tstamp; }; #define MLX5E_NET_IP_ALIGN 2 @@ -564,7 +592,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); -bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); @@ -575,6 +603,13 @@ void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); void mlx5e_init_eth_addr(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); +void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, + struct skb_shared_hwtstamps *hwts); +void mlx5e_timestamp_init(struct mlx5e_priv *priv); +void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); +int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); +int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); + int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c new file mode 100644 index 000000000000..be6543570b2b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/clocksource.h> +#include "en.h" + +enum { + MLX5E_CYCLES_SHIFT = 23 +}; + +void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, + struct skb_shared_hwtstamps *hwts) +{ + u64 nsec; + + read_lock(&tstamp->lock); + nsec = timecounter_cyc2time(&tstamp->clock, timestamp); + read_unlock(&tstamp->lock); + + hwts->hwtstamp = ns_to_ktime(nsec); +} + +static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc) +{ + struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, + cycles); + + return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; +} + +static void mlx5e_timestamp_overflow(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, + overflow_work); + + write_lock(&tstamp->lock); + timecounter_read(&tstamp->clock); + write_unlock(&tstamp->lock); + schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); +} + +int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? 
-EFAULT : 0; +} + +static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + u64 ns = timespec64_to_ns(ts); + + write_lock(&tstamp->lock); + timecounter_init(&tstamp->clock, &tstamp->cycles, ns); + write_unlock(&tstamp->lock); + + return 0; +} + +static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + u64 ns; + + write_lock(&tstamp->lock); + ns = timecounter_read(&tstamp->clock); + write_unlock(&tstamp->lock); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + + write_lock(&tstamp->lock); + timecounter_adjtime(&tstamp->clock, delta); + write_unlock(&tstamp->lock); + + return 0; +} + +static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + u64 adj; + u32 diff; + int neg_adj = 0; + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + adj = tstamp->nominal_c_mult; + adj *= delta; + diff = div_u64(adj, 1000000000ULL); + + write_lock(&tstamp->lock); + timecounter_read(&tstamp->clock); + tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : + tstamp->nominal_c_mult + diff; + write_unlock(&tstamp->lock); + + return 0; +} + +static const struct ptp_clock_info mlx5e_ptp_clock_info = { + .owner = THIS_MODULE, + .max_adj = 100000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = mlx5e_ptp_adjfreq, + .adjtime = mlx5e_ptp_adjtime, + .gettime64 = mlx5e_ptp_gettime, + .settime64 = mlx5e_ptp_settime, + .enable = NULL, +}; + +static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) +{ + tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +} + +void mlx5e_timestamp_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + u64 ns; + u64 frac = 0; + u32 dev_freq; + + mlx5e_timestamp_init_config(tstamp); + dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz); + if (!dev_freq) { + mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + return; + } + rwlock_init(&tstamp->lock); + tstamp->cycles.read = mlx5e_read_internal_timer; + tstamp->cycles.shift = MLX5E_CYCLES_SHIFT; + tstamp->cycles.mult = clocksource_khz2mult(dev_freq, + tstamp->cycles.shift); + tstamp->nominal_c_mult = tstamp->cycles.mult; + tstamp->cycles.mask = CLOCKSOURCE_MASK(41); + tstamp->mdev = priv->mdev; + + timecounter_init(&tstamp->clock, &tstamp->cycles, + ktime_to_ns(ktime_get_real())); + + /* Calculate period in seconds to call the overflow watchdog - to make + * sure counter is checked at least once every wrap around. 
+ */ + ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, + frac, &frac); + do_div(ns, NSEC_PER_SEC / 2 / HZ); + tstamp->overflow_period = ns; + + INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); + if (tstamp->overflow_period) + schedule_delayed_work(&tstamp->overflow_work, 0); + else + mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); + + /* Configure the PHC */ + tstamp->ptp_info = mlx5e_ptp_clock_info; + snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, + &priv->mdev->pdev->dev); + if (IS_ERR_OR_NULL(tstamp->ptp)) { + mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n", + PTR_ERR(tstamp->ptp)); + tstamp->ptp = NULL; + } +} + +void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return; + + if (priv->tstamp.ptp) { + ptp_clock_unregister(priv->tstamp.ptp); + priv->tstamp.ptp = NULL; + } + + cancel_delayed_work_sync(&tstamp->overflow_work); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2e022e900939..65624ac65b4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -855,6 +855,35 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, return err; } +static int mlx5e_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ret; + + ret = ethtool_op_get_ts_info(dev, info); + if (ret) + return ret; + + info->phc_index = priv->tstamp.ptp ? + ptp_clock_index(priv->tstamp.ptp) : -1; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return 0; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | + (BIT(1) << HWTSTAMP_TX_ON); + + info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | + (BIT(1) << HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -878,4 +907,5 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_tunable = mlx5e_set_tunable, .get_pauseparam = mlx5e_get_pauseparam, .set_pauseparam = mlx5e_set_pauseparam, + .get_ts_info = mlx5e_get_ts_info, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c deleted file mode 100644 index 22d603f78273..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ /dev/null @@ -1,907 +0,0 @@ -/* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/list.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/tcp.h> -#include <linux/mlx5/flow_table.h> -#include "en.h" - -enum { - MLX5E_FULLMATCH = 0, - MLX5E_ALLMULTI = 1, - MLX5E_PROMISC = 2, -}; - -enum { - MLX5E_UC = 0, - MLX5E_MC_IPV4 = 1, - MLX5E_MC_IPV6 = 2, - MLX5E_MC_OTHER = 3, -}; - -enum { - MLX5E_ACTION_NONE = 0, - MLX5E_ACTION_ADD = 1, - MLX5E_ACTION_DEL = 2, -}; - -struct mlx5e_eth_addr_hash_node { - struct hlist_node hlist; - u8 action; - struct mlx5e_eth_addr_info ai; -}; - -static inline int mlx5e_hash_eth_addr(u8 *addr) -{ - return addr[5]; -} - -static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) -{ - struct mlx5e_eth_addr_hash_node *hn; - int ix = mlx5e_hash_eth_addr(addr); - int found = 0; - - hlist_for_each_entry(hn, &hash[ix], hlist) - if (ether_addr_equal_64bits(hn->ai.addr, addr)) { - found = 1; - break; - } - - if (found) { - hn->action = MLX5E_ACTION_NONE; - return; - } - - hn = kzalloc(sizeof(*hn), GFP_ATOMIC); - if (!hn) - return; - - ether_addr_copy(hn->ai.addr, addr); - hn->action = MLX5E_ACTION_ADD; - - hlist_add_head(&hn->hlist, &hash[ix]); -} - -static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) -{ - hlist_del(&hn->hlist); - kfree(hn); -} - -static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai) -{ - void *ft = priv->ft.main; - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); - - if (ai->tt_vec & BIT(MLX5E_TT_ANY)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); -} - -static int mlx5e_get_eth_addr_type(u8 *addr) -{ - if (is_unicast_ether_addr(addr)) - return MLX5E_UC; - - if ((addr[0] == 0x01) && - (addr[1] == 0x00) && - (addr[2] == 0x5e) && - !(addr[3] & 
0x80)) - return MLX5E_MC_IPV4; - - if ((addr[0] == 0x33) && - (addr[1] == 0x33)) - return MLX5E_MC_IPV6; - - return MLX5E_MC_OTHER; -} - -static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) -{ - int eth_addr_type; - u32 ret; - - switch (type) { - case MLX5E_FULLMATCH: - eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); - switch (eth_addr_type) { - case MLX5E_UC: - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - case MLX5E_MC_IPV4: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV4) | - 0; - break; - - case MLX5E_MC_IPV6: - ret = - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV6) | - 0; - break; - - case MLX5E_MC_OTHER: - ret = - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - break; - - case MLX5E_ALLMULTI: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - default: /* MLX5E_PROMISC */ - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - return ret; -} - -static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, int type, - void *flow_context, void *match_criteria) -{ - u8 match_criteria_enable = 0; - void *match_value; - void *dest; - u8 *dmac; - u8 *match_criteria_dmac; - void *ft = priv->ft.main; - u32 *tirn = priv->tirn; - u32 *ft_ix; - u32 tt_vec; - int err; - - match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); - dmac = MLX5_ADDR_OF(fte_match_param, match_value, - outer_headers.dmac_47_16); - match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, - outer_headers.dmac_47_16); - dest = MLX5_ADDR_OF(flow_context, flow_context, destination); - - MLX5_SET(flow_context, flow_context, action, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); - MLX5_SET(flow_context, flow_context, destination_list_size, 1); - MLX5_SET(dest_format_struct, dest, destination_type, - MLX5_FLOW_CONTEXT_DEST_TYPE_TIR); - - switch (type) { - case MLX5E_FULLMATCH: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - memset(match_criteria_dmac, 0xff, ETH_ALEN); - ether_addr_copy(dmac, ai->addr); - break; - - case MLX5E_ALLMULTI: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - match_criteria_dmac[0] = 0x01; - dmac[0] = 0x01; - break; - - case MLX5E_PROMISC: - break; - } - - tt_vec = mlx5e_get_tt_vec(ai, type); - - ft_ix = &ai->ft_ix[MLX5E_TT_ANY]; - if (tt_vec & BIT(MLX5E_TT_ANY)) { - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_ANY]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_ANY); - } - - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.ethertype); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4]; - if (tt_vec & BIT(MLX5E_TT_IPV4)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4]); - err 
= mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6]; - if (tt_vec & BIT(MLX5E_TT_IPV6)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6); - } - - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.ip_protocol); - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, - IPPROTO_UDP); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP]; - if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_UDP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP]; - if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_UDP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); - } - - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, - IPPROTO_TCP); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP]; - if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_TCP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP]; - if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_TCP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); - } - - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, - IPPROTO_AH); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]; - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_IPSEC_AH]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]; - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_IPSEC_AH]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); - } - - MLX5_SET(fte_match_param, match_value, 
outer_headers.ip_protocol, - IPPROTO_ESP); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]; - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_IPSEC_ESP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]; - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_IPSEC_ESP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); - } - - return 0; - -err_del_ai: - mlx5e_del_eth_addr_from_flow_table(priv, ai); - - return err; -} - -static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, int type) -{ - u32 *flow_context; - u32 *match_criteria; - int err; - - flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + - MLX5_ST_SZ_BYTES(dest_format_struct)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!flow_context || !match_criteria) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_eth_addr_rule_out; - } - - err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context, - match_criteria); - if (err) - netdev_err(priv->netdev, "%s: failed\n", __func__); - -add_eth_addr_rule_out: - kvfree(match_criteria); - kvfree(flow_context); - return err; -} - -enum mlx5e_vlan_rule_type { - MLX5E_VLAN_RULE_TYPE_UNTAGGED, - MLX5E_VLAN_RULE_TYPE_ANY_VID, - MLX5E_VLAN_RULE_TYPE_MATCH_VID, -}; - -static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, - enum mlx5e_vlan_rule_type rule_type, u16 vid) -{ - u8 match_criteria_enable = 0; - u32 *flow_context; - void *match_value; - void *dest; - u32 *match_criteria; - u32 *ft_ix; - int err; - - flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + - MLX5_ST_SZ_BYTES(dest_format_struct)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!flow_context || !match_criteria) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_vlan_rule_out; - } - match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); - dest = MLX5_ADDR_OF(flow_context, flow_context, destination); - - MLX5_SET(flow_context, flow_context, action, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); - MLX5_SET(flow_context, flow_context, destination_list_size, 1); - MLX5_SET(dest_format_struct, dest, destination_type, - MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE); - MLX5_SET(dest_format_struct, dest, destination_id, - mlx5_get_flow_table_id(priv->ft.main)); - - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.vlan_tag); - - switch (rule_type) { - case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - ft_ix = &priv->vlan.untagged_rule_ft_ix; - break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - ft_ix = &priv->vlan.any_vlan_rule_ft_ix; - MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, - 1); - break; - default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ - ft_ix = &priv->vlan.active_vlans_ft_ix[vid]; - MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, - 1); - 
MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.first_vid); - MLX5_SET(fte_match_param, match_value, outer_headers.first_vid, - vid); - break; - } - - err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable, - match_criteria, flow_context, ft_ix); - if (err) - netdev_err(priv->netdev, "%s: failed\n", __func__); - -add_vlan_rule_out: - kvfree(match_criteria); - kvfree(flow_context); - return err; -} - -static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, - enum mlx5e_vlan_rule_type rule_type, u16 vid) -{ - switch (rule_type) { - case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - mlx5_del_flow_table_entry(priv->ft.vlan, - priv->vlan.untagged_rule_ft_ix); - break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - mlx5_del_flow_table_entry(priv->ft.vlan, - priv->vlan.any_vlan_rule_ft_ix); - break; - case MLX5E_VLAN_RULE_TYPE_MATCH_VID: - mlx5_del_flow_table_entry(priv->ft.vlan, - priv->vlan.active_vlans_ft_ix[vid]); - break; - } -} - -void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) -{ - if (!priv->vlan.filter_disabled) - return; - - priv->vlan.filter_disabled = false; - if (priv->netdev->flags & IFF_PROMISC) - return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); -} - -void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) -{ - if (priv->vlan.filter_disabled) - return; - - priv->vlan.filter_disabled = true; - if (priv->netdev->flags & IFF_PROMISC) - return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); -} - -int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); -} - -int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); - - return 0; -} - -#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ - hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) - -static void mlx5e_execute_action(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_hash_node *hn) -{ - switch (hn->action) { - case MLX5E_ACTION_ADD: - mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); - hn->action = MLX5E_ACTION_NONE; - break; - - case MLX5E_ACTION_DEL: - mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); - mlx5e_del_eth_addr_from_hash(hn); - break; - } -} - -static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) -{ - struct net_device *netdev = priv->netdev; - struct netdev_hw_addr *ha; - - netif_addr_lock_bh(netdev); - - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, - priv->netdev->dev_addr); - - netdev_for_each_uc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); - - netdev_for_each_mc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); - - netif_addr_unlock_bh(netdev); -} - -static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) -{ - struct mlx5e_eth_addr_hash_node *hn; - struct hlist_node *tmp; - int i; - - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) - mlx5e_execute_action(priv, hn); - - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) - mlx5e_execute_action(priv, hn); -} - -static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) -{ - struct mlx5e_eth_addr_hash_node *hn; - struct hlist_node *tmp; - int i; - - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) - hn->action = 
MLX5E_ACTION_DEL; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) - hn->action = MLX5E_ACTION_DEL; - - if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) - mlx5e_sync_netdev_addr(priv); - - mlx5e_apply_netdev_addr(priv); -} - -void mlx5e_set_rx_mode_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - set_rx_mode_work); - - struct mlx5e_eth_addr_db *ea = &priv->eth_addr; - struct net_device *ndev = priv->netdev; - - bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); - bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); - bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); - bool broadcast_enabled = rx_mode_enable; - - bool enable_promisc = !ea->promisc_enabled && promisc_enabled; - bool disable_promisc = ea->promisc_enabled && !promisc_enabled; - bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; - bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; - bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; - bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; - - if (enable_promisc) { - mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->vlan.filter_disabled) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - } - if (enable_allmulti) - mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); - if (enable_broadcast) - mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); - - mlx5e_handle_netdev_addr(priv); - - if (disable_broadcast) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); - if (disable_allmulti) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); - if (disable_promisc) { - if (!priv->vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); - } - - ea->promisc_enabled = promisc_enabled; - ea->allmulti_enabled = allmulti_enabled; - ea->broadcast_enabled = broadcast_enabled; -} - -void mlx5e_init_eth_addr(struct mlx5e_priv *priv) -{ - ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); -} - -static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) -{ - struct mlx5_flow_table_group *g; - u8 *dmac; - - g = kcalloc(9, sizeof(*g), GFP_KERNEL); - if (!g) - return -ENOMEM; - - g[0].log_sz = 3; - g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.ip_protocol); - - g[1].log_sz = 1; - g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, - outer_headers.ethertype); - - g[2].log_sz = 0; - - g[3].log_sz = 14; - g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria, - outer_headers.dmac_47_16); - memset(dmac, 0xff, ETH_ALEN); - MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, - outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, - outer_headers.ip_protocol); - - g[4].log_sz = 13; - g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria, - outer_headers.dmac_47_16); - memset(dmac, 0xff, ETH_ALEN); - MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria, - outer_headers.ethertype); - - g[5].log_sz = 11; - g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - 
dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria, - outer_headers.dmac_47_16); - memset(dmac, 0xff, ETH_ALEN); - - g[6].log_sz = 2; - g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria, - outer_headers.dmac_47_16); - dmac[0] = 0x01; - MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, - outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, - outer_headers.ip_protocol); - - g[7].log_sz = 1; - g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria, - outer_headers.dmac_47_16); - dmac[0] = 0x01; - MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria, - outer_headers.ethertype); - - g[8].log_sz = 0; - g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria, - outer_headers.dmac_47_16); - dmac[0] = 0x01; - priv->ft.main = mlx5_create_flow_table(priv->mdev, 1, - MLX5_FLOW_TABLE_TYPE_NIC_RCV, - 9, g); - kfree(g); - - return priv->ft.main ? 0 : -ENOMEM; -} - -static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) -{ - mlx5_destroy_flow_table(priv->ft.main); -} - -static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) -{ - struct mlx5_flow_table_group *g; - - g = kcalloc(2, sizeof(*g), GFP_KERNEL); - if (!g) - return -ENOMEM; - - g[0].log_sz = 12; - g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.first_vid); - - /* untagged + any vlan id */ - g[1].log_sz = 1; - g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, - outer_headers.vlan_tag); - - priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0, - MLX5_FLOW_TABLE_TYPE_NIC_RCV, - 2, g); - - kfree(g); - return priv->ft.vlan ? 0 : -ENOMEM; -} - -static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) -{ - mlx5_destroy_flow_table(priv->ft.vlan); -} - -int mlx5e_create_flow_tables(struct mlx5e_priv *priv) -{ - int err; - - err = mlx5e_create_main_flow_table(priv); - if (err) - return err; - - err = mlx5e_create_vlan_flow_table(priv); - if (err) - goto err_destroy_main_flow_table; - - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - if (err) - goto err_destroy_vlan_flow_table; - - return 0; - -err_destroy_vlan_flow_table: - mlx5e_destroy_vlan_flow_table(priv); - -err_destroy_main_flow_table: - mlx5e_destroy_main_flow_table(priv); - - return err; -} - -void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) -{ - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - mlx5e_destroy_vlan_flow_table(priv); - mlx5e_destroy_main_flow_table(priv); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c new file mode 100644 index 000000000000..80d81abc4820 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -0,0 +1,1224 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/mlx5/fs.h> +#include "en.h" + +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + +enum { + MLX5E_FULLMATCH = 0, + MLX5E_ALLMULTI = 1, + MLX5E_PROMISC = 2, +}; + +enum { + MLX5E_UC = 0, + MLX5E_MC_IPV4 = 1, + MLX5E_MC_IPV6 = 2, + MLX5E_MC_OTHER = 3, +}; + +enum { + MLX5E_ACTION_NONE = 0, + MLX5E_ACTION_ADD = 1, + MLX5E_ACTION_DEL = 2, +}; + +struct mlx5e_eth_addr_hash_node { + struct hlist_node hlist; + u8 action; + struct mlx5e_eth_addr_info ai; +}; + +static inline int mlx5e_hash_eth_addr(u8 *addr) +{ + return addr[5]; +} + +static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +{ + struct mlx5e_eth_addr_hash_node *hn; + int ix = mlx5e_hash_eth_addr(addr); + int found = 0; + + hlist_for_each_entry(hn, &hash[ix], hlist) + if (ether_addr_equal_64bits(hn->ai.addr, addr)) { + found = 1; + break; + } + + if (found) { + hn->action = MLX5E_ACTION_NONE; + return; + } + + hn = kzalloc(sizeof(*hn), GFP_ATOMIC); + if (!hn) + return; + + ether_addr_copy(hn->ai.addr, addr); + hn->action = MLX5E_ACTION_ADD; + + hlist_add_head(&hn->hlist, &hash[ix]); +} + +static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +{ + hlist_del(&hn->hlist); + kfree(hn); +} + +static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai) +{ + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); + + if (ai->tt_vec 
& BIT(MLX5E_TT_IPV6)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); + + if (ai->tt_vec & BIT(MLX5E_TT_ANY)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); +} + +static int mlx5e_get_eth_addr_type(u8 *addr) +{ + if (is_unicast_ether_addr(addr)) + return MLX5E_UC; + + if ((addr[0] == 0x01) && + (addr[1] == 0x00) && + (addr[2] == 0x5e) && + !(addr[3] & 0x80)) + return MLX5E_MC_IPV4; + + if ((addr[0] == 0x33) && + (addr[1] == 0x33)) + return MLX5E_MC_IPV6; + + return MLX5E_MC_OTHER; +} + +static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) +{ + int eth_addr_type; + u32 ret; + + switch (type) { + case MLX5E_FULLMATCH: + eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); + switch (eth_addr_type) { + case MLX5E_UC: + ret = + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | + 0; + break; + + case MLX5E_MC_IPV4: + ret = + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV4) | + 0; + break; + + case MLX5E_MC_IPV6: + ret = + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV6) | + 0; + break; + + case MLX5E_MC_OTHER: + ret = + BIT(MLX5E_TT_ANY) | + 0; + break; + } + + break; + + case MLX5E_ALLMULTI: + ret = + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | + 0; + break; + + default: /* MLX5E_PROMISC */ + ret = + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | + 0; + break; + } + + return ret; +} + +static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, + int type, u32 *mc, u32 *mv) +{ + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + struct mlx5_flow_rule **rule_p; + struct mlx5_flow_table *ft = priv->fts.main.t; + u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, + outer_headers.dmac_47_16); + u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, + outer_headers.dmac_47_16); + u32 *tirn = priv->tirn; + u32 tt_vec; + int err = 0; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + eth_broadcast_addr(mc_dmac); + ether_addr_copy(mv_dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + mc_dmac[0] = 0x01; + mv_dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + tt_vec = mlx5e_get_tt_vec(ai, type); + + if (tt_vec & BIT(MLX5E_TT_ANY)) { + rule_p = &ai->ft_rule[MLX5E_TT_ANY]; + dest.tir_num = tirn[MLX5E_TT_ANY]; + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_ANY); + } + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + if (tt_vec & BIT(MLX5E_TT_IPV4)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4]; + dest.tir_num = tirn[MLX5E_TT_IPV4]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + 
ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6]; + dest.tir_num = tirn[MLX5E_TT_IPV6]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6); + } + + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP); + + if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP]; + dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP]; + dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); + } + + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP); + + if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP]; + dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP]; + dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); + } + + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH); + + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]; + dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]; + dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec 
|= BIT(MLX5E_TT_IPV6_IPSEC_AH); + } + + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP); + + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]; + dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]; + dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); + } + + return 0; + +err_del_ai: + err = PTR_ERR(*rule_p); + *rule_p = NULL; + mlx5e_del_eth_addr_from_flow_table(priv, ai); + + return err; +} + +static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type) +{ + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_eth_addr_rule_out; + } + + err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria, + match_value); + +add_eth_addr_rule_out: + kvfree(match_criteria); + kvfree(match_value); + + return err; +} + +static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) +{ + struct net_device *ndev = priv->netdev; + int max_list_size; + int list_size; + u16 *vlans; + int vlan; + int err; + int i; + + list_size = 0; + for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) + list_size++; + + max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); + + if (list_size > max_list_size) { + netdev_warn(ndev, + "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", + list_size, max_list_size); + list_size = max_list_size; + } + + vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); + if (!vlans) + return -ENOMEM; + + i = 0; + for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { + if (i >= list_size) + break; + vlans[i++] = vlan; + } + + err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); + if (err) + netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", + err); + + kfree(vlans); + return err; +} + +enum mlx5e_vlan_rule_type { + MLX5E_VLAN_RULE_TYPE_UNTAGGED, + MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_VID, +}; + +static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, + u16 vid, u32 *mc, u32 *mv) +{ + struct mlx5_flow_table *ft = priv->fts.vlan.t; + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + struct mlx5_flow_rule **rule_p; + int err = 0; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fts.main.t; + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + rule_p = &priv->vlan.untagged_rule; + break; + case 
MLX5E_VLAN_RULE_TYPE_ANY_VID: + rule_p = &priv->vlan.any_vlan_rule; + MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); + break; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ + rule_p = &priv->vlan.active_vlans_rule[vid]; + MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); + MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); + break; + } + + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + &dest); + + if (IS_ERR(*rule_p)) { + err = PTR_ERR(*rule_p); + *rule_p = NULL; + netdev_err(priv->netdev, "%s: add rule failed\n", __func__); + } + + return err; +} + +static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_vlan_rule_out; + } + + if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) + mlx5e_vport_context_update_vlans(priv); + + err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria, + match_value); + +add_vlan_rule_out: + kvfree(match_criteria); + kvfree(match_value); + + return err; +} + +static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + if (priv->vlan.untagged_rule) { + mlx5_del_flow_rule(priv->vlan.untagged_rule); + priv->vlan.untagged_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + if (priv->vlan.any_vlan_rule) { + mlx5_del_flow_rule(priv->vlan.any_vlan_rule); + priv->vlan.any_vlan_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + mlx5e_vport_context_update_vlans(priv); + if (priv->vlan.active_vlans_rule[vid]) { + mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]); + priv->vlan.active_vlans_rule[vid] = NULL; + } + mlx5e_vport_context_update_vlans(priv); + break; + } +} + +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +{ + if (!priv->vlan.filter_disabled) + return; + + priv->vlan.filter_disabled = false; + if (priv->netdev->flags & IFF_PROMISC) + return; + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); +} + +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +{ + if (priv->vlan.filter_disabled) + return; + + priv->vlan.filter_disabled = true; + if (priv->netdev->flags & IFF_PROMISC) + return; + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + set_bit(vid, priv->vlan.active_vlans); + + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + clear_bit(vid, priv->vlan.active_vlans); + + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + + return 0; +} + +#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +static void mlx5e_execute_action(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_hash_node *hn) +{ + switch 
(hn->action) { + case MLX5E_ACTION_ADD: + mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + hn->action = MLX5E_ACTION_NONE; + break; + + case MLX5E_ACTION_DEL: + mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); + mlx5e_del_eth_addr_from_hash(hn); + break; + } +} + +static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, + priv->netdev->dev_addr); + + netdev_for_each_uc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + + netdev_for_each_mc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + + netif_addr_unlock_bh(netdev); +} + +static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, + u8 addr_array[][ETH_ALEN], int size) +{ + bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); + struct net_device *ndev = priv->netdev; + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_head *addr_list; + struct hlist_node *tmp; + int i = 0; + int hi; + + addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + + if (is_uc) /* Make sure our own address is pushed first */ + ether_addr_copy(addr_array[i++], ndev->dev_addr); + else if (priv->eth_addr.broadcast_enabled) + ether_addr_copy(addr_array[i++], ndev->broadcast); + + mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { + if (ether_addr_equal(ndev->dev_addr, hn->ai.addr)) + continue; + if (i >= size) + break; + ether_addr_copy(addr_array[i++], hn->ai.addr); + } +} + +static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, + int list_type) +{ + bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); + struct mlx5e_eth_addr_hash_node *hn; + u8 (*addr_array)[ETH_ALEN] = NULL; + struct hlist_head *addr_list; + struct hlist_node *tmp; + int max_size; + int size; + int err; + int hi; + + size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); + max_size = is_uc ? + 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); + + addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) + size++; + + if (size > max_size) { + netdev_warn(priv->netdev, + "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", + is_uc ? "UC" : "MC", size, max_size); + size = max_size; + } + + if (size) { + addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL); + if (!addr_array) { + err = -ENOMEM; + goto out; + } + mlx5e_fill_addr_array(priv, list_type, addr_array, size); + } + + err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); +out: + if (err) + netdev_err(priv->netdev, + "Failed to modify vport %s list err(%d)\n", + is_uc ? 
"UC" : "MC", err); + kfree(addr_array); +} + +static void mlx5e_vport_context_update(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + + mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); + mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); + mlx5_modify_nic_vport_promisc(priv->mdev, 0, + ea->allmulti_enabled, + ea->promisc_enabled); +} + +static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_execute_action(priv, hn); + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_execute_action(priv, hn); +} + +static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + hn->action = MLX5E_ACTION_DEL; + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + hn->action = MLX5E_ACTION_DEL; + + if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) + mlx5e_sync_netdev_addr(priv); + + mlx5e_apply_netdev_addr(priv); +} + +void mlx5e_set_rx_mode_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct net_device *ndev = priv->netdev; + + bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); + bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); + bool broadcast_enabled = rx_mode_enable; + + bool enable_promisc = !ea->promisc_enabled && promisc_enabled; + bool disable_promisc = ea->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + + if (enable_promisc) { + mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->vlan.filter_disabled) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } + if (enable_allmulti) + mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + if (enable_broadcast) + mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + + mlx5e_handle_netdev_addr(priv); + + if (disable_broadcast) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + if (disable_allmulti) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + if (disable_promisc) { + if (!priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + } + + ea->promisc_enabled = promisc_enabled; + ea->allmulti_enabled = allmulti_enabled; + ea->broadcast_enabled = broadcast_enabled; + + mlx5e_vport_context_update(priv); +} + +static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) +{ + int i; + + for (i = ft->num_groups - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL(ft->g[i])) + mlx5_destroy_flow_group(ft->g[i]); + ft->g[i] = NULL; + } + ft->num_groups = 0; +} + +void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +{ + ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); +} + +#define MLX5E_MAIN_GROUP0_SIZE BIT(3) +#define MLX5E_MAIN_GROUP1_SIZE BIT(1) +#define 
MLX5E_MAIN_GROUP2_SIZE BIT(0) +#define MLX5E_MAIN_GROUP3_SIZE BIT(14) +#define MLX5E_MAIN_GROUP4_SIZE BIT(13) +#define MLX5E_MAIN_GROUP5_SIZE BIT(11) +#define MLX5E_MAIN_GROUP6_SIZE BIT(2) +#define MLX5E_MAIN_GROUP7_SIZE BIT(1) +#define MLX5E_MAIN_GROUP8_SIZE BIT(0) +#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\ + MLX5E_MAIN_GROUP1_SIZE +\ + MLX5E_MAIN_GROUP2_SIZE +\ + MLX5E_MAIN_GROUP3_SIZE +\ + MLX5E_MAIN_GROUP4_SIZE +\ + MLX5E_MAIN_GROUP5_SIZE +\ + MLX5E_MAIN_GROUP6_SIZE +\ + MLX5E_MAIN_GROUP7_SIZE +\ + MLX5E_MAIN_GROUP8_SIZE) + +static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) +{ + u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria.outer_headers.dmac_47_16); + int err; + int ix = 0; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP0_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + eth_broadcast_addr(dmac); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + eth_broadcast_addr(dmac); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP4_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + eth_broadcast_addr(dmac); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP5_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP6_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP7_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP8_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + return 0; + +err_destroy_groups: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + + return err; +} + +static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) +{ + u32 *in; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + err = __mlx5e_create_main_groups(ft, in, inlen); + + kvfree(in); + return err; +} + +static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fts.main; + int err; + + ft->num_groups = 0; + ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE); + + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) { + err = -ENOMEM; + goto err_destroy_main_flow_table; + } + + err = mlx5e_create_main_groups(ft); + if (err) + goto err_free_g; + return 0; + +err_free_g: + kfree(ft->g); + +err_destroy_main_flow_table: + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; + + return err; +} + +static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) +{ + mlx5e_destroy_groups(ft); + kfree(ft->g); + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; +} + +static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_flow_table(&priv->fts.main); +} + +#define MLX5E_NUM_VLAN_GROUPS 2 +#define MLX5E_VLAN_GROUP0_SIZE BIT(12) +#define MLX5E_VLAN_GROUP1_SIZE BIT(1) +#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ + MLX5E_VLAN_GROUP1_SIZE) + +static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) +{ + int err; + int ix = 0; + u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP0_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + 
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + return 0; + +err_destroy_groups: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + + return err; +} + +static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) +{ + u32 *in; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + err = __mlx5e_create_vlan_groups(ft, in, inlen); + + kvfree(in); + return err; +} + +static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fts.vlan; + int err; + + ft->num_groups = 0; + ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE); + + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) { + err = -ENOMEM; + goto err_destroy_vlan_flow_table; + } + + err = mlx5e_create_vlan_groups(ft); + if (err) + goto err_free_g; + + return 0; + +err_free_g: + kfree(ft->g); + +err_destroy_vlan_flow_table: + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; + + return err; +} + +static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_flow_table(&priv->fts.vlan); +} + +int mlx5e_create_flow_tables(struct mlx5e_priv *priv) +{ + int err; + + priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + + if (!priv->fts.ns) + return -EINVAL; + + err = mlx5e_create_vlan_flow_table(priv); + if (err) + return err; + + err = mlx5e_create_main_flow_table(priv); + if (err) + goto err_destroy_vlan_flow_table; + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + goto err_destroy_main_flow_table; + + return 0; + +err_destroy_main_flow_table: + mlx5e_destroy_main_flow_table(priv); +err_destroy_vlan_flow_table: + mlx5e_destroy_vlan_flow_table(priv); + + return err; +} + +void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) +{ + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + mlx5e_destroy_main_flow_table(priv); + mlx5e_destroy_vlan_flow_table(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1e52db32c73d..5c74a734f158 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -30,8 +30,9 @@ * SOFTWARE. 
*/ -#include <linux/mlx5/flow_table.h> +#include <linux/mlx5/fs.h> #include "en.h" +#include "eswitch.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -63,7 +64,7 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv) u8 port_state; port_state = mlx5_query_vport_state(mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT); + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); if (port_state == VPORT_STATE_UP) netif_carrier_on(priv->netdev); @@ -350,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->pdev = c->pdev; rq->netdev = c->netdev; + rq->tstamp = &priv->tstamp; rq->channel = c; rq->ix = c->ix; rq->priv = c->priv; @@ -506,6 +508,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_sq_db(struct mlx5e_sq *sq) { + kfree(sq->wqe_info); kfree(sq->dma_fifo); kfree(sq->skb); } @@ -518,8 +521,10 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa) sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa); sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL, numa); + sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL, + numa); - if (!sq->skb || !sq->dma_fifo) { + if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) { mlx5e_free_sq_db(sq); return -ENOMEM; } @@ -567,6 +572,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); sq->pdev = c->pdev; + sq->tstamp = &priv->tstamp; sq->mkey_be = c->mkey_be; sq->channel = c; sq->tc = tc; @@ -1020,6 +1026,7 @@ err_close_tx_cqs: err_napi_del: netif_napi_del(&c->napi); + napi_hash_del(&c->napi); kfree(c); return err; @@ -1033,6 +1040,10 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_cq(&c->rq.cq); mlx5e_close_tx_cqs(c); netif_napi_del(&c->napi); + + napi_hash_del(&c->napi); + synchronize_rcu(); + kfree(c); } @@ -1421,6 +1432,7 @@ int mlx5e_open_locked(struct net_device *netdev) mlx5e_update_carrier(priv); mlx5e_redirect_rqts(priv); + mlx5e_timestamp_init(priv); schedule_delayed_work(&priv->update_stats_work, 0); @@ -1457,6 +1469,7 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); + mlx5e_timestamp_cleanup(priv); mlx5e_redirect_rqts(priv); netif_carrier_off(priv->netdev); mlx5e_close_channels(priv); @@ -1926,6 +1939,91 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) return err; } +static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return mlx5e_hwstamp_set(dev, ifr); + case SIOCGHWTSTAMP: + return mlx5e_hwstamp_get(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac); +} + +static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1, + vlan, qos); +} + +static int mlx5_vport_link2ifla(u8 esw_link) +{ + switch (esw_link) { + case MLX5_ESW_VPORT_ADMIN_STATE_DOWN: + return IFLA_VF_LINK_STATE_DISABLE; + case MLX5_ESW_VPORT_ADMIN_STATE_UP: + return IFLA_VF_LINK_STATE_ENABLE; + } + return IFLA_VF_LINK_STATE_AUTO; +} + +static int mlx5_ifla_link2vport(u8 ifla_link) +{ + switch (ifla_link) { + case IFLA_VF_LINK_STATE_DISABLE: + return 
MLX5_ESW_VPORT_ADMIN_STATE_DOWN; + case IFLA_VF_LINK_STATE_ENABLE: + return MLX5_ESW_VPORT_ADMIN_STATE_UP; + } + return MLX5_ESW_VPORT_ADMIN_STATE_AUTO; +} + +static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, + int link_state) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1, + mlx5_ifla_link2vport(link_state)); +} + +static int mlx5e_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi); + if (err) + return err; + ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate); + return 0; +} + +static int mlx5e_get_vf_stats(struct net_device *dev, + int vf, struct ifla_vf_stats *vf_stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, + vf_stats); +} + static struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@ -1937,6 +2035,7 @@ static struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, + .ndo_do_ioctl = mlx5e_ioctl, }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -2023,7 +2122,12 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr); + mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr); + if (is_zero_ether_addr(netdev->dev_addr) && + !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { + eth_hw_addr_random(netdev); + mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr); + } } static void mlx5e_build_netdev(struct net_device *netdev) @@ -2036,6 +2140,14 @@ static void mlx5e_build_netdev(struct net_device *netdev) if (priv->params.num_tc > 1) mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac; + mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan; + mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config; + mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state; + mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats; + } + netdev->netdev_ops = &mlx5e_netdev_ops; netdev->watchdog_timeo = 15 * HZ; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index cf0098596e85..dd959d929aad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -33,8 +33,14 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <net/busy_poll.h> #include "en.h" +static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) +{ + return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; +} + static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) { @@ -189,6 +195,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, { struct net_device *netdev = rq->netdev; u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + struct mlx5e_tstamp *tstamp = rq->tstamp; int lro_num_seg; skb_put(skb, cqe_bcnt); @@ -201,6 +208,9 @@ static inline void 
mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, rq->stats.lro_bytes += cqe_bcnt; } + if (unlikely(mlx5e_rx_hw_stamp(tstamp))) + mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); + mlx5e_handle_csum(netdev, cqe, rq, skb); skb->protocol = eth_type_trans(skb, netdev); @@ -215,16 +225,16 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, be16_to_cpu(cqe->vlan_info)); } -bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) +int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - int i; + int work_done; /* avoid accessing cq (dma coherent memory) if not needed */ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return false; + return 0; - for (i = 0; i < budget; i++) { + for (work_done = 0; work_done < budget; work_done++) { struct mlx5e_rx_wqe *wqe; struct mlx5_cqe64 *cqe; struct sk_buff *skb; @@ -269,10 +279,8 @@ wq_ll_pop: /* ensure cq space is freed before enabling more cqes */ wmb(); - if (i == budget) { + if (work_done == budget) set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return true; - } - return false; + return work_done; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1341b1d3c421..2c3fba0fff54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -92,11 +92,11 @@ static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i) return &sq->dma_fifo[i & sq->dma_fifo_mask]; } -static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma) { int i; - for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { + for (i = 0; i < num_dma; i++) { struct mlx5e_sq_dma *last_pushed_dma = mlx5e_dma_get(sq, --sq->dma_fifo_pc); @@ -139,19 +139,28 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, return MLX5E_MIN_INLINE; } -static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) +static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, + unsigned int *skb_len, + unsigned int len) +{ + *skb_len -= len; + *skb_data += len; +} + +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs, + unsigned char **skb_data, + unsigned int *skb_len) { struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - skb_copy_from_linear_data(skb, vhdr, cpy1_sz); - skb_pull_inline(skb, cpy1_sz); + memcpy(vhdr, *skb_data, cpy1_sz); + mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); - skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto, - cpy2_sz); - skb_pull_inline(skb, cpy2_sz); + memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz); + mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz); } static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) @@ -160,11 +169,14 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) u16 pi = sq->pc & wq->sz_m1; struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi]; struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + unsigned char *skb_data = skb->data; + unsigned int skb_len = skb->len; u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; bool bf = false; @@ 
-192,8 +204,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) opcode = MLX5_OPCODE_LSO; ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); payload_len = skb->len - ihs; - MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len + - (skb_shinfo(skb)->gso_segs - 1) * ihs; + wi->num_bytes = skb->len + + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { @@ -201,16 +213,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); - MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, - ETH_ZLEN); + wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs); + mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, + &skb_len); ihs += VLAN_HLEN; } else { - skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs); - skb_pull_inline(skb, ihs); + memcpy(eseg->inline_hdr_start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); } eseg->inline_hdr_sz = cpu_to_be16(ihs); @@ -220,11 +232,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) MLX5_SEND_WQE_DS); dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; - MLX5E_TX_SKB_CB(skb)->num_dma = 0; + wi->num_dma = 0; - headlen = skb_headlen(skb); + headlen = skb_len - skb->data_len; if (headlen) { - dma_addr = dma_map_single(sq->pdev, skb->data, headlen, + dma_addr = dma_map_single(sq->pdev, skb_data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) goto dma_unmap_wqe_err; @@ -234,7 +246,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dseg->byte_count = cpu_to_be32(headlen); mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE); - MLX5E_TX_SKB_CB(skb)->num_dma++; + wi->num_dma++; dseg++; } @@ -253,23 +265,25 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); - MLX5E_TX_SKB_CB(skb)->num_dma++; + wi->num_dma++; dseg++; } - ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma; + ds_cnt += wi->num_dma; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); sq->skb[pi] = skb; - MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt, - MLX5_SEND_WQEBB_NUM_DS); - sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + sq->pc += wi->num_wqebbs; + + netdev_tx_sent_queue(sq->txq, wi->num_bytes); - netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); @@ -280,7 +294,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) int bf_sz = 0; if (bf && sq->uar_bf_map) - bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3; + bf_sz = wi->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; mlx5e_tx_notify_hw(sq, wqe, bf_sz); @@ -297,7 +311,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dma_unmap_wqe_err: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, skb); + mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); dev_kfree_skb_any(skb); @@ -352,6 +366,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) 
wqe_counter = be16_to_cpu(cqe->wqe_counter); do { + struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; u16 ci; int j; @@ -360,6 +375,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) ci = sqcc & sq->wq.sz_m1; skb = sq->skb[ci]; + wi = &sq->wqe_info[ci]; if (unlikely(!skb)) { /* nop */ sq->stats.nop++; @@ -367,7 +383,16 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) continue; } - for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP)) { + struct skb_shared_hwtstamps hwts = {}; + + mlx5e_fill_hwstamp(sq->tstamp, + get_cqe_ts(cqe), &hwts); + skb_tstamp_tx(skb, &hwts); + } + + for (j = 0; j < wi->num_dma; j++) { struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, dma_fifo_cc++); @@ -375,8 +400,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) } npkts++; - nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes; - sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; dev_kfree_skb(skb); } while (!last_wqe); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 2c7cb6755d1d..4ac8d716dbdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -54,6 +54,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); bool busy = false; + int work_done; int i; clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); @@ -61,26 +62,26 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); - busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); - + work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); + busy |= work_done == budget; busy |= mlx5e_post_rx_wqes(&c->rq); if (busy) return budget; - napi_complete(napi); + napi_complete_done(napi, work_done); /* avoid losing completion event during/after polling cqs */ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) { napi_schedule(napi); - return 0; + return work_done; } for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); mlx5e_cq_arm(&c->rq.cq); - return 0; + return work_done; } void mlx5e_completion_event(struct mlx5_core_cq *mcq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 713ead583347..23c244a7e5d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -35,6 +35,9 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" +#ifdef CONFIG_MLX5_CORE_EN +#include "eswitch.h" +#endif enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ -287,6 +290,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) break; #endif +#ifdef CONFIG_MLX5_CORE_EN + case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: + mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); + break; +#endif default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -459,6 +467,11 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, pg)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); + if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && + MLX5_CAP_GEN(dev, vport_group_manager) && + mlx5_core_is_pf(dev)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c new file mode 100644 index 000000000000..bc3d9f8a75c1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -0,0 +1,1097 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "mlx5_core.h" +#include "eswitch.h" + +#define UPLINK_VPORT 0xFFFF + +#define MLX5_DEBUG_ESWITCH_MASK BIT(3) + +#define esw_info(dev, format, ...) \ + pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_warn(dev, format, ...) \ + pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_debug(dev, format, ...) 
\ + mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) + +enum { + MLX5_ACTION_NONE = 0, + MLX5_ACTION_ADD = 1, + MLX5_ACTION_DEL = 2, +}; + +/* E-Switch UC L2 table hash node */ +struct esw_uc_addr { + struct l2addr_node node; + u32 table_index; + u32 vport; +}; + +/* E-Switch MC FDB table hash node */ +struct esw_mc_addr { /* SRIOV only */ + struct l2addr_node node; + struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */ + u32 refcnt; +}; + +/* Vport UC/MC hash node */ +struct vport_addr { + struct l2addr_node node; + u8 action; + u32 vport; + struct mlx5_flow_rule *flow_rule; /* SRIOV only */ +}; + +enum { + UC_ADDR_CHANGE = BIT(0), + MC_ADDR_CHANGE = BIT(1), +}; + +/* Vport context events */ +#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ + MC_ADDR_CHANGE) + +static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, + u32 events_mask) +{ + int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]; + int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + void *nic_vport_ctx; + int err; + + memset(out, 0, sizeof(out)); + memset(in, 0, sizeof(in)); + + MLX5_SET(modify_nic_vport_context_in, in, + opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, + in, nic_vport_context); + + MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); + + if (events_mask & UC_ADDR_CHANGE) + MLX5_SET(nic_vport_context, nic_vport_ctx, + event_on_uc_address_change, 1); + if (events_mask & MC_ADDR_CHANGE) + MLX5_SET(nic_vport_context, nic_vport_ctx, + event_on_mc_address_change, 1); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + goto ex; + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto ex; + return 0; +ex: + return err; +} + +/* E-Switch vport context HW commands */ +static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); + + MLX5_SET(query_esw_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_esw_vport_context_in, in, other_vport, 1); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); +} + +static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, + u16 *vlan, u8 *qos) +{ + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)]; + int err; + bool cvlan_strip; + bool cvlan_insert; + + memset(out, 0, sizeof(out)); + + *vlan = 0; + *qos = 0; + + if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || + !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) + return -ENOTSUPP; + + err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out)); + if (err) + goto out; + + cvlan_strip = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.vport_cvlan_strip); + + cvlan_insert = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.vport_cvlan_insert); + + if (cvlan_strip || cvlan_insert) { + *vlan = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.cvlan_id); + *qos = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.cvlan_pcp); + } + + esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n", + vport, *vlan, *qos); +out: + return err; +} + +static int 
modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, + void *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)]; + + memset(out, 0, sizeof(out)); + + MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + + MLX5_SET(modify_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); + + return mlx5_cmd_exec_check_status(dev, in, inlen, + out, sizeof(out)); +} + +static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, + u16 vlan, u8 qos, bool set) +{ + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)]; + + memset(in, 0, sizeof(in)); + + if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || + !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) + return -ENOTSUPP; + + esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n", + vport, vlan, qos, set); + + if (set) { + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_strip, 1); + /* insert only if no vlan in packet */ + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_insert, 1); + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.cvlan_pcp, qos); + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.cvlan_id, vlan); + } + + MLX5_SET(modify_esw_vport_context_in, in, + field_select.vport_cvlan_strip, 1); + MLX5_SET(modify_esw_vport_context_in, in, + field_select.vport_cvlan_insert, 1); + + return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); +} + +/* HW L2 Table (MPFS) management */ +static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, + u8 *mac, u8 vlan_valid, u16 vlan) +{ + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)]; + u8 *in_mac_addr; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(set_l2_table_entry_in, in, opcode, + MLX5_CMD_OP_SET_L2_TABLE_ENTRY); + MLX5_SET(set_l2_table_entry_in, in, table_index, index); + MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid); + MLX5_SET(set_l2_table_entry_in, in, vlan, vlan); + + in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); + ether_addr_copy(&in_mac_addr[2], mac); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) +{ + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(delete_l2_table_entry_in, in, opcode, + MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); + MLX5_SET(delete_l2_table_entry_in, in, table_index, index); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) +{ + int err = 0; + + *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size); + if (*ix >= l2_table->size) + err = -ENOSPC; + else + __set_bit(*ix, l2_table->bitmap); + + return err; +} + +static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix) +{ + __clear_bit(ix, l2_table->bitmap); +} + +static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac, + u8 vlan_valid, u16 vlan, + u32 *index) +{ + struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; + int err; + + err = alloc_l2_table_index(l2_table, index); + if (err) + return err; + + err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan); + if (err) + free_l2_table_index(l2_table, 
*index); + + return err; +} + +static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) +{ + struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; + + del_l2_table_entry_cmd(dev, index); + free_l2_table_index(l2_table, index); +} + +/* E-Switch FDB */ +static struct mlx5_flow_rule * +esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +{ + int match_header = MLX5_MATCH_OUTER_HEADERS; + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule = NULL; + u32 *match_v; + u32 *match_c; + u8 *dmac_v; + u8 *dmac_c; + + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_v || !match_c) { + pr_warn("FDB: Failed to alloc match parameters\n"); + goto out; + } + dmac_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers.dmac_47_16); + dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers.dmac_47_16); + + ether_addr_copy(dmac_v, mac); + /* Match criteria mask */ + memset(dmac_c, 0xff, 6); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport_num = vport; + + esw_debug(esw->dev, + "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", + dmac_v, dmac_c, vport); + flow_rule = + mlx5_add_flow_rule(esw->fdb_table.fdb, + match_header, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR_OR_NULL(flow_rule)) { + pr_warn( + "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", + dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); + flow_rule = NULL; + } +out: + kfree(match_v); + kfree(match_c); + return flow_rule; +} + +static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + struct mlx5_flow_group *g; + void *match_criteria; + int table_size; + u32 *flow_group_in; + u8 *dmac; + int err = 0; + + esw_debug(dev, "Create FDB log_max_size(%d)\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -ENOMEM; + } + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + memset(flow_group_in, 0, inlen); + + table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + fdb = mlx5_create_flow_table(root_ns, 0, table_size); + if (IS_ERR_OR_NULL(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create FDB Table err %d\n", err); + goto out; + } + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + eth_broadcast_addr(dmac); + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create flow group err(%d)\n", err); + goto out; + } + + esw->fdb_table.addr_grp = g; + esw->fdb_table.fdb = fdb; +out: + kfree(flow_group_in); + if (err && !IS_ERR_OR_NULL(fdb)) + mlx5_destroy_flow_table(fdb); + return err; +} + +static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) +{ + if (!esw->fdb_table.fdb) + 
return; + + esw_debug(esw->dev, "Destroy FDB Table\n"); + mlx5_destroy_flow_group(esw->fdb_table.addr_grp); + mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw->fdb_table.fdb = NULL; + esw->fdb_table.addr_grp = NULL; +} + +/* E-Switch vport UC/MC lists management */ +typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, + struct vport_addr *vaddr); + +static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->l2_table.l2_hash; + struct esw_uc_addr *esw_uc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + int err; + + esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); + if (esw_uc) { + esw_warn(esw->dev, + "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n", + mac, vport, esw_uc->vport); + return -EEXIST; + } + + esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL); + if (!esw_uc) + return -ENOMEM; + esw_uc->vport = vport; + + err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index); + if (err) + goto abort; + + if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */ + vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); + + esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", + vport, mac, esw_uc->table_index, vaddr->flow_rule); + return err; +abort: + l2addr_hash_del(esw_uc); + return err; +} + +static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->l2_table.l2_hash; + struct esw_uc_addr *esw_uc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + + esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); + if (!esw_uc || esw_uc->vport != vport) { + esw_debug(esw->dev, + "MAC(%pM) doesn't belong to vport (%d)\n", + mac, vport); + return -EINVAL; + } + esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n", + vport, mac, esw_uc->table_index, vaddr->flow_rule); + + del_l2_table_entry(esw->dev, esw_uc->table_index); + + if (vaddr->flow_rule) + mlx5_del_flow_rule(vaddr->flow_rule); + vaddr->flow_rule = NULL; + + l2addr_hash_del(esw_uc); + return 0; +} + +static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->mc_table; + struct esw_mc_addr *esw_mc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + + if (!esw->fdb_table.fdb) + return 0; + + esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); + if (esw_mc) + goto add; + + esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL); + if (!esw_mc) + return -ENOMEM; + + esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ + esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT); +add: + esw_mc->refcnt++; + /* Forward MC MAC to vport */ + vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); + esw_debug(esw->dev, + "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", + vport, mac, vaddr->flow_rule, + esw_mc->refcnt, esw_mc->uplink_rule); + return 0; +} + +static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->mc_table; + struct esw_mc_addr *esw_mc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + + if (!esw->fdb_table.fdb) + return 0; + + esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); + if (!esw_mc) { + esw_warn(esw->dev, + "Failed to find eswitch MC addr for MAC(%pM) vport(%d)", + mac, vport); + return -EINVAL; + } + esw_debug(esw->dev, + "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", + vport, mac, 
vaddr->flow_rule, esw_mc->refcnt, + esw_mc->uplink_rule); + + if (vaddr->flow_rule) + mlx5_del_flow_rule(vaddr->flow_rule); + vaddr->flow_rule = NULL; + + if (--esw_mc->refcnt) + return 0; + + if (esw_mc->uplink_rule) + mlx5_del_flow_rule(esw_mc->uplink_rule); + + l2addr_hash_del(esw_mc); + return 0; +} + +/* Apply vport UC/MC list to HW l2 table and FDB table */ +static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, + u32 vport_num, int list_type) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; + vport_addr_action vport_addr_add; + vport_addr_action vport_addr_del; + struct vport_addr *addr; + struct l2addr_node *node; + struct hlist_head *hash; + struct hlist_node *tmp; + int hi; + + vport_addr_add = is_uc ? esw_add_uc_addr : + esw_add_mc_addr; + vport_addr_del = is_uc ? esw_del_uc_addr : + esw_del_mc_addr; + + hash = is_uc ? vport->uc_list : vport->mc_list; + for_each_l2hash_node(node, tmp, hash, hi) { + addr = container_of(node, struct vport_addr, node); + switch (addr->action) { + case MLX5_ACTION_ADD: + vport_addr_add(esw, addr); + addr->action = MLX5_ACTION_NONE; + break; + case MLX5_ACTION_DEL: + vport_addr_del(esw, addr); + l2addr_hash_del(addr); + break; + } + } +} + +/* Sync vport UC/MC list from vport context */ +static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, + u32 vport_num, int list_type) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; + u8 (*mac_list)[ETH_ALEN]; + struct l2addr_node *node; + struct vport_addr *addr; + struct hlist_head *hash; + struct hlist_node *tmp; + int size; + int err; + int hi; + int i; + + size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) : + MLX5_MAX_MC_PER_VPORT(esw->dev); + + mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL); + if (!mac_list) + return; + + hash = is_uc ? vport->uc_list : vport->mc_list; + + for_each_l2hash_node(node, tmp, hash, hi) { + addr = container_of(node, struct vport_addr, node); + addr->action = MLX5_ACTION_DEL; + } + + err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, + mac_list, &size); + if (err) + return; + esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", + vport_num, is_uc ? 
"UC" : "MC", size); + + for (i = 0; i < size; i++) { + if (is_uc && !is_valid_ether_addr(mac_list[i])) + continue; + + if (!is_uc && !is_multicast_ether_addr(mac_list[i])) + continue; + + addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); + if (addr) { + addr->action = MLX5_ACTION_NONE; + continue; + } + + addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, + GFP_KERNEL); + if (!addr) { + esw_warn(esw->dev, + "Failed to add MAC(%pM) to vport[%d] DB\n", + mac_list[i], vport_num); + continue; + } + addr->vport = vport_num; + addr->action = MLX5_ACTION_ADD; + } + kfree(mac_list); +} + +static void esw_vport_change_handler(struct work_struct *work) +{ + struct mlx5_vport *vport = + container_of(work, struct mlx5_vport, vport_change_handler); + struct mlx5_core_dev *dev = vport->dev; + struct mlx5_eswitch *esw = dev->priv.eswitch; + u8 mac[ETH_ALEN]; + + mlx5_query_nic_vport_mac_address(dev, vport->vport, mac); + esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", + vport->vport, mac); + + if (vport->enabled_events & UC_ADDR_CHANGE) { + esw_update_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_UC); + esw_apply_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_UC); + } + + if (vport->enabled_events & MC_ADDR_CHANGE) { + esw_update_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_MC); + esw_apply_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_MC); + } + + esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); + if (vport->enabled) + arm_vport_context_events_cmd(dev, vport->vport, + vport->enabled_events); +} + +static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, + int enable_events) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + unsigned long flags; + + WARN_ON(vport->enabled); + + esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport_num, + MLX5_ESW_VPORT_ADMIN_STATE_AUTO); + + /* Sync with current vport context */ + vport->enabled_events = enable_events; + esw_vport_change_handler(&vport->vport_change_handler); + + spin_lock_irqsave(&vport->lock, flags); + vport->enabled = true; + spin_unlock_irqrestore(&vport->lock, flags); + + arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); + + esw->enabled_vports++; + esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); +} + +static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + struct l2addr_node *node; + struct vport_addr *addr; + struct hlist_node *tmp; + int hi; + + for_each_l2hash_node(node, tmp, vport->uc_list, hi) { + addr = container_of(node, struct vport_addr, node); + addr->action = MLX5_ACTION_DEL; + } + esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC); + + for_each_l2hash_node(node, tmp, vport->mc_list, hi) { + addr = container_of(node, struct vport_addr, node); + addr->action = MLX5_ACTION_DEL; + } + esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC); +} + +static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + unsigned long flags; + + if (!vport->enabled) + return; + + esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); + /* Mark this vport as disabled to discard new events */ + spin_lock_irqsave(&vport->lock, flags); + vport->enabled = false; + vport->enabled_events = 0; + spin_unlock_irqrestore(&vport->lock, flags); + + 
mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport_num, + MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + /* Wait for current already scheduled events to complete */ + flush_workqueue(esw->work_queue); + /* Disable events from this vport */ + arm_vport_context_events_cmd(esw->dev, vport->vport, 0); + /* We don't assume VFs will cleanup after themselves */ + esw_cleanup_vport(esw, vport_num); + esw->enabled_vports--; +} + +/* Public E-Switch API */ +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) +{ + int err; + int i; + + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return 0; + + if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { + esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); + return -ENOTSUPP; + } + + esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); + + esw_disable_vport(esw, 0); + + err = esw_create_fdb_table(esw, nvfs + 1); + if (err) + goto abort; + + for (i = 0; i <= nvfs; i++) + esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS); + + esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", + esw->enabled_vports); + return 0; + +abort: + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + return err; +} + +void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) +{ + int i; + + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return; + + esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", + esw->enabled_vports); + + for (i = 0; i < esw->total_vports; i++) + esw_disable_vport(esw, i); + + esw_destroy_fdb_table(esw); + + /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); +} + +int mlx5_eswitch_init(struct mlx5_core_dev *dev) +{ + int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); + int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev); + struct mlx5_eswitch *esw; + int vport_num; + int err; + + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return 0; + + esw_info(dev, + "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n", + total_vports, l2_table_size, + MLX5_MAX_UC_PER_VPORT(dev), + MLX5_MAX_MC_PER_VPORT(dev)); + + esw = kzalloc(sizeof(*esw), GFP_KERNEL); + if (!esw) + return -ENOMEM; + + esw->dev = dev; + + esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size), + sizeof(uintptr_t), GFP_KERNEL); + if (!esw->l2_table.bitmap) { + err = -ENOMEM; + goto abort; + } + esw->l2_table.size = l2_table_size; + + esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); + if (!esw->work_queue) { + err = -ENOMEM; + goto abort; + } + + esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport), + GFP_KERNEL); + if (!esw->vports) { + err = -ENOMEM; + goto abort; + } + + for (vport_num = 0; vport_num < total_vports; vport_num++) { + struct mlx5_vport *vport = &esw->vports[vport_num]; + + vport->vport = vport_num; + vport->dev = dev; + INIT_WORK(&vport->vport_change_handler, + esw_vport_change_handler); + spin_lock_init(&vport->lock); + } + + esw->total_vports = total_vports; + esw->enabled_vports = 0; + + dev->priv.eswitch = esw; + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + /* VF Vports will be enabled when SRIOV is enabled */ + return 0; +abort: + if (esw->work_queue) + destroy_workqueue(esw->work_queue); + kfree(esw->l2_table.bitmap); + 
kfree(esw->vports); + kfree(esw); + return err; +} + +void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) +{ + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return; + + esw_info(esw->dev, "cleanup\n"); + esw_disable_vport(esw, 0); + + esw->dev->priv.eswitch = NULL; + destroy_workqueue(esw->work_queue); + kfree(esw->l2_table.bitmap); + kfree(esw->vports); + kfree(esw); +} + +void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) +{ + struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change; + u16 vport_num = be16_to_cpu(vc_eqe->vport_num); + struct mlx5_vport *vport; + + if (!esw) { + pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n", + vport_num); + return; + } + + vport = &esw->vports[vport_num]; + spin_lock(&vport->lock); + if (vport->enabled) + queue_work(esw->work_queue, &vport->vport_change_handler); + spin_unlock(&vport->lock); +} + +/* Vport Administration */ +#define ESW_ALLOWED(esw) \ + (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) +#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) + +int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + int vport, u8 mac[ETH_ALEN]) +{ + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); + if (err) { + mlx5_core_warn(esw->dev, + "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", + vport, err); + return err; + } + + return err; +} + +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, + int vport, int link_state) +{ + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + return mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport, link_state); +} + +int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, + int vport, struct ifla_vf_info *ivi) +{ + u16 vlan; + u8 qos; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + memset(ivi, 0, sizeof(*ivi)); + ivi->vf = vport - 1; + + mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac); + ivi->linkstate = mlx5_query_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport); + query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos); + ivi->vlan = vlan; + ivi->qos = qos; + ivi->spoofchk = 0; + + return 0; +} + +int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + int vport, u16 vlan, u8 qos) +{ + int set = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + if (vlan || qos) + set = 1; + + return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); +} + +int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, + int vport, + struct ifla_vf_stats *vf_stats) +{ + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + int err = 0; + u32 *out; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_COUNTER); + MLX5_SET(query_vport_counter_in, in, op_mod, 0); + MLX5_SET(query_vport_counter_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vport_counter_in, in, 
other_vport, 1); + + memset(out, 0, outlen); + err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen); + if (err) + goto free_out; + + #define MLX5_GET_CTR(p, x) \ + MLX5_GET64(query_vport_counter_out, p, x) + + memset(vf_stats, 0, sizeof(*vf_stats)); + vf_stats->rx_packets = + MLX5_GET_CTR(out, received_eth_unicast.packets) + + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_eth_broadcast.packets); + + vf_stats->rx_bytes = + MLX5_GET_CTR(out, received_eth_unicast.octets) + + MLX5_GET_CTR(out, received_eth_multicast.octets) + + MLX5_GET_CTR(out, received_eth_broadcast.octets); + + vf_stats->tx_packets = + MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + + MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); + + vf_stats->tx_bytes = + MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + + MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); + + vf_stats->multicast = + MLX5_GET_CTR(out, received_eth_multicast.packets); + + vf_stats->broadcast = + MLX5_GET_CTR(out, received_eth_broadcast.packets); + +free_out: + kvfree(out); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h new file mode 100644 index 000000000000..3416a428f70f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
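
For context, mlx5_eswitch_get_vport_stats() above folds the raw query_vport_counter output into the per-VF fields that ip(8) reports: the unicast, multicast and broadcast counters are summed into the packet totals. A minimal userspace sketch of that folding, using stand-in structures rather than the real mlx5_ifc layout:

#include <stdint.h>
#include <stdio.h>

struct vport_counters {		/* hypothetical flattened view of the FW output */
	uint64_t rx_ucast, rx_mcast, rx_bcast;
	uint64_t tx_ucast, tx_mcast, tx_bcast;
};

struct vf_stats_view {		/* mirrors the ifla_vf_stats fields filled above */
	uint64_t rx_packets, tx_packets, multicast, broadcast;
};

static void fold_counters(const struct vport_counters *c, struct vf_stats_view *s)
{
	/* same aggregation as the MLX5_GET_CTR() sums in the patch */
	s->rx_packets = c->rx_ucast + c->rx_mcast + c->rx_bcast;
	s->tx_packets = c->tx_ucast + c->tx_mcast + c->tx_bcast;
	s->multicast  = c->rx_mcast;
	s->broadcast  = c->rx_bcast;
}

int main(void)
{
	struct vport_counters c = { 10, 2, 1, 20, 4, 3 };
	struct vf_stats_view s;

	fold_counters(&c, &s);
	printf("rx=%llu tx=%llu mcast=%llu bcast=%llu\n",
	       (unsigned long long)s.rx_packets, (unsigned long long)s.tx_packets,
	       (unsigned long long)s.multicast, (unsigned long long)s.broadcast);
	return 0;
}
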
+ */ + +#ifndef __MLX5_ESWITCH_H__ +#define __MLX5_ESWITCH_H__ + +#include <linux/if_ether.h> +#include <linux/if_link.h> +#include <linux/mlx5/device.h> + +#define MLX5_MAX_UC_PER_VPORT(dev) \ + (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) + +#define MLX5_MAX_MC_PER_VPORT(dev) \ + (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) + +#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) +#define MLX5_L2_ADDR_HASH(addr) (addr[5]) + +/* L2 -mac address based- hash helpers */ +struct l2addr_node { + struct hlist_node hlist; + u8 addr[ETH_ALEN]; +}; + +#define for_each_l2hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +#define l2addr_hash_find(hash, mac, type) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + bool found = false; \ + type *ptr = NULL; \ + \ + hlist_for_each_entry(ptr, &hash[ix], node.hlist) \ + if (ether_addr_equal(ptr->node.addr, mac)) {\ + found = true; \ + break; \ + } \ + if (!found) \ + ptr = NULL; \ + ptr; \ +}) + +#define l2addr_hash_add(hash, mac, type, gfp) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + type *ptr = NULL; \ + \ + ptr = kzalloc(sizeof(type), gfp); \ + if (ptr) { \ + ether_addr_copy(ptr->node.addr, mac); \ + hlist_add_head(&ptr->node.hlist, &hash[ix]);\ + } \ + ptr; \ +}) + +#define l2addr_hash_del(ptr) ({ \ + hlist_del(&ptr->node.hlist); \ + kfree(ptr); \ +}) + +struct mlx5_vport { + struct mlx5_core_dev *dev; + int vport; + struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; + struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; + struct work_struct vport_change_handler; + + /* This spinlock protects access to vport data, between + * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event" + * once vport marked as disabled new interrupts are discarded. 
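
A side note on the helpers just defined: l2addr_hash_find() and l2addr_hash_add() bucket MAC addresses by their last byte (MLX5_L2_ADDR_HASH), and callers such as esw_add_uc_addr() use a find-or-add pattern per bucket. A simplified userspace sketch of that pattern, with a plain linked list standing in for hlist and illustrative names only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define L2_HASH_SIZE	256		/* BIT(BITS_PER_BYTE), as in eswitch.h */
#define l2_hash(mac)	((mac)[5])	/* same idea as MLX5_L2_ADDR_HASH */

struct l2_node {
	uint8_t addr[6];
	struct l2_node *next;
};

static struct l2_node *buckets[L2_HASH_SIZE];

static struct l2_node *l2_find(const uint8_t *mac)
{
	struct l2_node *n;

	for (n = buckets[l2_hash(mac)]; n; n = n->next)
		if (!memcmp(n->addr, mac, 6))
			return n;
	return NULL;
}

static struct l2_node *l2_add(const uint8_t *mac)
{
	struct l2_node *n = calloc(1, sizeof(*n));

	if (!n)
		return NULL;
	memcpy(n->addr, mac, 6);
	n->next = buckets[l2_hash(mac)];
	buckets[l2_hash(mac)] = n;
	return n;
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	if (!l2_find(mac))
		l2_add(mac);		/* find-or-add, as in esw_add_uc_addr() */
	printf("bucket %u has %s entry\n", (unsigned)l2_hash(mac),
	       l2_find(mac) ? "an" : "no");
	return 0;
}
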
+ */ + spinlock_t lock; /* vport events sync */ + bool enabled; + u16 enabled_events; +}; + +struct mlx5_l2_table { + struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE]; + u32 size; + unsigned long *bitmap; +}; + +struct mlx5_eswitch_fdb { + void *fdb; + struct mlx5_flow_group *addr_grp; +}; + +struct mlx5_eswitch { + struct mlx5_core_dev *dev; + struct mlx5_l2_table l2_table; + struct mlx5_eswitch_fdb fdb_table; + struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; + struct workqueue_struct *work_queue; + struct mlx5_vport *vports; + int total_vports; + int enabled_vports; +}; + +/* E-Switch API */ +int mlx5_eswitch_init(struct mlx5_core_dev *dev); +void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); +void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs); +void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); +int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + int vport, u8 mac[ETH_ALEN]); +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, + int vport, int link_state); +int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + int vport, u16 vlan, u8 qos); +int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, + int vport, struct ifla_vf_info *ivi); +int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, + int vport, + struct ifla_vf_stats *vf_stats); + +#endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c deleted file mode 100644 index ca90b9bc3b95..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
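
One detail worth calling out about the API declared above: the netdev ndo_* callbacks earlier in this patch pass vf + 1 as the vport argument (vport 0 is the PF), and mlx5e_set_vf_link_state() translates the IFLA_VF_LINK_STATE_* values into e-switch admin states. A small userspace sketch of those two translations, with local stand-in enums rather than the uapi constants:

#include <stdio.h>

enum ifla_link_state { LINK_AUTO, LINK_ENABLE, LINK_DISABLE };
enum esw_admin_state { ESW_AUTO, ESW_UP, ESW_DOWN };

static int vf_to_vport(int vf) { return vf + 1; }	/* PF is vport 0 */

static enum esw_admin_state ifla_to_esw(enum ifla_link_state s)
{
	switch (s) {
	case LINK_DISABLE: return ESW_DOWN;
	case LINK_ENABLE:  return ESW_UP;
	default:           return ESW_AUTO;	/* AUTO and anything unknown */
	}
}

int main(void)
{
	printf("VF 0 -> vport %d, ENABLE -> %s\n", vf_to_vport(0),
	       ifla_to_esw(LINK_ENABLE) == ESW_UP ? "UP" : "?");
	return 0;
}
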
- */ - -#include <linux/export.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/flow_table.h> -#include "mlx5_core.h" - -struct mlx5_ftg { - struct mlx5_flow_table_group g; - u32 id; - u32 start_ix; -}; - -struct mlx5_flow_table { - struct mlx5_core_dev *dev; - u8 level; - u8 type; - u32 id; - struct mutex mutex; /* sync bitmap alloc */ - u16 num_groups; - struct mlx5_ftg *group; - unsigned long *bitmap; - u32 size; -}; - -static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix, - u32 flow_index, void *flow_context) -{ - u32 out[MLX5_ST_SZ_DW(set_fte_out)]; - u32 *in; - void *in_flow_context; - int fcdls = - MLX5_GET(flow_context, flow_context, destination_list_size) * - MLX5_ST_SZ_BYTES(dest_format_struct); - int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls; - int err; - - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); - return -ENOMEM; - } - - MLX5_SET(set_fte_in, in, table_type, ft->type); - MLX5_SET(set_fte_in, in, table_id, ft->id); - MLX5_SET(set_fte_in, in, flow_index, flow_index); - MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); - - in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); - memcpy(in_flow_context, flow_context, - MLX5_ST_SZ_BYTES(flow_context) + fcdls); - - MLX5_SET(flow_context, in_flow_context, group_id, - ft->group[group_ix].id); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, - sizeof(out)); - kvfree(in); - - return err; -} - -static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index) -{ - u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; - u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - -#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v) - MLX5_SET_DFTEI(in, table_type, ft->type); - MLX5_SET_DFTEI(in, table_id, ft->id); - MLX5_SET_DFTEI(in, flow_index, flow_index); - MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); - - mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); -} - -static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i) -{ - u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - -#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v) - MLX5_SET_DFGI(in, table_type, ft->type); - MLX5_SET_DFGI(in, table_id, ft->id); - MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); - MLX5_SET_DFGI(in, group_id, ft->group[i].id); - mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i) -{ - u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; - u32 *in; - void *in_match_criteria; - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table_group *g = &ft->group[i].g; - u32 start_ix = ft->group[i].start_ix; - u32 end_ix = start_ix + (1 << g->log_sz) - 1; - int err; - - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); - return -ENOMEM; - } - in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in, - match_criteria); - - memset(out, 0, sizeof(out)); - -#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v) - MLX5_SET_CFGI(in, table_type, ft->type); - MLX5_SET_CFGI(in, table_id, ft->id); - MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); - MLX5_SET_CFGI(in, start_flow_index, start_ix); - 
MLX5_SET_CFGI(in, end_flow_index, end_ix); - MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable); - - memcpy(in_match_criteria, g->match_criteria, - MLX5_ST_SZ_BYTES(fte_match_param)); - - err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, - sizeof(out)); - if (!err) - ft->group[i].id = MLX5_GET(create_flow_group_out, out, - group_id); - - kvfree(in); - - return err; -} - -static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft) -{ - int i; - - for (i = 0; i < ft->num_groups; i++) - mlx5_destroy_flow_group_cmd(ft, i); -} - -static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft) -{ - int err; - int i; - - for (i = 0; i < ft->num_groups; i++) { - err = mlx5_create_flow_group_cmd(ft, i); - if (err) - goto err_destroy_flow_table_groups; - } - - return 0; - -err_destroy_flow_table_groups: - for (i--; i >= 0; i--) - mlx5_destroy_flow_group_cmd(ft, i); - - return err; -} - -static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft) -{ - u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; - int err; - - memset(in, 0, sizeof(in)); - - MLX5_SET(create_flow_table_in, in, table_type, ft->type); - MLX5_SET(create_flow_table_in, in, level, ft->level); - MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size)); - - MLX5_SET(create_flow_table_in, in, opcode, - MLX5_CMD_OP_CREATE_FLOW_TABLE); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, - sizeof(out)); - if (err) - return err; - - ft->id = MLX5_GET(create_flow_table_out, out, table_id); - - return 0; -} - -static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft) -{ - u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - -#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v) - MLX5_SET_DFTI(in, table_type, ft->type); - MLX5_SET_DFTI(in, table_id, ft->id); - MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); - - mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable, - u32 *match_criteria, int *group_ix) -{ - void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria, - outer_headers); - void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria, - misc_parameters); - void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria, - inner_headers); - int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); - int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc); - int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); - int i; - - for (i = 0; i < ft->num_groups; i++) { - struct mlx5_flow_table_group *g = &ft->group[i].g; - void *gmc_outer = MLX5_ADDR_OF(fte_match_param, - g->match_criteria, - outer_headers); - void *gmc_misc = MLX5_ADDR_OF(fte_match_param, - g->match_criteria, - misc_parameters); - void *gmc_inner = MLX5_ADDR_OF(fte_match_param, - g->match_criteria, - inner_headers); - - if (g->match_criteria_enable != match_criteria_enable) - continue; - - if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) - if (memcmp(mc_outer, gmc_outer, mc_outer_sz)) - continue; - - if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS) - if (memcmp(mc_misc, gmc_misc, mc_misc_sz)) - continue; - - if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS) - if (memcmp(mc_inner, gmc_inner, mc_inner_sz)) - continue; - - *group_ix = i; - return 0; - } - - return 
-EINVAL; -} - -static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix) -{ - struct mlx5_ftg *g = &ft->group[group_ix]; - int err = 0; - - mutex_lock(&ft->mutex); - - *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix); - if (*ix >= (g->start_ix + (1 << g->g.log_sz))) - err = -ENOSPC; - else - __set_bit(*ix, ft->bitmap); - - mutex_unlock(&ft->mutex); - - return err; -} - -static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix) -{ - __clear_bit(ix, ft->bitmap); -} - -int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, - void *match_criteria, void *flow_context, - u32 *flow_index) -{ - struct mlx5_flow_table *ft = flow_table; - int group_ix; - int err; - - err = mlx5_find_group(ft, match_criteria_enable, match_criteria, - &group_ix); - if (err) { - mlx5_core_warn(ft->dev, "mlx5_find_group failed\n"); - return err; - } - - err = alloc_flow_index(ft, group_ix, flow_index); - if (err) { - mlx5_core_warn(ft->dev, "alloc_flow_index failed\n"); - return err; - } - - return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context); -} -EXPORT_SYMBOL(mlx5_add_flow_table_entry); - -void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index) -{ - struct mlx5_flow_table *ft = flow_table; - - mlx5_del_flow_entry_cmd(ft, flow_index); - mlx5_free_flow_index(ft, flow_index); -} -EXPORT_SYMBOL(mlx5_del_flow_table_entry); - -void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, - u16 num_groups, - struct mlx5_flow_table_group *group) -{ - struct mlx5_flow_table *ft; - u32 start_ix = 0; - u32 ft_size = 0; - void *gr; - void *bm; - int err; - int i; - - for (i = 0; i < num_groups; i++) - ft_size += (1 << group[i].log_sz); - - ft = kzalloc(sizeof(*ft), GFP_KERNEL); - gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL); - bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL); - if (!ft || !gr || !bm) - goto err_free_ft; - - ft->group = gr; - ft->bitmap = bm; - ft->num_groups = num_groups; - ft->level = level; - ft->type = table_type; - ft->size = ft_size; - ft->dev = dev; - mutex_init(&ft->mutex); - - for (i = 0; i < ft->num_groups; i++) { - memcpy(&ft->group[i].g, &group[i], sizeof(*group)); - ft->group[i].start_ix = start_ix; - start_ix += 1 << group[i].log_sz; - } - - err = mlx5_create_flow_table_cmd(ft); - if (err) - goto err_free_ft; - - err = mlx5_create_flow_table_groups(ft); - if (err) - goto err_destroy_flow_table_cmd; - - return ft; - -err_destroy_flow_table_cmd: - mlx5_destroy_flow_table_cmd(ft); - -err_free_ft: - mlx5_core_warn(dev, "failed to alloc flow table\n"); - kfree(bm); - kfree(gr); - kfree(ft); - - return NULL; -} -EXPORT_SYMBOL(mlx5_create_flow_table); - -void mlx5_destroy_flow_table(void *flow_table) -{ - struct mlx5_flow_table *ft = flow_table; - - mlx5_destroy_flow_table_groups(ft); - mlx5_destroy_flow_table_cmd(ft); - kfree(ft->bitmap); - kfree(ft->group); - kfree(ft); -} -EXPORT_SYMBOL(mlx5_destroy_flow_table); - -u32 mlx5_get_flow_table_id(void *flow_table) -{ - struct mlx5_flow_table *ft = flow_table; - - return ft->id; -} -EXPORT_SYMBOL(mlx5_get_flow_table_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c new file mode 100644 index 000000000000..a9894d2e8e26 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 
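
Both alloc_l2_table_index() in the new eswitch code and alloc_flow_index() in the file being removed follow the same allocator idiom: find the first clear bit in a bitmap, claim it, and report -ENOSPC when the range is exhausted. A minimal userspace sketch of that idiom (find_first_zero_bit()/__set_bit() are kernel helpers; plain C stands in for them here, and the table size is hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64			/* hypothetical number of entries */
static uint64_t bitmap;			/* one 64-bit word covers the table */

static int alloc_index(unsigned int *ix)
{
	unsigned int i;

	for (i = 0; i < TABLE_SIZE; i++) {
		if (!(bitmap & (1ULL << i))) {
			bitmap |= 1ULL << i;	/* claim the slot */
			*ix = i;
			return 0;
		}
	}
	return -ENOSPC;
}

static void free_index(unsigned int ix)
{
	bitmap &= ~(1ULL << ix);
}

int main(void)
{
	unsigned int a = 0, b = 0;

	alloc_index(&a);
	alloc_index(&b);
	free_index(a);
	alloc_index(&a);		/* reuses the freed slot */
	printf("a=%u b=%u\n", a, b);	/* prints a=0 b=1 */
	return 0;
}
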
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> +#include <linux/mlx5/mlx5_ifc.h> + +#include "fs_core.h" +#include "fs_cmd.h" +#include "mlx5_core.h" + +int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); + MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + enum fs_flow_table_type type, unsigned int level, + unsigned int log_size, struct mlx5_flow_table + *next_ft, unsigned int *table_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(create_flow_table_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_TABLE); + + if (next_ft) { + MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); + } + MLX5_SET(create_flow_table_in, in, table_type, type); + MLX5_SET(create_flow_table_in, in, level, level); + MLX5_SET(create_flow_table_in, in, log_size, log_size); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); + + if (!err) + *table_id = MLX5_GET(create_flow_table_out, out, + table_id); + return err; +} + +int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_flow_table_in, in, opcode, + MLX5_CMD_OP_DESTROY_FLOW_TABLE); + MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); + MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int 
mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(modify_flow_table_in, in, opcode, + MLX5_CMD_OP_MODIFY_FLOW_TABLE); + MLX5_SET(modify_flow_table_in, in, table_type, ft->type); + MLX5_SET(modify_flow_table_in, in, table_id, ft->id); + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + } + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; + int err; + + memset(out, 0, sizeof(out)); + + MLX5_SET(create_flow_group_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET(create_flow_group_in, in, table_type, ft->type); + MLX5_SET(create_flow_group_in, in, table_id, ft->id); + + err = mlx5_cmd_exec_check_status(dev, in, + inlen, out, + sizeof(out)); + if (!err) + *group_id = MLX5_GET(create_flow_group_out, out, + group_id); + + return err; +} + +int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id) +{ + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_flow_group_in, in, opcode, + MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); + MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); + MLX5_SET(destroy_flow_group_in, in, group_id, group_id); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, + int opmod, int modify_mask, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte) +{ + unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + + fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); + u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + struct mlx5_flow_rule *dst; + void *in_flow_context; + void *in_match_value; + void *in_dests; + u32 *in; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + MLX5_SET(set_fte_in, in, op_mod, opmod); + MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask); + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, fte->index); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + MLX5_SET(flow_context, in_flow_context, group_id, group_id); + MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); + MLX5_SET(flow_context, in_flow_context, action, fte->action); + MLX5_SET(flow_context, in_flow_context, destination_list_size, + fte->dests_size); + in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, + match_value); + memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); + + in_dests = 
MLX5_ADDR_OF(flow_context, in_flow_context, destination); + list_for_each_entry(dst, &fte->node.children, node.list) { + unsigned int id; + + MLX5_SET(dest_format_struct, in_dests, destination_type, + dst->dest_attr.type); + if (dst->dest_attr.type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) + id = dst->dest_attr.ft->id; + else + id = dst->dest_attr.tir_num; + MLX5_SET(dest_format_struct, in_dests, destination_id, id); + in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + } + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, + sizeof(out)); + kvfree(in); + + return err; +} + +int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte) +{ + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); +} + +int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte) +{ + int opmod; + int modify_mask; + int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive. + flow_modify_en); + if (!atomic_mod_cap) + return -ENOTSUPP; + opmod = 1; + modify_mask = 1 << + MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST; + + return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); +} + +int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index) +{ + u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + MLX5_SET(delete_fte_in, in, table_type, ft->type); + MLX5_SET(delete_fte_in, in, table_id, ft->id); + MLX5_SET(delete_fte_in, in, flow_index, index); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h new file mode 100644 index 000000000000..9814d4784803 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_CMD_ +#define _MLX5_FS_CMD_ + +int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + enum fs_flow_table_type type, unsigned int level, + unsigned int log_size, struct mlx5_flow_table + *next_ft, unsigned int *table_id); + +int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); + +int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft); + +int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, unsigned int *group_id); + +int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id); + +int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte); + +int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte); + +int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index); + +int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c new file mode 100644 index 000000000000..6f68dba8d7ed --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -0,0 +1,1514 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mutex.h> +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" +#include "fs_core.h" +#include "fs_cmd.h" + +#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ + sizeof(struct init_tree_node)) + +#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\ + ...) 
{.type = FS_TYPE_PRIO,\ + .min_ft_level = min_level_val,\ + .max_ft = max_ft_val,\ + .num_leaf_prios = num_prios_val,\ + .caps = caps_val,\ + .children = (struct init_tree_node[]) {__VA_ARGS__},\ + .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ +} + +#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\ + ADD_PRIO(num_prios_val, 0, max_ft_val, {},\ + __VA_ARGS__)\ + +#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ + .children = (struct init_tree_node[]) {__VA_ARGS__},\ + .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ +} + +#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\ + sizeof(long)) + +#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap)) + +#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ + .caps = (long[]) {__VA_ARGS__} } + +#define LEFTOVERS_MAX_FT 1 +#define LEFTOVERS_NUM_PRIOS 1 +#define BY_PASS_PRIO_MAX_FT 1 +#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ + LEFTOVERS_MAX_FT) + +#define KERNEL_MAX_FT 2 +#define KERNEL_NUM_PRIOS 1 +#define KENREL_MIN_LEVEL 2 + +struct node_caps { + size_t arr_sz; + long *caps; +}; +static struct init_tree_node { + enum fs_node_type type; + struct init_tree_node *children; + int ar_size; + struct node_caps caps; + int min_ft_level; + int num_leaf_prios; + int prio; + int max_ft; +} root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 3, + .children = (struct init_tree_node[]) { + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, + FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), + FS_CAP(flow_table_properties_nic_receive.modify_root), + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))), + ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))), + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, + FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), + FS_CAP(flow_table_properties_nic_receive.modify_root), + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))), + } +}; + +enum fs_i_mutex_lock_class { + FS_MUTEX_GRANDPARENT, + FS_MUTEX_PARENT, + FS_MUTEX_CHILD +}; + +static void del_rule(struct fs_node *node); +static void del_flow_table(struct fs_node *node); +static void del_flow_group(struct fs_node *node); +static void del_fte(struct fs_node *node); + +static void tree_init_node(struct fs_node *node, + unsigned int refcount, + void (*remove_func)(struct fs_node *)) +{ + atomic_set(&node->refcount, refcount); + INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->children); + mutex_init(&node->lock); + node->remove_func = remove_func; +} + +static void tree_add_node(struct fs_node *node, struct fs_node *parent) +{ + if (parent) + atomic_inc(&parent->refcount); + node->parent = parent; + + /* Parent is the root */ + if (!parent) + node->root = node; + else + node->root = parent->root; +} + +static void tree_get_node(struct fs_node *node) +{ + atomic_inc(&node->refcount); +} + +static void nested_lock_ref_node(struct fs_node *node, + enum fs_i_mutex_lock_class class) +{ + if (node) { + mutex_lock_nested(&node->lock, class); + atomic_inc(&node->refcount); + } +} + +static void lock_ref_node(struct fs_node *node) +{ + if (node) { + mutex_lock(&node->lock); + atomic_inc(&node->refcount); + } +} + 
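
The helpers above give every flow-steering object (namespace, priority, table, group, FTE) a reference-counted fs_node with its own mutex; the three fs_i_mutex_lock_class values let a caller hold a table, a group and an FTE lock in one call chain without lockdep flagging recursive locking. The following is a minimal illustrative sketch, not part of the patch, of the nesting pattern the rest of this file uses (see add_rule_fg() and mlx5_add_flow_rule() further down); unlock_ref_node() is the counterpart defined immediately below, and the helper name here is hypothetical.

static void example_nested_walk(struct mlx5_flow_table *ft,
				struct mlx5_flow_group *fg)
{
	/* Outer object first, each level taking the next lock class. */
	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);

	/* ... inspect or extend the group while both nodes are pinned ... */

	/* Release in reverse order; each unlock drops the extra refcount. */
	unlock_ref_node(&fg->node);
	unlock_ref_node(&ft->node);
}
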
+static void unlock_ref_node(struct fs_node *node) +{ + if (node) { + atomic_dec(&node->refcount); + mutex_unlock(&node->lock); + } +} + +static void tree_put_node(struct fs_node *node) +{ + struct fs_node *parent_node = node->parent; + + lock_ref_node(parent_node); + if (atomic_dec_and_test(&node->refcount)) { + if (parent_node) + list_del_init(&node->list); + if (node->remove_func) + node->remove_func(node); + kfree(node); + node = NULL; + } + unlock_ref_node(parent_node); + if (!node && parent_node) + tree_put_node(parent_node); +} + +static int tree_remove_node(struct fs_node *node) +{ + if (atomic_read(&node->refcount) > 1) + return -EPERM; + tree_put_node(node); + return 0; +} + +static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, + unsigned int prio) +{ + struct fs_prio *iter_prio; + + fs_for_each_prio(iter_prio, ns) { + if (iter_prio->prio == prio) + return iter_prio; + } + + return NULL; +} + +static unsigned int find_next_free_level(struct fs_prio *prio) +{ + if (!list_empty(&prio->node.children)) { + struct mlx5_flow_table *ft; + + ft = list_last_entry(&prio->node.children, + struct mlx5_flow_table, + node.list); + return ft->level + 1; + } + return prio->start_level; +} + +static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++, mask++, val1++, val2++) + if ((*((u8 *)val1) & (*(u8 *)mask)) != + ((*(u8 *)val2) & (*(u8 *)mask))) + return false; + + return true; +} + +static bool compare_match_value(struct mlx5_flow_group_mask *mask, + void *fte_param1, void *fte_param2) +{ + if (mask->match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) { + void *fte_match1 = MLX5_ADDR_OF(fte_match_param, + fte_param1, outer_headers); + void *fte_match2 = MLX5_ADDR_OF(fte_match_param, + fte_param2, outer_headers); + void *fte_mask = MLX5_ADDR_OF(fte_match_param, + mask->match_criteria, outer_headers); + + if (!masked_memcmp(fte_mask, fte_match1, fte_match2, + MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) + return false; + } + + if (mask->match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) { + void *fte_match1 = MLX5_ADDR_OF(fte_match_param, + fte_param1, misc_parameters); + void *fte_match2 = MLX5_ADDR_OF(fte_match_param, + fte_param2, misc_parameters); + void *fte_mask = MLX5_ADDR_OF(fte_match_param, + mask->match_criteria, misc_parameters); + + if (!masked_memcmp(fte_mask, fte_match1, fte_match2, + MLX5_ST_SZ_BYTES(fte_match_set_misc))) + return false; + } + + if (mask->match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) { + void *fte_match1 = MLX5_ADDR_OF(fte_match_param, + fte_param1, inner_headers); + void *fte_match2 = MLX5_ADDR_OF(fte_match_param, + fte_param2, inner_headers); + void *fte_mask = MLX5_ADDR_OF(fte_match_param, + mask->match_criteria, inner_headers); + + if (!masked_memcmp(fte_mask, fte_match1, fte_match2, + MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) + return false; + } + return true; +} + +static bool compare_match_criteria(u8 match_criteria_enable1, + u8 match_criteria_enable2, + void *mask1, void *mask2) +{ + return match_criteria_enable1 == match_criteria_enable2 && + !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param)); +} + +static struct mlx5_flow_root_namespace *find_root(struct fs_node *node) +{ + struct fs_node *root; + struct mlx5_flow_namespace *ns; + + root = node->root; + + if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) { + pr_warn("mlx5: flow steering node 
is not in tree or garbaged\n"); + return NULL; + } + + ns = container_of(root, struct mlx5_flow_namespace, node); + return container_of(ns, struct mlx5_flow_root_namespace, ns); +} + +static inline struct mlx5_core_dev *get_dev(struct fs_node *node) +{ + struct mlx5_flow_root_namespace *root = find_root(node); + + if (root) + return root->dev; + return NULL; +} + +static void del_flow_table(struct fs_node *node) +{ + struct mlx5_flow_table *ft; + struct mlx5_core_dev *dev; + struct fs_prio *prio; + int err; + + fs_get_obj(ft, node); + dev = get_dev(&ft->node); + + err = mlx5_cmd_destroy_flow_table(dev, ft); + if (err) + pr_warn("flow steering can't destroy ft\n"); + fs_get_obj(prio, ft->node.parent); + prio->num_ft--; +} + +static void del_rule(struct fs_node *node) +{ + struct mlx5_flow_rule *rule; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct fs_fte *fte; + u32 *match_value; + struct mlx5_core_dev *dev = get_dev(node); + int match_len = MLX5_ST_SZ_BYTES(fte_match_param); + int err; + + match_value = mlx5_vzalloc(match_len); + if (!match_value) { + pr_warn("failed to allocate inbox\n"); + return; + } + + fs_get_obj(rule, node); + fs_get_obj(fte, rule->node.parent); + fs_get_obj(fg, fte->node.parent); + memcpy(match_value, fte->val, sizeof(fte->val)); + fs_get_obj(ft, fg->node.parent); + list_del(&rule->node.list); + fte->dests_size--; + if (fte->dests_size) { + err = mlx5_cmd_update_fte(dev, ft, + fg->id, fte); + if (err) + pr_warn("%s can't del rule fg id=%d fte_index=%d\n", + __func__, fg->id, fte->index); + } + kvfree(match_value); +} + +static void del_fte(struct fs_node *node) +{ + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_core_dev *dev; + struct fs_fte *fte; + int err; + + fs_get_obj(fte, node); + fs_get_obj(fg, fte->node.parent); + fs_get_obj(ft, fg->node.parent); + + dev = get_dev(&ft->node); + err = mlx5_cmd_delete_fte(dev, ft, + fte->index); + if (err) + pr_warn("flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); + + fte->status = 0; + fg->num_ftes--; +} + +static void del_flow_group(struct fs_node *node) +{ + struct mlx5_flow_group *fg; + struct mlx5_flow_table *ft; + struct mlx5_core_dev *dev; + + fs_get_obj(fg, node); + fs_get_obj(ft, fg->node.parent); + dev = get_dev(&ft->node); + + if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) + pr_warn("flow steering can't destroy fg %d of ft %d\n", + fg->id, ft->id); +} + +static struct fs_fte *alloc_fte(u8 action, + u32 flow_tag, + u32 *match_value, + unsigned int index) +{ + struct fs_fte *fte; + + fte = kzalloc(sizeof(*fte), GFP_KERNEL); + if (!fte) + return ERR_PTR(-ENOMEM); + + memcpy(fte->val, match_value, sizeof(fte->val)); + fte->node.type = FS_TYPE_FLOW_ENTRY; + fte->flow_tag = flow_tag; + fte->index = index; + fte->action = action; + + return fte; +} + +static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) +{ + struct mlx5_flow_group *fg; + void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, + create_fg_in, match_criteria); + u8 match_criteria_enable = MLX5_GET(create_flow_group_in, + create_fg_in, + match_criteria_enable); + fg = kzalloc(sizeof(*fg), GFP_KERNEL); + if (!fg) + return ERR_PTR(-ENOMEM); + + fg->mask.match_criteria_enable = match_criteria_enable; + memcpy(&fg->mask.match_criteria, match_criteria, + sizeof(fg->mask.match_criteria)); + fg->node.type = FS_TYPE_FLOW_GROUP; + fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in, + start_flow_index); + fg->max_ftes = MLX5_GET(create_flow_group_in, 
create_fg_in, + end_flow_index) - fg->start_index + 1; + return fg; +} + +static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, + enum fs_flow_table_type table_type) +{ + struct mlx5_flow_table *ft; + + ft = kzalloc(sizeof(*ft), GFP_KERNEL); + if (!ft) + return NULL; + + ft->level = level; + ft->node.type = FS_TYPE_FLOW_TABLE; + ft->type = table_type; + ft->max_fte = max_fte; + + return ft; +} + +/* If reverse is false, then we search for the first flow table in the + * root sub-tree from start(closest from right), else we search for the + * last flow table in the root sub-tree till start(closest from left). + */ +static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, + struct list_head *start, + bool reverse) +{ +#define list_advance_entry(pos, reverse) \ + ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list)) + +#define list_for_each_advance_continue(pos, head, reverse) \ + for (pos = list_advance_entry(pos, reverse); \ + &pos->list != (head); \ + pos = list_advance_entry(pos, reverse)) + + struct fs_node *iter = list_entry(start, struct fs_node, list); + struct mlx5_flow_table *ft = NULL; + + if (!root) + return NULL; + + list_for_each_advance_continue(iter, &root->children, reverse) { + if (iter->type == FS_TYPE_FLOW_TABLE) { + fs_get_obj(ft, iter); + return ft; + } + ft = find_closest_ft_recursive(iter, &iter->children, reverse); + if (ft) + return ft; + } + + return ft; +} + +/* If reverse if false then return the first flow table in next priority of + * prio in the tree, else return the last flow table in the previous priority + * of prio in the tree. + */ +static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse) +{ + struct mlx5_flow_table *ft = NULL; + struct fs_node *curr_node; + struct fs_node *parent; + + parent = prio->node.parent; + curr_node = &prio->node; + while (!ft && parent) { + ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); + curr_node = parent; + parent = curr_node->parent; + } + return ft; +} + +/* Assuming all the tree is locked by mutex chain lock */ +static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio) +{ + return find_closest_ft(prio, false); +} + +/* Assuming all the tree is locked by mutex chain lock */ +static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) +{ + return find_closest_ft(prio, true); +} + +static int connect_fts_in_prio(struct mlx5_core_dev *dev, + struct fs_prio *prio, + struct mlx5_flow_table *ft) +{ + struct mlx5_flow_table *iter; + int i = 0; + int err; + + fs_for_each_ft(iter, prio) { + i++; + err = mlx5_cmd_modify_flow_table(dev, + iter, + ft); + if (err) { + mlx5_core_warn(dev, "Failed to modify flow table %d\n", + iter->id); + /* The driver is out of sync with the FW */ + if (i > 1) + WARN_ON(true); + return err; + } + } + return 0; +} + +/* Connect flow tables from previous priority of prio to ft */ +static int connect_prev_fts(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + struct mlx5_flow_table *prev_ft; + + prev_ft = find_prev_chained_ft(prio); + if (prev_ft) { + struct fs_prio *prev_prio; + + fs_get_obj(prev_prio, prev_ft->node.parent); + return connect_fts_in_prio(dev, prev_prio, ft); + } + return 0; +} + +static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio + *prio) +{ + struct mlx5_flow_root_namespace *root = find_root(&prio->node); + int min_level = INT_MAX; + int err; + + if (root->root_ft) + min_level = root->root_ft->level; 
+ + if (ft->level >= min_level) + return 0; + + err = mlx5_cmd_update_root_ft(root->dev, ft); + if (err) + mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", + ft->id); + else + root->root_ft = ft; + + return err; +} + +static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + int err = 0; + + /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ + + if (list_empty(&prio->node.children)) { + err = connect_prev_fts(dev, ft, prio); + if (err) + return err; + } + + if (MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.modify_root)) + err = update_root_ft_create(ft, prio); + return err; +} + +struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int max_fte) +{ + struct mlx5_flow_table *next_ft = NULL; + struct mlx5_flow_table *ft; + int err; + int log_table_sz; + struct mlx5_flow_root_namespace *root = + find_root(&ns->node); + struct fs_prio *fs_prio = NULL; + + if (!root) { + pr_err("mlx5: flow steering failed to find root of namespace\n"); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&root->chain_lock); + fs_prio = find_prio(ns, prio); + if (!fs_prio) { + err = -EINVAL; + goto unlock_root; + } + if (fs_prio->num_ft == fs_prio->max_ft) { + err = -ENOSPC; + goto unlock_root; + } + + ft = alloc_flow_table(find_next_free_level(fs_prio), + roundup_pow_of_two(max_fte), + root->table_type); + if (!ft) { + err = -ENOMEM; + goto unlock_root; + } + + tree_init_node(&ft->node, 1, del_flow_table); + log_table_sz = ilog2(ft->max_fte); + next_ft = find_next_chained_ft(fs_prio); + err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level, + log_table_sz, next_ft, &ft->id); + if (err) + goto free_ft; + + err = connect_flow_table(root->dev, ft, fs_prio); + if (err) + goto destroy_ft; + lock_ref_node(&fs_prio->node); + tree_add_node(&ft->node, &fs_prio->node); + list_add_tail(&ft->node.list, &fs_prio->node.children); + fs_prio->num_ft++; + unlock_ref_node(&fs_prio->node); + mutex_unlock(&root->chain_lock); + return ft; +destroy_ft: + mlx5_cmd_destroy_flow_table(root->dev, ft); +free_ft: + kfree(ft); +unlock_root: + mutex_unlock(&root->chain_lock); + return ERR_PTR(err); +} + +struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups) +{ + struct mlx5_flow_table *ft; + + if (max_num_groups > num_flow_table_entries) + return ERR_PTR(-EINVAL); + + ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries); + if (IS_ERR(ft)) + return ft; + + ft->autogroup.active = true; + ft->autogroup.required_groups = max_num_groups; + + return ft; +} +EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); + +/* Flow table should be locked */ +static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft, + u32 *fg_in, + struct list_head + *prev_fg, + bool is_auto_fg) +{ + struct mlx5_flow_group *fg; + struct mlx5_core_dev *dev = get_dev(&ft->node); + int err; + + if (!dev) + return ERR_PTR(-ENODEV); + + fg = alloc_flow_group(fg_in); + if (IS_ERR(fg)) + return fg; + + err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); + if (err) { + kfree(fg); + return ERR_PTR(err); + } + + if (ft->autogroup.active) + ft->autogroup.num_groups++; + /* Add node to tree */ + tree_init_node(&fg->node, !is_auto_fg, del_flow_group); + tree_add_node(&fg->node, &ft->node); + /* Add node to group list */ + list_add(&fg->node.list, ft->node.children.prev); + + return fg; +} + 
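
create_flow_group_common() above consumes a create_flow_group_in inbox whose match_criteria_enable, start_flow_index, end_flow_index and match_criteria fields are read back by alloc_flow_group(). Below is a hedged caller-side sketch, not part of the patch, of building such an inbox for mlx5_create_flow_group() (defined just after this point); the 16-entry group size and the function name are illustrative assumptions only.

static struct mlx5_flow_group *example_create_group(struct mlx5_flow_table *ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return ERR_PTR(-ENOMEM);

	/* Match on outer headers; reserve flow indices 0..15 for this group. */
	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, 15);
	/* The mask itself lives at MLX5_ADDR_OF(create_flow_group_in, in,
	 * match_criteria) and would be filled with the fields to match on.
	 */

	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);
	return fg;
}
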
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, + u32 *fg_in) +{ + struct mlx5_flow_group *fg; + + if (ft->autogroup.active) + return ERR_PTR(-EPERM); + + lock_ref_node(&ft->node); + fg = create_flow_group_common(ft, fg_in, &ft->node.children, false); + unlock_ref_node(&ft->node); + + return fg; +} + +static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_rule *rule; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return NULL; + + rule->node.type = FS_TYPE_FLOW_DEST; + memcpy(&rule->dest_attr, dest, sizeof(*dest)); + + return rule; +} + +/* fte should not be deleted while calling this function */ +static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, + struct mlx5_flow_group *fg, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_table *ft; + struct mlx5_flow_rule *rule; + int err; + + rule = alloc_rule(dest); + if (!rule) + return ERR_PTR(-ENOMEM); + + fs_get_obj(ft, fg->node.parent); + /* Add dest to dests list- added as first element after the head */ + tree_init_node(&rule->node, 1, del_rule); + list_add_tail(&rule->node.list, &fte->node.children); + fte->dests_size++; + if (fte->dests_size == 1) + err = mlx5_cmd_create_fte(get_dev(&ft->node), + ft, fg->id, fte); + else + err = mlx5_cmd_update_fte(get_dev(&ft->node), + ft, fg->id, fte); + if (err) + goto free_rule; + + fte->status |= FS_FTE_STATUS_EXISTING; + + return rule; + +free_rule: + list_del(&rule->node.list); + kfree(rule); + fte->dests_size--; + return ERR_PTR(err); +} + +/* Assumed fg is locked */ +static unsigned int get_free_fte_index(struct mlx5_flow_group *fg, + struct list_head **prev) +{ + struct fs_fte *fte; + unsigned int start = fg->start_index; + + if (prev) + *prev = &fg->node.children; + + /* assumed list is sorted by index */ + fs_for_each_fte(fte, fg) { + if (fte->index != start) + return start; + start++; + if (prev) + *prev = &fte->node.list; + } + + return start; +} + +/* prev is output, prev->next = new_fte */ +static struct fs_fte *create_fte(struct mlx5_flow_group *fg, + u32 *match_value, + u8 action, + u32 flow_tag, + struct list_head **prev) +{ + struct fs_fte *fte; + int index; + + index = get_free_fte_index(fg, prev); + fte = alloc_fte(action, flow_tag, match_value, index); + if (IS_ERR(fte)) + return fte; + + return fte; +} + +static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct list_head *prev = &ft->node.children; + unsigned int candidate_index = 0; + struct mlx5_flow_group *fg; + void *match_criteria_addr; + unsigned int group_size = 0; + u32 *in; + + if (!ft->autogroup.active) + return ERR_PTR(-ENOENT); + + in = mlx5_vzalloc(inlen); + if (!in) + return ERR_PTR(-ENOMEM); + + if (ft->autogroup.num_groups < ft->autogroup.required_groups) + /* We save place for flow groups in addition to max types */ + group_size = ft->max_fte / (ft->autogroup.required_groups + 1); + + /* ft->max_fte == ft->autogroup.max_types */ + if (group_size == 0) + group_size = 1; + + /* sorted by start_index */ + fs_for_each_fg(fg, ft) { + if (candidate_index + group_size > fg->start_index) + candidate_index = fg->start_index + fg->max_ftes; + else + break; + prev = &fg->node.list; + } + + if (candidate_index + group_size > ft->max_fte) { + fg = ERR_PTR(-ENOSPC); + goto out; + } + + MLX5_SET(create_flow_group_in, in, match_criteria_enable, + match_criteria_enable); + 
MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index); + MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index + + group_size - 1); + match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, + in, match_criteria); + memcpy(match_criteria_addr, match_criteria, + MLX5_ST_SZ_BYTES(fte_match_param)); + + fg = create_flow_group_common(ft, in, prev, true); +out: + kvfree(in); + return fg; +} + +static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, + u32 *match_value, + u8 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct fs_fte *fte; + struct mlx5_flow_rule *rule; + struct mlx5_flow_table *ft; + struct list_head *prev; + + nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT); + fs_for_each_fte(fte, fg) { + nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); + if (compare_match_value(&fg->mask, match_value, &fte->val) && + action == fte->action && flow_tag == fte->flow_tag) { + rule = add_rule_fte(fte, fg, dest); + unlock_ref_node(&fte->node); + if (IS_ERR(rule)) + goto unlock_fg; + else + goto add_rule; + } + unlock_ref_node(&fte->node); + } + fs_get_obj(ft, fg->node.parent); + if (fg->num_ftes >= fg->max_ftes) { + rule = ERR_PTR(-ENOSPC); + goto unlock_fg; + } + + fte = create_fte(fg, match_value, action, flow_tag, &prev); + if (IS_ERR(fte)) { + rule = (void *)fte; + goto unlock_fg; + } + tree_init_node(&fte->node, 0, del_fte); + rule = add_rule_fte(fte, fg, dest); + if (IS_ERR(rule)) { + kfree(fte); + goto unlock_fg; + } + + fg->num_ftes++; + + tree_add_node(&fte->node, &fg->node); + list_add(&fte->node.list, prev); +add_rule: + tree_add_node(&rule->node, &fte->node); +unlock_fg: + unlock_ref_node(&fg->node); + return rule; +} + +static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u8 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_rule *rule; + struct mlx5_flow_group *g; + + g = create_autogroup(ft, match_criteria_enable, match_criteria); + if (IS_ERR(g)) + return (void *)g; + + rule = add_rule_fg(g, match_value, + action, flow_tag, dest); + if (IS_ERR(rule)) { + /* Remove assumes refcount > 0 and autogroup creates a group + * with a refcount = 0. 
+ */ + tree_get_node(&g->node); + tree_remove_node(&g->node); + } + return rule; +} + +struct mlx5_flow_rule * +mlx5_add_flow_rule(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_group *g; + struct mlx5_flow_rule *rule; + + nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); + fs_for_each_fg(g, ft) + if (compare_match_criteria(g->mask.match_criteria_enable, + match_criteria_enable, + g->mask.match_criteria, + match_criteria)) { + rule = add_rule_fg(g, match_value, + action, flow_tag, dest); + if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) + goto unlock; + } + + rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, + match_value, action, flow_tag, dest); +unlock: + unlock_ref_node(&ft->node); + return rule; +} +EXPORT_SYMBOL(mlx5_add_flow_rule); + +void mlx5_del_flow_rule(struct mlx5_flow_rule *rule) +{ + tree_remove_node(&rule->node); +} +EXPORT_SYMBOL(mlx5_del_flow_rule); + +/* Assuming prio->node.children(flow tables) is sorted by level */ +static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) +{ + struct fs_prio *prio; + + fs_get_obj(prio, ft->node.parent); + + if (!list_is_last(&ft->node.list, &prio->node.children)) + return list_next_entry(ft, node.list); + return find_next_chained_ft(prio); +} + +static int update_root_ft_destroy(struct mlx5_flow_table *ft) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + struct mlx5_flow_table *new_root_ft = NULL; + + if (root->root_ft != ft) + return 0; + + new_root_ft = find_next_ft(ft); + if (new_root_ft) { + int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); + + if (err) { + mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", + ft->id); + return err; + } + root->root_ft = new_root_ft; + } + return 0; +} + +/* Connect flow table from previous priority to + * the next flow table. 
+ */ +static int disconnect_flow_table(struct mlx5_flow_table *ft) +{ + struct mlx5_core_dev *dev = get_dev(&ft->node); + struct mlx5_flow_table *next_ft; + struct fs_prio *prio; + int err = 0; + + err = update_root_ft_destroy(ft); + if (err) + return err; + + fs_get_obj(prio, ft->node.parent); + if (!(list_first_entry(&prio->node.children, + struct mlx5_flow_table, + node.list) == ft)) + return 0; + + next_ft = find_next_chained_ft(prio); + err = connect_prev_fts(dev, next_ft, prio); + if (err) + mlx5_core_warn(dev, "Failed to disconnect flow table %d\n", + ft->id); + return err; +} + +int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + int err = 0; + + mutex_lock(&root->chain_lock); + err = disconnect_flow_table(ft); + if (err) { + mutex_unlock(&root->chain_lock); + return err; + } + if (tree_remove_node(&ft->node)) + mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", + ft->id); + mutex_unlock(&root->chain_lock); + + return err; +} +EXPORT_SYMBOL(mlx5_destroy_flow_table); + +void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) +{ + if (tree_remove_node(&fg->node)) + mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", + fg->id); +} + +struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type) +{ + struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; + int prio; + static struct fs_prio *fs_prio; + struct mlx5_flow_namespace *ns; + + if (!root_ns) + return NULL; + + switch (type) { + case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_KERNEL: + case MLX5_FLOW_NAMESPACE_LEFTOVERS: + prio = type; + break; + case MLX5_FLOW_NAMESPACE_FDB: + if (dev->priv.fdb_root_ns) + return &dev->priv.fdb_root_ns->ns; + else + return NULL; + default: + return NULL; + } + + fs_prio = find_prio(&root_ns->ns, prio); + if (!fs_prio) + return NULL; + + ns = list_first_entry(&fs_prio->node.children, + typeof(*ns), + node.list); + + return ns; +} +EXPORT_SYMBOL(mlx5_get_flow_namespace); + +static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, + unsigned prio, int max_ft) +{ + struct fs_prio *fs_prio; + + fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL); + if (!fs_prio) + return ERR_PTR(-ENOMEM); + + fs_prio->node.type = FS_TYPE_PRIO; + tree_init_node(&fs_prio->node, 1, NULL); + tree_add_node(&fs_prio->node, &ns->node); + fs_prio->max_ft = max_ft; + fs_prio->prio = prio; + list_add_tail(&fs_prio->node.list, &ns->node.children); + + return fs_prio; +} + +static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace + *ns) +{ + ns->node.type = FS_TYPE_NAMESPACE; + + return ns; +} + +static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) +{ + struct mlx5_flow_namespace *ns; + + ns = kzalloc(sizeof(*ns), GFP_KERNEL); + if (!ns) + return ERR_PTR(-ENOMEM); + + fs_init_namespace(ns); + tree_init_node(&ns->node, 1, NULL); + tree_add_node(&ns->node, &prio->node); + list_add_tail(&ns->node.list, &prio->node.children); + + return ns; +} + +static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node + *prio_metadata) +{ + struct fs_prio *fs_prio; + int i; + + for (i = 0; i < prio_metadata->num_leaf_prios; i++) { + fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft); + if (IS_ERR(fs_prio)) + return PTR_ERR(fs_prio); + } + return 0; +} + +#define FLOW_TABLE_BIT_SZ 1 +#define GET_FLOW_TABLE_CAP(dev, offset) \ + ((be32_to_cpu(*((__be32 
*)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + offset / 32)) >> \ + (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) +static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) +{ + int i; + + for (i = 0; i < caps->arr_sz; i++) { + if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i])) + return false; + } + return true; +} + +static int init_root_tree_recursive(struct mlx5_core_dev *dev, + struct init_tree_node *init_node, + struct fs_node *fs_parent_node, + struct init_tree_node *init_parent_node, + int index) +{ + int max_ft_level = MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive. + max_ft_level); + struct mlx5_flow_namespace *fs_ns; + struct fs_prio *fs_prio; + struct fs_node *base; + int i; + int err; + + if (init_node->type == FS_TYPE_PRIO) { + if ((init_node->min_ft_level > max_ft_level) || + !has_required_caps(dev, &init_node->caps)) + return 0; + + fs_get_obj(fs_ns, fs_parent_node); + if (init_node->num_leaf_prios) + return create_leaf_prios(fs_ns, init_node); + fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft); + if (IS_ERR(fs_prio)) + return PTR_ERR(fs_prio); + base = &fs_prio->node; + } else if (init_node->type == FS_TYPE_NAMESPACE) { + fs_get_obj(fs_prio, fs_parent_node); + fs_ns = fs_create_namespace(fs_prio); + if (IS_ERR(fs_ns)) + return PTR_ERR(fs_ns); + base = &fs_ns->node; + } else { + return -EINVAL; + } + for (i = 0; i < init_node->ar_size; i++) { + err = init_root_tree_recursive(dev, &init_node->children[i], + base, init_node, i); + if (err) + return err; + } + + return 0; +} + +static int init_root_tree(struct mlx5_core_dev *dev, + struct init_tree_node *init_node, + struct fs_node *fs_parent_node) +{ + int i; + struct mlx5_flow_namespace *fs_ns; + int err; + + fs_get_obj(fs_ns, fs_parent_node); + for (i = 0; i < init_node->ar_size; i++) { + err = init_root_tree_recursive(dev, &init_node->children[i], + &fs_ns->node, + init_node, i); + if (err) + return err; + } + return 0; +} + +static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, + enum fs_flow_table_type + table_type) +{ + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_flow_namespace *ns; + + /* Create the root namespace */ + root_ns = mlx5_vzalloc(sizeof(*root_ns)); + if (!root_ns) + return NULL; + + root_ns->dev = dev; + root_ns->table_type = table_type; + + ns = &root_ns->ns; + fs_init_namespace(ns); + mutex_init(&root_ns->chain_lock); + tree_init_node(&ns->node, 1, NULL); + tree_add_node(&ns->node, NULL); + + return root_ns; +} + +static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level); + +static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level) +{ + struct fs_prio *prio; + + fs_for_each_prio(prio, ns) { + /* This updates prio start_level and max_ft */ + set_prio_attrs_in_prio(prio, acc_level); + acc_level += prio->max_ft; + } + return acc_level; +} + +static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) +{ + struct mlx5_flow_namespace *ns; + int acc_level_ns = acc_level; + + prio->start_level = acc_level; + fs_for_each_ns(ns, prio) + /* This updates start_level and max_ft of ns's priority descendants */ + acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); + if (!prio->max_ft) + prio->max_ft = acc_level_ns - prio->start_level; + WARN_ON(prio->max_ft < acc_level_ns - prio->start_level); +} + +static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) +{ + struct mlx5_flow_namespace *ns = &root_ns->ns; + struct fs_prio *prio; + int start_level = 0; + + 
fs_for_each_prio(prio, ns) { + set_prio_attrs_in_prio(prio, start_level); + start_level += prio->max_ft; + } +} + +static int init_root_ns(struct mlx5_core_dev *dev) +{ + + dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); + if (IS_ERR_OR_NULL(dev->priv.root_ns)) + goto cleanup; + + if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) + goto cleanup; + + set_prio_attrs(dev->priv.root_ns); + + return 0; + +cleanup: + mlx5_cleanup_fs(dev); + return -ENOMEM; +} + +static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, + struct mlx5_flow_root_namespace *root_ns) +{ + struct fs_node *prio; + + if (!root_ns) + return; + + if (!list_empty(&root_ns->ns.node.children)) { + prio = list_first_entry(&root_ns->ns.node.children, + struct fs_node, + list); + if (tree_remove_node(prio)) + mlx5_core_warn(dev, + "Flow steering priority wasn't destroyed, refcount > 1\n"); + } + if (tree_remove_node(&root_ns->ns.node)) + mlx5_core_warn(dev, + "Flow steering namespace wasn't destroyed, refcount > 1\n"); + root_ns = NULL; +} + +static void cleanup_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; + struct fs_prio *iter_prio; + + if (!MLX5_CAP_GEN(dev, nic_flow_table)) + return; + + if (!root_ns) + return; + + /* stage 1 */ + fs_for_each_prio(iter_prio, &root_ns->ns) { + struct fs_node *node; + struct mlx5_flow_namespace *iter_ns; + + fs_for_each_ns_or_ft(node, iter_prio) { + if (node->type == FS_TYPE_FLOW_TABLE) + continue; + fs_get_obj(iter_ns, node); + while (!list_empty(&iter_ns->node.children)) { + struct fs_prio *obj_iter_prio2; + struct fs_node *iter_prio2 = + list_first_entry(&iter_ns->node.children, + struct fs_node, + list); + + fs_get_obj(obj_iter_prio2, iter_prio2); + if (tree_remove_node(iter_prio2)) { + mlx5_core_warn(dev, + "Priority %d wasn't destroyed, refcount > 1\n", + obj_iter_prio2->prio); + return; + } + } + } + } + + /* stage 2 */ + fs_for_each_prio(iter_prio, &root_ns->ns) { + while (!list_empty(&iter_prio->node.children)) { + struct fs_node *iter_ns = + list_first_entry(&iter_prio->node.children, + struct fs_node, + list); + if (tree_remove_node(iter_ns)) { + mlx5_core_warn(dev, + "Namespace wasn't destroyed, refcount > 1\n"); + return; + } + } + } + + /* stage 3 */ + while (!list_empty(&root_ns->ns.node.children)) { + struct fs_prio *obj_prio_node; + struct fs_node *prio_node = + list_first_entry(&root_ns->ns.node.children, + struct fs_node, + list); + + fs_get_obj(obj_prio_node, prio_node); + if (tree_remove_node(prio_node)) { + mlx5_core_warn(dev, + "Priority %d wasn't destroyed, refcount > 1\n", + obj_prio_node->prio); + return; + } + } + + if (tree_remove_node(&root_ns->ns.node)) { + mlx5_core_warn(dev, + "root namespace wasn't destroyed, refcount > 1\n"); + return; + } + + dev->priv.root_ns = NULL; +} + +void mlx5_cleanup_fs(struct mlx5_core_dev *dev) +{ + cleanup_root_ns(dev); + cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); +} + +static int init_fdb_root_ns(struct mlx5_core_dev *dev) +{ + struct fs_prio *prio; + + dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB); + if (!dev->priv.fdb_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); + return PTR_ERR(prio); + } else { + return 0; + } +} + +int mlx5_init_fs(struct mlx5_core_dev *dev) +{ + int err = 0; + + if (MLX5_CAP_GEN(dev, nic_flow_table)) { + err = init_root_ns(dev); + if (err) + return 
err; + } + if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + err = init_fdb_root_ns(dev); + if (err) + cleanup_root_ns(dev); + } + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h new file mode 100644 index 000000000000..00245fd7e4bc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_CORE_ +#define _MLX5_FS_CORE_ + +#include <linux/mlx5/fs.h> + +enum fs_node_type { + FS_TYPE_NAMESPACE, + FS_TYPE_PRIO, + FS_TYPE_FLOW_TABLE, + FS_TYPE_FLOW_GROUP, + FS_TYPE_FLOW_ENTRY, + FS_TYPE_FLOW_DEST +}; + +enum fs_flow_table_type { + FS_FT_NIC_RX = 0x0, + FS_FT_FDB = 0X4, +}; + +enum fs_fte_status { + FS_FTE_STATUS_EXISTING = 1UL << 0, +}; + +struct fs_node { + struct list_head list; + struct list_head children; + enum fs_node_type type; + struct fs_node *parent; + struct fs_node *root; + /* lock the node for writing and traversing */ + struct mutex lock; + atomic_t refcount; + void (*remove_func)(struct fs_node *); +}; + +struct mlx5_flow_rule { + struct fs_node node; + struct mlx5_flow_destination dest_attr; +}; + +/* Type of children is mlx5_flow_group */ +struct mlx5_flow_table { + struct fs_node node; + u32 id; + unsigned int max_fte; + unsigned int level; + enum fs_flow_table_type type; + struct { + bool active; + unsigned int required_groups; + unsigned int num_groups; + } autogroup; +}; + +/* Type of children is mlx5_flow_rule */ +struct fs_fte { + struct fs_node node; + u32 val[MLX5_ST_SZ_DW(fte_match_param)]; + u32 dests_size; + u32 flow_tag; + u32 index; + u32 action; + enum fs_fte_status status; +}; + +/* Type of children is mlx5_flow_table/namespace */ +struct fs_prio { + struct fs_node node; + unsigned int max_ft; + unsigned int start_level; + unsigned int prio; + unsigned int num_ft; +}; + +/* Type of children is fs_prio */ +struct mlx5_flow_namespace { + /* parent == NULL => root ns */ + struct fs_node node; +}; + +struct mlx5_flow_group_mask { + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; +}; + +/* Type of children is fs_fte */ +struct mlx5_flow_group { + struct fs_node node; + struct mlx5_flow_group_mask mask; + u32 start_index; + u32 max_ftes; + u32 num_ftes; + u32 id; +}; + +struct mlx5_flow_root_namespace { + struct mlx5_flow_namespace ns; + enum fs_flow_table_type table_type; + struct mlx5_core_dev *dev; + struct mlx5_flow_table *root_ft; + /* Should be held when chaining flow tables */ + struct mutex chain_lock; +}; + +int mlx5_init_fs(struct mlx5_core_dev *dev); +void mlx5_cleanup_fs(struct mlx5_core_dev *dev); + +#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } + +#define fs_list_for_each_entry(pos, root) \ + list_for_each_entry(pos, root, node.list) + +#define fs_for_each_ns_or_ft_reverse(pos, prio) \ + list_for_each_entry_reverse(pos, &(prio)->node.children, list) + +#define fs_for_each_ns_or_ft(pos, prio) \ + list_for_each_entry(pos, (&(prio)->node.children), list) + +#define fs_for_each_prio(pos, ns) \ + fs_list_for_each_entry(pos, &(ns)->node.children) + +#define fs_for_each_ns(pos, prio) \ + fs_list_for_each_entry(pos, &(prio)->node.children) + +#define fs_for_each_ft(pos, prio) \ + fs_list_for_each_entry(pos, &(prio)->node.children) + +#define fs_for_each_fg(pos, ft) \ + fs_list_for_each_entry(pos, &(ft)->node.children) + +#define fs_for_each_fte(pos, fg) \ + fs_list_for_each_entry(pos, &(fg)->node.children) + +#define fs_for_each_dst(pos, fte) \ + fs_list_for_each_entry(pos, &(fte)->node.children) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 9335e5ae18cc..aa1ab4702385 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -160,6 +160,30 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) if (err) return err; } + + if 
(MLX5_CAP_GEN(dev, vport_group_manager) && + MLX5_CAP_GEN(dev, eswitch_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4ac8d4cc4973..67676cf0d507 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -49,6 +49,10 @@ #include <linux/delay.h> #include <linux/mlx5/mlx5_ifc.h> #include "mlx5_core.h" +#include "fs_core.h" +#ifdef CONFIG_MLX5_CORE_EN +#include "eswitch.h" +#endif MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); @@ -454,6 +458,9 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) struct mlx5_reg_host_endianess he_out; int err; + if (!mlx5_core_is_pf(dev)) + return 0; + memset(&he_in, 0, sizeof(he_in)); he_in.he = MLX5_SET_HOST_ENDIANNESS; err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), @@ -462,42 +469,52 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) return err; } -static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) +int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) { + u32 out[MLX5_ST_SZ_DW(enable_hca_out)]; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)]; int err; - struct mlx5_enable_hca_mbox_in in; - struct mlx5_enable_hca_mbox_out out; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA); + memset(in, 0, sizeof(in)); + MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); + MLX5_SET(enable_hca_in, in, function_id, func_id); + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return 0; + return mlx5_cmd_status_to_err_v2(out); } -static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) +int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) { + u32 out[MLX5_ST_SZ_DW(disable_hca_out)]; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)]; int err; - struct mlx5_disable_hca_mbox_in in; - struct mlx5_disable_hca_mbox_out out; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + memset(in, 0, sizeof(in)); + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + MLX5_SET(disable_hca_in, in, function_id, func_id); + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); + return mlx5_cmd_status_to_err_v2(out); +} - return 0; +cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) +{ + u32 timer_h, timer_h1, timer_l; + + timer_h = ioread32be(&dev->iseg->internal_timer_h); + timer_l = ioread32be(&dev->iseg->internal_timer_l); + timer_h1 = ioread32be(&dev->iseg->internal_timer_h); + if (timer_h != timer_h1) /* wrap around */ + timer_l = ioread32be(&dev->iseg->internal_timer_l); + + return 
(cycle_t)timer_l | (cycle_t)timer_h1 << 32; } static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) @@ -942,7 +959,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_pagealloc_init(dev); - err = mlx5_core_enable_hca(dev); + err = mlx5_core_enable_hca(dev, 0); if (err) { dev_err(&pdev->dev, "enable hca failed\n"); goto err_pagealloc_cleanup; @@ -1052,6 +1069,25 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_init_srq_table(dev); mlx5_init_mr_table(dev); + err = mlx5_init_fs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init flow steering\n"); + goto err_fs; + } +#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_eswitch_init(dev); + if (err) { + dev_err(&pdev->dev, "eswitch init failed %d\n", err); + goto err_reg_dev; + } +#endif + + err = mlx5_sriov_init(dev); + if (err) { + dev_err(&pdev->dev, "sriov init failed %d\n", err); + goto err_sriov; + } + err = mlx5_register_device(dev); if (err) { dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); @@ -1068,7 +1104,16 @@ out: return 0; +err_sriov: + if (mlx5_sriov_cleanup(dev)) + dev_err(&dev->pdev->dev, "sriov cleanup failed\n"); + +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_cleanup(dev->priv.eswitch); +#endif err_reg_dev: + mlx5_cleanup_fs(dev); +err_fs: mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); @@ -1106,7 +1151,7 @@ reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_disable_hca: - mlx5_core_disable_hca(dev); + mlx5_core_disable_hca(dev, 0); err_pagealloc_cleanup: mlx5_pagealloc_cleanup(dev); @@ -1123,6 +1168,13 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { int err = 0; + err = mlx5_sriov_cleanup(dev); + if (err) { + dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n", + __func__); + return err; + } + mutex_lock(&dev->intf_state_mutex); if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", @@ -1130,6 +1182,11 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } mlx5_unregister_device(dev); +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_cleanup(dev->priv.eswitch); +#endif + + mlx5_cleanup_fs(dev); mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); @@ -1149,7 +1206,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) } mlx5_pagealloc_stop(dev); mlx5_reclaim_startup_pages(dev); - mlx5_core_disable_hca(dev); + mlx5_core_disable_hca(dev, 0); mlx5_pagealloc_cleanup(dev); mlx5_cmd_cleanup(dev); @@ -1195,6 +1252,7 @@ static int init_one(struct pci_dev *pdev, return -ENOMEM; } priv = &dev->priv; + priv->pci_dev_data = id->driver_data; pci_set_drvdata(pdev, dev); @@ -1365,12 +1423,12 @@ static const struct pci_error_handlers mlx5_err_handler = { }; static const struct pci_device_id mlx5_core_pci_table[] = { - { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ - { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */ - { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ - { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */ - { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ - { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */ + { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ + { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ + { 
PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ { 0, } }; @@ -1381,7 +1439,8 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, - .err_handler = &mlx5_err_handler + .err_handler = &mlx5_err_handler, + .sriov_configure = mlx5_core_sriov_configure, }; static int __init init(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index cee5b7a839bc..0336847ec9a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -36,6 +36,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/if_link.h> #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "3.0-1" @@ -64,6 +65,9 @@ do { \ (__dev)->priv.name, __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_info(__dev, format, ...) \ + dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) + enum { MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_TIME, /* print command execution time */ @@ -90,6 +94,11 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); +int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); +int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); +cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4d3377b12657..9eeee0545f1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -33,6 +33,7 @@ #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/delay.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" @@ -95,6 +96,7 @@ struct mlx5_manage_pages_outbox { enum { MAX_RECLAIM_TIME_MSECS = 5000, + MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60, }; enum { @@ -352,6 +354,10 @@ retry: goto out_4k; } + dev->priv.fw_pages += npages; + if (func_id) + dev->priv.vfs_pages += npages; + mlx5_core_dbg(dev, "err %d\n", err); kvfree(in); @@ -405,6 +411,12 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } num_claimed = be32_to_cpu(out->num_entries); + if (num_claimed > npages) { + mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n", + num_claimed, npages); + err = -EINVAL; + goto out_free; + } if (nclaimed) *nclaimed = num_claimed; @@ -412,6 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, addr = be64_to_cpu(out->pas[i]); free_4k(dev, addr); } + dev->priv.fw_pages -= num_claimed; + if (func_id) + dev->priv.vfs_pages -= num_claimed; out_free: kvfree(out); @@ -548,3 +563,26 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) { destroy_workqueue(dev->priv.pg_wq); } + +int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) +{ + unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); + int prev_vfs_pages = dev->priv.vfs_pages; + + mlx5_core_dbg(dev, "Waiting for %d 
pages from %s\n", prev_vfs_pages, + dev->priv.name); + while (dev->priv.vfs_pages) { + if (time_after(jiffies, end)) { + mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages); + return -ETIMEDOUT; + } + if (dev->priv.vfs_pages < prev_vfs_pages) { + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); + prev_vfs_pages = dev->priv.vfs_pages; + } + msleep(50); + } + + mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c new file mode 100644 index 000000000000..7b24386794f9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/pci.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#ifdef CONFIG_MLX5_CORE_EN +#include "eswitch.h" +#endif + +static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int err; + int vf; + + for (vf = 1; vf <= num_vfs; vf++) { + err = mlx5_core_enable_hca(dev, vf); + if (err) { + mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1); + } else { + sriov->vfs_ctx[vf - 1].enabled = 1; + mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1); + } + } +} + +static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int vf; + + for (vf = 1; vf <= num_vfs; vf++) { + if (sriov->vfs_ctx[vf - 1].enabled) { + if (mlx5_core_disable_hca(dev, vf)) + mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1); + else + sriov->vfs_ctx[vf - 1].enabled = 0; + } + } +} + +static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + int err; + + if (pci_num_vf(pdev)) + pci_disable_sriov(pdev); + + enable_vfs(dev, num_vfs); + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_warn(&pdev->dev, "enable sriov failed %d\n", err); + goto ex; + } + + return 0; + +ex: + disable_vfs(dev, num_vfs); + return err; +} + +static int mlx5_core_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int err; + + kfree(sriov->vfs_ctx); + sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC); + if (!sriov->vfs_ctx) + return -ENOMEM; + + sriov->enabled_vfs = num_vfs; + err = mlx5_core_create_vfs(pdev, num_vfs); + if (err) { + kfree(sriov->vfs_ctx); + sriov->vfs_ctx = NULL; + return err; + } + + return 0; +} + +static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + + sriov->num_vfs = num_vfs; +} + +static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev) +{ + struct mlx5_core_sriov *sriov; + + sriov = &dev->priv.sriov; + disable_vfs(dev, sriov->num_vfs); + + if (mlx5_wait_for_vf_pages(dev)) + mlx5_core_warn(dev, "timeout claiming VFs pages\n"); + + sriov->num_vfs = 0; +} + +int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int err; + + mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs); + if (!mlx5_core_is_pf(dev)) + return -EPERM; + + mlx5_core_cleanup_vfs(dev); + + if (!num_vfs) { +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_disable_sriov(dev->priv.eswitch); +#endif + kfree(sriov->vfs_ctx); + sriov->vfs_ctx = NULL; + if (!pci_vfs_assigned(pdev)) + pci_disable_sriov(pdev); + else + pr_info("unloading PF driver while leaving orphan VFs\n"); + return 0; + } + + err = mlx5_core_sriov_enable(pdev, num_vfs); + if (err) { + dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err); + return err; + } + + mlx5_core_init_vfs(dev, num_vfs); +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs); +#endif + + return num_vfs; +} + +static int sync_required(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int cur_vfs = pci_num_vf(pdev); + + if (cur_vfs != sriov->num_vfs) { + pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs); + return 1; + } + + 
return 0; +} + +int mlx5_sriov_init(struct mlx5_core_dev *dev) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + struct pci_dev *pdev = dev->pdev; + int cur_vfs; + + if (!mlx5_core_is_pf(dev)) + return 0; + + if (!sync_required(dev->pdev)) + return 0; + + cur_vfs = pci_num_vf(pdev); + sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); + if (!sriov->vfs_ctx) + return -ENOMEM; + + sriov->enabled_vfs = cur_vfs; + + mlx5_core_init_vfs(dev, cur_vfs); +#ifdef CONFIG_MLX5_CORE_EN + if (cur_vfs) + mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs); +#endif + + enable_vfs(dev, cur_vfs); + + return 0; +} + +int mlx5_sriov_cleanup(struct mlx5_core_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + int err; + + if (!mlx5_core_is_pf(dev)) + return 0; + + err = mlx5_core_sriov_configure(pdev, 0); + if (err) + return err; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b94177ebcf3a..076197efea9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -36,54 +36,399 @@ #include <linux/mlx5/vport.h> #include "mlx5_core.h" -u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) +static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; - u32 out[MLX5_ST_SZ_DW(query_vport_state_out)]; int err; + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; memset(in, 0, sizeof(in)); MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE); MLX5_SET(query_vport_state_in, in, op_mod, opmod); + MLX5_SET(query_vport_state_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vport_state_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, - sizeof(out)); + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); if (err) mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); + return err; +} + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) +{ + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0}; + + _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out)); + return MLX5_GET(query_vport_state_out, out, state); } -EXPORT_SYMBOL(mlx5_query_vport_state); +EXPORT_SYMBOL_GPL(mlx5_query_vport_state); + +u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) +{ + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0}; + + _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out)); + + return MLX5_GET(query_vport_state_out, out, admin_state); +} +EXPORT_SYMBOL(mlx5_query_vport_admin_state); -void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) +int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u8 state) +{ + u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]; + u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(modify_vport_state_in, in, opcode, + MLX5_CMD_OP_MODIFY_VPORT_STATE); + MLX5_SET(modify_vport_state_in, in, op_mod, opmod); + MLX5_SET(modify_vport_state_in, in, vport_number, vport); + + if (vport) + MLX5_SET(modify_vport_state_in, in, other_vport, 1); + + MLX5_SET(modify_vport_state_in, in, admin_state, state); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); + if (err) + mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n"); + + return err; +} 
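
A minimal usage sketch (not part of this patch) of the per-vport admin-state helpers added above. The function name is hypothetical, and the caller is assumed to pick the opmod and state encodings from mlx5_ifc.h, so they are passed in as plain parameters here:

        /* Hypothetical caller-side helper, for illustration only: put a
         * vport into the requested administrative state using the new
         * mlx5_query_vport_admin_state()/mlx5_modify_vport_admin_state()
         * helpers. Skips the modify command when the state already matches.
         */
        static int example_set_vport_admin_state(struct mlx5_core_dev *mdev,
                                                 u8 opmod, u16 vport, u8 state)
        {
                u8 cur = mlx5_query_vport_admin_state(mdev, opmod, vport);

                if (cur == state)
                        return 0;       /* already in the requested state */

                return mlx5_modify_vport_admin_state(mdev, opmod, vport, state);
        }
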
+EXPORT_SYMBOL(mlx5_modify_vport_admin_state); + +static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); +} + +static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); +} + +int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); u8 *out_addr; + int err; out = mlx5_vzalloc(outlen); if (!out) - return; + return -ENOMEM; out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, nic_vport_context.permanent_address); + err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); + if (err) + goto out; + + ether_addr_copy(addr, &out_addr[2]); + +out: + kvfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address); + +int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr) +{ + void *in; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + int err; + void *nic_vport_ctx; + u8 *perm_mac; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(mdev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(modify_nic_vport_context_in, in, + field_select.permanent_address, 1); + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); + + if (vport) + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, + in, nic_vport_context); + perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx, + permanent_address); + + ether_addr_copy(&perm_mac[2], addr); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address); + +int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, + u32 vport, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + void *nic_vport_ctx; + int max_list_size; + int req_list_size; + int out_sz; + void *out; + int err; + int i; + + req_list_size = *list_size; + + max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ? 
+ 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); + + if (req_list_size > max_list_size) { + mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type); + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); - memset(out, 0, outlen); - mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + if (vport) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - ether_addr_copy(addr, &out_addr[2]); + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + if (err) + goto out; - kvfree(out); + nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context); + req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx, + allowed_list_size); + + *list_size = req_list_size; + for (i = 0; i < req_list_size; i++) { + u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]) + 2; + ether_addr_copy(addr_list[i], mac_addr); + } +out: + kfree(out); + return err; } -EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address); +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list); + +int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size) +{ + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + void *nic_vport_ctx; + int max_list_size; + int in_sz; + void *in; + int err; + int i; + + max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ? 
+ 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + list_size * MLX5_ST_SZ_BYTES(mac_address_layout); + + memset(out, 0, sizeof(out)); + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + MLX5_SET(modify_nic_vport_context_in, in, + field_select.addresses_list, 1); + + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, + nic_vport_context); + + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_type, list_type); + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_size, list_size); + + for (i = 0; i < list_size; i++) { + u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]) + 2; + ether_addr_copy(curr_mac, addr_list[i]); + } + + err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list); + +int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, + u32 vport, + u16 vlans[], + int *size) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + void *nic_vport_ctx; + int req_list_size; + int max_list_size; + int out_sz; + void *out; + int err; + int i; + + req_list_size = *size; + max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list); + if (req_list_size > max_list_size) { + mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + req_list_size * MLX5_ST_SZ_BYTES(vlan_layout); + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, + MLX5_NVPRT_LIST_TYPE_VLAN); + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); + + if (vport) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + if (err) + goto out; + + nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context); + req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx, + allowed_list_size); + + *size = req_list_size; + for (i = 0; i < req_list_size; i++) { + void *vlan_addr = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]); + vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans); + +int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, + u16 vlans[], + int list_size) +{ + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + void *nic_vport_ctx; + int max_list_size; + int in_sz; + void *in; + int err; + int i; + + max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list); + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + list_size * MLX5_ST_SZ_BYTES(vlan_layout); + + memset(out, 0, sizeof(out)); + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + MLX5_SET(modify_nic_vport_context_in, in, + field_select.addresses_list, 1); + + nic_vport_ctx = 
MLX5_ADDR_OF(modify_nic_vport_context_in, in, + nic_vport_context); + + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN); + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_size, list_size); + + for (i = 0; i < list_size; i++) { + void *vlan_addr = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]); + MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]); + } + + err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 gid_index, @@ -343,3 +688,65 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, return err; } EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); + +int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, + u32 vport, + int *promisc_uc, + int *promisc_mc, + int *promisc_all) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; + + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); + if (err) + goto out; + + *promisc_uc = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.promisc_uc); + *promisc_mc = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.promisc_mc); + *promisc_all = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.promisc_all); + +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc); + +int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, + int promisc_uc, + int promisc_mc, + int promisc_all) +{ + void *in; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_err(mdev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1); + MLX5_SET(modify_nic_vport_context_in, in, + nic_vport_context.promisc_uc, promisc_uc); + MLX5_SET(modify_nic_vport_context_in, in, + nic_vport_context.promisc_mc, promisc_mc); + MLX5_SET(modify_nic_vport_context_in, in, + nic_vport_context.promisc_all, promisc_all); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index e36e12219c9b..ce26adcb4988 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -10,6 +10,14 @@ config MLXSW_CORE To compile this driver as a module, choose M here: the module will be called mlxsw_core. +config MLXSW_CORE_HWMON + bool "HWMON support for Mellanox Technologies Switch ASICs" + depends on MLXSW_CORE && HWMON + depends on !(MLXSW_CORE=y && HWMON=m) + default y + ---help--- + Say Y here if you want to expose HWMON interface on mlxsw devices. 
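
For reference, the hwmon device registered by core_hwmon.c further down in this diff exposes the standard sysfs attributes temp<i>_input, temp<i>_highest and temp<i>_reset_history for each temperature sensor, plus fan<i>_input for each active tachometer and pwm<i> for each PWM, under the usual /sys/class/hwmon/hwmon<N>/ directory.
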
+ config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE @@ -33,7 +41,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" - depends on MLXSW_CORE && NET_SWITCHDEV + depends on MLXSW_CORE && NET_SWITCHDEV && VLAN_8021Q default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index af015818fd19..584cac444852 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o +mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o mlxsw_pci-objs := pci.o obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 97f0d93caf99..22379eb8e924 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -105,6 +105,10 @@ struct mlxsw_core { struct debugfs_blob_wrapper vsd_blob; struct debugfs_blob_wrapper psid_blob; } dbg; + struct { + u8 *mapping; /* lag_id+port_index to local_port mapping */ + } lag; + struct mlxsw_hwmon *hwmon; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; @@ -814,6 +818,17 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_alloc_stats; } + if (mlxsw_driver->profile->used_max_lag && + mlxsw_driver->profile->used_max_port_per_lag) { + alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag * + mlxsw_driver->profile->max_port_per_lag; + mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_core->lag.mapping) { + err = -ENOMEM; + goto err_alloc_lag_mapping; + } + } + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); if (err) goto err_bus_init; @@ -822,6 +837,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; + err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); + if (err) + goto err_hwmon_init; + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, mlxsw_bus_info); if (err) @@ -836,10 +855,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core->driver_priv); err_driver_init: +err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: mlxsw_bus->fini(bus_priv); err_bus_init: + kfree(mlxsw_core->lag.mapping); +err_alloc_lag_mapping: free_percpu(mlxsw_core->pcpu_stats); err_alloc_stats: kfree(mlxsw_core); @@ -857,6 +879,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) mlxsw_core->driver->fini(mlxsw_core->driver_priv); mlxsw_emad_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); + kfree(mlxsw_core->lag.mapping); free_percpu(mlxsw_core->pcpu_stats); kfree(mlxsw_core); mlxsw_core_driver_put(device_kind); @@ -1188,11 +1211,25 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, struct mlxsw_rx_listener_item *rxl_item; const struct mlxsw_rx_listener *rxl; struct mlxsw_core_pcpu_stats *pcpu_stats; - u8 local_port = rx_info->sys_port; + u8 local_port; bool found = false; - dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n", - __func__, 
rx_info->sys_port, rx_info->trap_id); + if (rx_info->is_lag) { + dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n", + __func__, rx_info->u.lag_id, + rx_info->trap_id); + /* Upper layer does not care if the skb came from LAG or not, + * so just get the local_port for the lag port and push it up. + */ + local_port = mlxsw_core_lag_mapping_get(mlxsw_core, + rx_info->u.lag_id, + rx_info->lag_port_index); + } else { + local_port = rx_info->u.sys_port; + } + + dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n", + __func__, local_port, rx_info->trap_id); if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || (local_port >= MLXSW_PORT_MAX_PORTS)) @@ -1236,6 +1273,48 @@ drop: } EXPORT_SYMBOL(mlxsw_core_skb_receive); +static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index) +{ + return mlxsw_core->driver->profile->max_port_per_lag * lag_id + + port_index; +} + +void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index, u8 local_port) +{ + int index = mlxsw_core_lag_mapping_index(mlxsw_core, + lag_id, port_index); + + mlxsw_core->lag.mapping[index] = local_port; +} +EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); + +u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index) +{ + int index = mlxsw_core_lag_mapping_index(mlxsw_core, + lag_id, port_index); + + return mlxsw_core->lag.mapping[index]; +} +EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); + +void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 local_port) +{ + int i; + + for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) { + int index = mlxsw_core_lag_mapping_index(mlxsw_core, + lag_id, i); + + if (mlxsw_core->lag.mapping[index] == local_port) + mlxsw_core->lag.mapping[index] = 0; + } +} +EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); + int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod, bool out_mbox_direct, char *in_mbox, size_t in_mbox_size, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 807827350a89..a01723600f0a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -112,13 +112,25 @@ int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload); struct mlxsw_rx_info { - u16 sys_port; + bool is_lag; + union { + u16 sys_port; + u16 lag_id; + } u; + u8 lag_port_index; int trap_id; }; void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, struct mlxsw_rx_info *rx_info); +void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index, u8 local_port); +u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index); +void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 local_port); + #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 struct mlxsw_swid_config { @@ -209,4 +221,24 @@ struct mlxsw_bus_info { u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; }; +struct mlxsw_hwmon; + +#ifdef CONFIG_MLXSW_CORE_HWMON + +int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon); +void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon); + +#else + +static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon) +{ + return 0; 
+} + +#endif + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c new file mode 100644 index 000000000000..1ac8bf187168 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -0,0 +1,372 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/hwmon.h> +#include <linux/err.h> + +#include "core.h" + +#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127 +#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \ + MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX) + +struct mlxsw_hwmon_attr { + struct device_attribute dev_attr; + struct mlxsw_hwmon *hwmon; + unsigned int type_index; + char name[32]; +}; + +struct mlxsw_hwmon { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + struct device *hwmon_dev; + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1]; + struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT]; + unsigned int attrs_count; +}; + +static ssize_t mlxsw_hwmon_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, + false, false); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + return sprintf(buf, "%u\n", temp); +} + +static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp_max; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, + false, false); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL); + return sprintf(buf, "%u\n", temp_max); +} + +static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + if (val != 1) + return -EINVAL; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, true, true); + err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n"); + return err; + } + return len; +} + +static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsm_pl[MLXSW_REG_MFSM_LEN]; + int err; + + mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); + return err; + } + return sprintf(buf, "%u\n", 
mlxsw_reg_mfsm_rpm_get(mfsm_pl)); +} + +static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + int err; + + mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n"); + return err; + } + return sprintf(buf, "%u\n", + mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl)); +} + +static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + if (val > 255) + return -EINVAL; + + mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val); + err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n"); + return err; + } + return len; +} + +enum mlxsw_hwmon_attr_type { + MLXSW_HWMON_ATTR_TYPE_TEMP, + MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, + MLXSW_HWMON_ATTR_TYPE_TEMP_RST, + MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + MLXSW_HWMON_ATTR_TYPE_PWM, +}; + +static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, + enum mlxsw_hwmon_attr_type attr_type, + unsigned int type_index, unsigned int num) { + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr; + unsigned int attr_index; + + attr_index = mlxsw_hwmon->attrs_count; + mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index]; + + switch (attr_type) { + case MLXSW_HWMON_ATTR_TYPE_TEMP: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_highest", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_RST: + mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_reset_history", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_FAN_RPM: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "fan%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_PWM: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show; + mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "pwm%u", num + 1); + break; + default: + WARN_ON(1); + } + + mlxsw_hwmon_attr->type_index = type_index; + mlxsw_hwmon_attr->hwmon = mlxsw_hwmon; + mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name; + sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr); + + mlxsw_hwmon->attrs[attr_index] = 
&mlxsw_hwmon_attr->dev_attr.attr; + mlxsw_hwmon->attrs_count++; +} + +static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) +{ + char mtcap_pl[MLXSW_REG_MTCAP_LEN]; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + u8 sensor_count; + int i; + int err; + + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtcap), mtcap_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n"); + return err; + } + sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); + for (i = 0; i < sensor_count; i++) { + mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true); + err = mlxsw_reg_write(mlxsw_hwmon->core, + MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n", + i); + return err; + } + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP, i, i); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i); + } + return 0; +} + +static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) +{ + char mfcr_pl[MLXSW_REG_MFCR_LEN]; + enum mlxsw_reg_mfcr_pwm_frequency freq; + unsigned int type_index; + unsigned int num; + u16 tacho_active; + u8 pwm_active; + int err; + + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfcr), mfcr_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get to probe PWMs and Tachometers\n"); + return err; + } + mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active); + num = 0; + for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) { + if (tacho_active & BIT(type_index)) + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + type_index, num++); + } + num = 0; + for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) { + if (pwm_active & BIT(type_index)) + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_PWM, + type_index, num++); + } + return 0; +} + +int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon) +{ + struct mlxsw_hwmon *mlxsw_hwmon; + struct device *hwmon_dev; + int err; + + mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon), + GFP_KERNEL); + if (!mlxsw_hwmon) + return -ENOMEM; + mlxsw_hwmon->core = mlxsw_core; + mlxsw_hwmon->bus_info = mlxsw_bus_info; + + err = mlxsw_hwmon_temp_init(mlxsw_hwmon); + if (err) + goto err_temp_init; + + err = mlxsw_hwmon_fans_init(mlxsw_hwmon); + if (err) + goto err_fans_init; + + mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; + mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev, + "mlxsw", + mlxsw_hwmon, + mlxsw_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + err = PTR_ERR(hwmon_dev); + goto err_hwmon_register; + } + + mlxsw_hwmon->hwmon_dev = hwmon_dev; + *p_hwmon = mlxsw_hwmon; + return 0; + +err_hwmon_register: +err_fans_init: +err_temp_init: + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index de69e719dc9d..c071077aafbd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -384,7 +384,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, /* Set CQ of same number of this SDQ. 
*/ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); - mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7); + mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -686,11 +686,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - /* We do not support lag now */ - if (mlxsw_pci_cqe_lag_get(cqe)) - goto drop; + if (mlxsw_pci_cqe_lag_get(cqe)) { + rx_info.is_lag = true; + rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe); + rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe); + } else { + rx_info.is_lag = false; + rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe); + } - rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); byte_count = mlxsw_pci_cqe_byte_count_get(cqe); @@ -699,7 +703,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); -put_new_skb: memset(wqe, 0, q->elem_size); err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); if (err) @@ -708,10 +711,6 @@ put_new_skb: q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return; - -drop: - dev_kfree_skb_any(skb); - goto put_new_skb; } static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 142f33d978c5..912106054ff2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -129,13 +129,15 @@ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); */ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); -/* pci_cqe_system_port +/* pci_cqe_system_port/lag_id * When lag=0: System port on which the packet was received * When lag=1: * bits [15:4] LAG ID on which the packet was received * bits [3:0] sub_port on which the packet was received */ MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); +MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); +MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); /* pci_cqe_wqe_counter * WQE count of the WQEs completed on the associated dqn diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 236fb5d2ad69..0c5237264e3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -99,6 +99,55 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); +/* SMID - Switch Multicast ID + * -------------------------- + * The MID record maps from a MID (Multicast ID), which is a unique identifier + * of the multicast group within the stacking domain, into a list of local + * ports into which the packet is replicated. + */ +#define MLXSW_REG_SMID_ID 0x2007 +#define MLXSW_REG_SMID_LEN 0x240 + +static const struct mlxsw_reg_info mlxsw_reg_smid = { + .id = MLXSW_REG_SMID_ID, + .len = MLXSW_REG_SMID_LEN, +}; + +/* reg_smid_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); + +/* reg_smid_mid + * Multicast identifier - global identifier that represents the multicast group + * across all devices. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); + +/* reg_smid_port + * Local port memebership (1 bit per port). + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); + +/* reg_smid_port_mask + * Local port mask (1 bit per port). + * Access: W + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); + +static inline void mlxsw_reg_smid_pack(char *payload, u16 mid, + u8 port, bool set) +{ + MLXSW_REG_ZERO(smid, payload); + mlxsw_reg_smid_swid_set(payload, 0); + mlxsw_reg_smid_mid_set(payload, mid); + mlxsw_reg_smid_port_set(payload, port, set); + mlxsw_reg_smid_port_mask_set(payload, port, 1); +} + /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping. @@ -286,6 +335,8 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8, enum mlxsw_reg_sfd_rec_type { MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, + MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, + MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, }; /* reg_sfd_rec_type @@ -376,36 +427,144 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16, MLXSW_REG_SFD_REC_LEN, 0x0C, false); -static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, - enum mlxsw_reg_sfd_rec_policy policy, - const char *mac, u16 vid, - enum mlxsw_reg_sfd_rec_action action, - u8 local_port) +static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_type rec_type, + const char *mac, + enum mlxsw_reg_sfd_rec_action action) { u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload); if (rec_index >= num_rec) mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1); mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0); - mlxsw_reg_sfd_rec_type_set(payload, rec_index, - MLXSW_REG_SFD_REC_TYPE_UNICAST); - mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type); mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac); - mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); - mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid); mlxsw_reg_sfd_rec_action_set(payload, rec_index, action); +} + +static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, + u8 local_port) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid); mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); } static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, - char *mac, u16 *p_vid, + char *mac, u16 *p_fid_vid, u8 *p_local_port) { mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); - *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); + *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); } +/* reg_sfd_uc_lag_sub_port + * LAG sub port. + * Must be 0 if multichannel VEPA is not enabled. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_lag_fid_vid + * Filtering ID or VLAN ID + * For SwitchX and SwitchX-2: + * - Dynamic entries (policy 2,3) use FID + * - Static entries (policy 0) use VID + * - When independent learning is configured, VID=FID + * For Spectrum: use FID for both Dynamic and Static entries. + * VID should not be used. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_lag_lag_vid + * Indicates VID in case of vFIDs. Reserved for FIDs. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_lag_lag_id + * LAG Identifier - pointer into the LAG descriptor table. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 lag_vid, + u16 lag_id) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG, + mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid); + mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id); +} + +static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u16 *p_lag_id) +{ + mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index); + *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index); +} + +/* reg_sfd_mc_pgi + * + * Multicast port group index - index into the port group table. + * Value 0x1FFF indicates the pgi should point to the MID entry. + * For Spectrum this value must be set to 0x1FFF + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_fid_vid + * + * Filtering ID or VLAN ID + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_mid + * + * Multicast identifier - global identifier that represents the multicast + * group across all devices. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 mid) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action); + mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF); + mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -456,8 +615,12 @@ MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8, enum mlxsw_reg_sfn_rec_type { /* MAC addresses learned on a regular port. 
*/ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5, - /* Aged-out MAC address on a regular port */ + /* MAC addresses learned on a LAG port. */ + MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG = 0x6, + /* Aged-out MAC address on a regular port. */ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7, + /* Aged-out MAC address on a LAG port. */ + MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8, }; /* reg_sfn_rec_type @@ -505,6 +668,22 @@ static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index); } +/* reg_sfn_mac_lag_lag_id + * LAG ID (pointer into the LAG descriptor table). + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_lag_lag_id, MLXSW_REG_SFN_BASE_LEN, 0, 10, + MLXSW_REG_SFN_REC_LEN, 0x0C, false); + +static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u16 *p_lag_id) +{ + mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); + *p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index); +} + /* SPMS - Switch Port MSTP/RSTP State Register * ------------------------------------------- * Configures the spanning tree state of a physical port. @@ -865,6 +1044,293 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_port_mask_set(payload, port, 1); } +/* SLDR - Switch LAG Descriptor Register + * ----------------------------------------- + * The switch LAG descriptor register is populated by LAG descriptors. + * Each LAG descriptor is indexed by lag_id. The LAG ID runs from 0 to + * max_lag-1. + */ +#define MLXSW_REG_SLDR_ID 0x2014 +#define MLXSW_REG_SLDR_LEN 0x0C /* counting in only one port in list */ + +static const struct mlxsw_reg_info mlxsw_reg_sldr = { + .id = MLXSW_REG_SLDR_ID, + .len = MLXSW_REG_SLDR_LEN, +}; + +enum mlxsw_reg_sldr_op { + /* Indicates a creation of a new LAG-ID, lag_id must be valid */ + MLXSW_REG_SLDR_OP_LAG_CREATE, + MLXSW_REG_SLDR_OP_LAG_DESTROY, + /* Ports that appear in the list have the Distributor enabled */ + MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST, + /* Removes ports from the disributor list */ + MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST, +}; + +/* reg_sldr_op + * Operation. + * Access: RW + */ +MLXSW_ITEM32(reg, sldr, op, 0x00, 29, 3); + +/* reg_sldr_lag_id + * LAG identifier. The lag_id is the index into the LAG descriptor table. + * Access: Index + */ +MLXSW_ITEM32(reg, sldr, lag_id, 0x00, 0, 10); + +static inline void mlxsw_reg_sldr_lag_create_pack(char *payload, u8 lag_id) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_CREATE); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); +} + +static inline void mlxsw_reg_sldr_lag_destroy_pack(char *payload, u8 lag_id) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_DESTROY); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); +} + +/* reg_sldr_num_ports + * The number of member ports of the LAG. + * Reserved for Create / Destroy operations + * For Add / Remove operations - indicates the number of ports in the list. + * Access: RW + */ +MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8); + +/* reg_sldr_system_port + * System port. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false); + +static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, + u8 local_port) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); + mlxsw_reg_sldr_num_ports_set(payload, 1); + mlxsw_reg_sldr_system_port_set(payload, 0, local_port); +} + +static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id, + u8 local_port) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); + mlxsw_reg_sldr_num_ports_set(payload, 1); + mlxsw_reg_sldr_system_port_set(payload, 0, local_port); +} + +/* SLCR - Switch LAG Configuration 2 Register + * ------------------------------------------- + * The Switch LAG Configuration register is used for configuring the + * LAG properties of the switch. + */ +#define MLXSW_REG_SLCR_ID 0x2015 +#define MLXSW_REG_SLCR_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_slcr = { + .id = MLXSW_REG_SLCR_ID, + .len = MLXSW_REG_SLCR_LEN, +}; + +enum mlxsw_reg_slcr_pp { + /* Global Configuration (for all ports) */ + MLXSW_REG_SLCR_PP_GLOBAL, + /* Per port configuration, based on local_port field */ + MLXSW_REG_SLCR_PP_PER_PORT, +}; + +/* reg_slcr_pp + * Per Port Configuration + * Note: Reading at Global mode results in reading port 1 configuration. + * Access: Index + */ +MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1); + +/* reg_slcr_local_port + * Local port number + * Supported from CPU port + * Not supported from router port + * Reserved when pp = Global Configuration + * Access: Index + */ +MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8); + +enum mlxsw_reg_slcr_type { + MLXSW_REG_SLCR_TYPE_CRC, /* default */ + MLXSW_REG_SLCR_TYPE_XOR, + MLXSW_REG_SLCR_TYPE_RANDOM, +}; + +/* reg_slcr_type + * Hash type + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); + +/* Ingress port */ +#define MLXSW_REG_SLCR_LAG_HASH_IN_PORT BIT(0) +/* SMAC - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_SMAC_IP BIT(1) +/* SMAC - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP BIT(2) +#define MLXSW_REG_SLCR_LAG_HASH_SMAC \ + (MLXSW_REG_SLCR_LAG_HASH_SMAC_IP | \ + MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP) +/* DMAC - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_DMAC_IP BIT(3) +/* DMAC - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP BIT(4) +#define MLXSW_REG_SLCR_LAG_HASH_DMAC \ + (MLXSW_REG_SLCR_LAG_HASH_DMAC_IP | \ + MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP) +/* Ethertype - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP BIT(5) +/* Ethertype - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP BIT(6) +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE \ + (MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP | \ + MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP) +/* VLAN ID - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_VLANID_IP BIT(7) +/* VLAN ID - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP BIT(8) +#define MLXSW_REG_SLCR_LAG_HASH_VLANID \ + (MLXSW_REG_SLCR_LAG_HASH_VLANID_IP | \ + MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP) +/* Source IP address (can be IPv4 or IPv6) */ +#define MLXSW_REG_SLCR_LAG_HASH_SIP BIT(9) +/* Destination IP address (can be IPv4 or IPv6) */ +#define MLXSW_REG_SLCR_LAG_HASH_DIP BIT(10) +/* TCP/UDP source port */ +#define 
MLXSW_REG_SLCR_LAG_HASH_SPORT BIT(11) +/* TCP/UDP destination port*/ +#define MLXSW_REG_SLCR_LAG_HASH_DPORT BIT(12) +/* IPv4 Protocol/IPv6 Next Header */ +#define MLXSW_REG_SLCR_LAG_HASH_IPPROTO BIT(13) +/* IPv6 Flow label */ +#define MLXSW_REG_SLCR_LAG_HASH_FLOWLABEL BIT(14) +/* SID - FCoE source ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_SID BIT(15) +/* DID - FCoE destination ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_DID BIT(16) +/* OXID - FCoE originator exchange ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_OXID BIT(17) +/* Destination QP number - for RoCE packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ROCE_DQP BIT(19) + +/* reg_slcr_lag_hash + * LAG hashing configuration. This is a bitmask, in which each set + * bit includes the corresponding item in the LAG hash calculation. + * The default lag_hash contains SMAC, DMAC, VLANID and + * Ethertype (for all packet types). + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); + +static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) +{ + MLXSW_REG_ZERO(slcr, payload); + mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); + mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR); + mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); +} + +/* SLCOR - Switch LAG Collector Register + * ------------------------------------- + * The Switch LAG Collector register controls the Local Port membership + * in a LAG and enablement of the collector. + */ +#define MLXSW_REG_SLCOR_ID 0x2016 +#define MLXSW_REG_SLCOR_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_slcor = { + .id = MLXSW_REG_SLCOR_ID, + .len = MLXSW_REG_SLCOR_LEN, +}; + +enum mlxsw_reg_slcor_col { + /* Port is added with collector disabled */ + MLXSW_REG_SLCOR_COL_LAG_ADD_PORT, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED, + MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT, +}; + +/* reg_slcor_col + * Collector configuration + * Access: RW + */ +MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2); + +/* reg_slcor_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8); + +/* reg_slcor_lag_id + * LAG Identifier. Index into the LAG descriptor table. + * Access: Index + */ +MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10); + +/* reg_slcor_port_index + * Port index in the LAG list. Only valid on Add Port to LAG col. 
+ * Valid range is from 0 to cap_max_lag_members-1 + * Access: RW + */ +MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10); + +static inline void mlxsw_reg_slcor_pack(char *payload, + u8 local_port, u16 lag_id, + enum mlxsw_reg_slcor_col col) +{ + MLXSW_REG_ZERO(slcor, payload); + mlxsw_reg_slcor_col_set(payload, col); + mlxsw_reg_slcor_local_port_set(payload, local_port); + mlxsw_reg_slcor_lag_id_set(payload, lag_id); +} + +static inline void mlxsw_reg_slcor_port_add_pack(char *payload, + u8 local_port, u16 lag_id, + u8 port_index) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_ADD_PORT); + mlxsw_reg_slcor_port_index_set(payload, port_index); +} + +static inline void mlxsw_reg_slcor_port_remove_pack(char *payload, + u8 local_port, u16 lag_id) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT); +} + +static inline void mlxsw_reg_slcor_col_enable_pack(char *payload, + u8 local_port, u16 lag_id) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); +} + +static inline void mlxsw_reg_slcor_col_disable_pack(char *payload, + u8 local_port, u16 lag_id) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); +} + /* SPMLR - Switch Port MAC Learning Register * ----------------------------------------- * Controls the Switch MAC learning policy per port. @@ -2087,6 +2553,284 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); } +/* MFCR - Management Fan Control Register + * -------------------------------------- + * This register controls the settings of the Fan Speed PWM mechanism. + */ +#define MLXSW_REG_MFCR_ID 0x9001 +#define MLXSW_REG_MFCR_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfcr = { + .id = MLXSW_REG_MFCR_ID, + .len = MLXSW_REG_MFCR_LEN, +}; + +enum mlxsw_reg_mfcr_pwm_frequency { + MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00, + MLXSW_REG_MFCR_PWM_FEQ_14_7HZ = 0x01, + MLXSW_REG_MFCR_PWM_FEQ_22_1HZ = 0x02, + MLXSW_REG_MFCR_PWM_FEQ_1_4KHZ = 0x40, + MLXSW_REG_MFCR_PWM_FEQ_5KHZ = 0x41, + MLXSW_REG_MFCR_PWM_FEQ_20KHZ = 0x42, + MLXSW_REG_MFCR_PWM_FEQ_22_5KHZ = 0x43, + MLXSW_REG_MFCR_PWM_FEQ_25KHZ = 0x44, +}; + +/* reg_mfcr_pwm_frequency + * Controls the frequency of the PWM signal. + * Access: RW + */ +MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6); + +#define MLXSW_MFCR_TACHOS_MAX 10 + +/* reg_mfcr_tacho_active + * Indicates which of the tachometer is active (bit per tachometer). + * Access: RO + */ +MLXSW_ITEM32(reg, mfcr, tacho_active, 0x04, 16, MLXSW_MFCR_TACHOS_MAX); + +#define MLXSW_MFCR_PWMS_MAX 5 + +/* reg_mfcr_pwm_active + * Indicates which of the PWM control is active (bit per PWM). 
+ * Access: RO + */ +MLXSW_ITEM32(reg, mfcr, pwm_active, 0x04, 0, MLXSW_MFCR_PWMS_MAX); + +static inline void +mlxsw_reg_mfcr_pack(char *payload, + enum mlxsw_reg_mfcr_pwm_frequency pwm_frequency) +{ + MLXSW_REG_ZERO(mfcr, payload); + mlxsw_reg_mfcr_pwm_frequency_set(payload, pwm_frequency); +} + +static inline void +mlxsw_reg_mfcr_unpack(char *payload, + enum mlxsw_reg_mfcr_pwm_frequency *p_pwm_frequency, + u16 *p_tacho_active, u8 *p_pwm_active) +{ + *p_pwm_frequency = mlxsw_reg_mfcr_pwm_frequency_get(payload); + *p_tacho_active = mlxsw_reg_mfcr_tacho_active_get(payload); + *p_pwm_active = mlxsw_reg_mfcr_pwm_active_get(payload); +} + +/* MFSC - Management Fan Speed Control Register + * -------------------------------------------- + * This register controls the settings of the Fan Speed PWM mechanism. + */ +#define MLXSW_REG_MFSC_ID 0x9002 +#define MLXSW_REG_MFSC_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfsc = { + .id = MLXSW_REG_MFSC_ID, + .len = MLXSW_REG_MFSC_LEN, +}; + +/* reg_mfsc_pwm + * Fan pwm to control / monitor. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsc, pwm, 0x00, 24, 3); + +/* reg_mfsc_pwm_duty_cycle + * Controls the duty cycle of the PWM. Value range from 0..255 to + * represent duty cycle of 0%...100%. + * Access: RW + */ +MLXSW_ITEM32(reg, mfsc, pwm_duty_cycle, 0x04, 0, 8); + +static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm, + u8 pwm_duty_cycle) +{ + MLXSW_REG_ZERO(mfsc, payload); + mlxsw_reg_mfsc_pwm_set(payload, pwm); + mlxsw_reg_mfsc_pwm_duty_cycle_set(payload, pwm_duty_cycle); +} + +/* MFSM - Management Fan Speed Measurement + * --------------------------------------- + * This register controls the settings of the Tacho measurements and + * enables reading the Tachometer measurements. + */ +#define MLXSW_REG_MFSM_ID 0x9003 +#define MLXSW_REG_MFSM_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfsm = { + .id = MLXSW_REG_MFSM_ID, + .len = MLXSW_REG_MFSM_LEN, +}; + +/* reg_mfsm_tacho + * Fan tachometer index. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsm, tacho, 0x00, 24, 4); + +/* reg_mfsm_rpm + * Fan speed (round per minute). + * Access: RO + */ +MLXSW_ITEM32(reg, mfsm, rpm, 0x04, 0, 16); + +static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho) +{ + MLXSW_REG_ZERO(mfsm, payload); + mlxsw_reg_mfsm_tacho_set(payload, tacho); +} + +/* MTCAP - Management Temperature Capabilities + * ------------------------------------------- + * This register exposes the capabilities of the device and + * system temperature sensing. + */ +#define MLXSW_REG_MTCAP_ID 0x9009 +#define MLXSW_REG_MTCAP_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mtcap = { + .id = MLXSW_REG_MTCAP_ID, + .len = MLXSW_REG_MTCAP_LEN, +}; + +/* reg_mtcap_sensor_count + * Number of sensors supported by the device. + * This includes the QSFP module sensors (if exists in the QSFP module). + * Access: RO + */ +MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7); + +/* MTMP - Management Temperature + * ----------------------------- + * This register controls the settings of the temperature measurements + * and enables reading the temperature measurements. Note that temperature + * is in 0.125 degrees Celsius. + */ +#define MLXSW_REG_MTMP_ID 0x900A +#define MLXSW_REG_MTMP_LEN 0x20 + +static const struct mlxsw_reg_info mlxsw_reg_mtmp = { + .id = MLXSW_REG_MTMP_ID, + .len = MLXSW_REG_MTMP_LEN, +}; + +/* reg_mtmp_sensor_index + * Sensors index to access. 
+ * 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially + * (module 0 is mapped to sensor_index 64). + * Access: Index + */ +MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7); + +/* Convert to milli degrees Celsius */ +#define MLXSW_REG_MTMP_TEMP_TO_MC(val) (val * 125) + +/* reg_mtmp_temperature + * Temperature reading from the sensor. Reading is in 0.125 Celsius + * degrees units. + * Access: RO + */ +MLXSW_ITEM32(reg, mtmp, temperature, 0x04, 0, 16); + +/* reg_mtmp_mte + * Max Temperature Enable - enables measuring the max temperature on a sensor. + * Access: RW + */ +MLXSW_ITEM32(reg, mtmp, mte, 0x08, 31, 1); + +/* reg_mtmp_mtr + * Max Temperature Reset - clears the value of the max temperature register. + * Access: WO + */ +MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1); + +/* reg_mtmp_max_temperature + * The highest measured temperature from the sensor. + * When the bit mte is cleared, the field max_temperature is reserved. + * Access: RO + */ +MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16); + +#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8 + +/* reg_mtmp_sensor_name + * Sensor Name + * Access: RO + */ +MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE); + +static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index, + bool max_temp_enable, + bool max_temp_reset) +{ + MLXSW_REG_ZERO(mtmp, payload); + mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); + mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); + mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); +} + +static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, + unsigned int *p_max_temp, + char *sensor_name) +{ + u16 temp; + + if (p_temp) { + temp = mlxsw_reg_mtmp_temperature_get(payload); + *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (p_max_temp) { + temp = mlxsw_reg_mtmp_max_temperature_get(payload); + *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (sensor_name) + mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); +} + +/* MLCR - Management LED Control Register + * -------------------------------------- + * Controls the system LEDs. + */ +#define MLXSW_REG_MLCR_ID 0x902B +#define MLXSW_REG_MLCR_LEN 0x0C + +static const struct mlxsw_reg_info mlxsw_reg_mlcr = { + .id = MLXSW_REG_MLCR_ID, + .len = MLXSW_REG_MLCR_LEN, +}; + +/* reg_mlcr_local_port + * Local port number. + * Access: RW + */ +MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8); + +#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF + +/* reg_mlcr_beacon_duration + * Duration of the beacon to be active, in seconds. + * 0x0 - Will turn off the beacon. + * 0xFFFF - Will turn on the beacon until explicitly turned off. + * Access: RW + */ +MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16); + +/* reg_mlcr_beacon_remain + * Remaining duration of the beacon, in seconds. + * 0xFFFF indicates an infinite amount of time. + * Access: RO + */ +MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16); + +static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, + bool active) +{ + MLXSW_REG_ZERO(mlcr, payload); + mlxsw_reg_mlcr_local_port_set(payload, local_port); + mlxsw_reg_mlcr_beacon_duration_set(payload, active ? + MLXSW_REG_MLCR_DURATION_MAX : 0); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. 
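/* Illustrative only -- not part of the patch above. A minimal sketch of how a
 * caller might read sensor 0 through the MTMP helpers just added, assuming
 * mlxsw_reg_query() takes the same (core, reg, payload) arguments as the
 * mlxsw_reg_write() calls used elsewhere in this patch; the function name
 * example_read_temp_mc() is hypothetical.
 */
static int example_read_temp_mc(struct mlxsw_core *core, unsigned int *temp_mc)
{
	char mtmp_pl[MLXSW_REG_MTMP_LEN];
	int err;

	/* Select sensor 0, keep max-temperature tracking enabled, no reset. */
	mlxsw_reg_mtmp_pack(mtmp_pl, 0, true, false);
	err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
	if (err)
		return err;

	/* The raw reading is in 0.125 degree Celsius units; the unpack helper
	 * converts it to milli-degrees via MLXSW_REG_MTMP_TEMP_TO_MC, so a raw
	 * value of 200 becomes 25000 (25.0 C). Passing NULL skips the optional
	 * max-temperature and sensor-name outputs.
	 */
	mlxsw_reg_mtmp_unpack(mtmp_pl, temp_mc, NULL, NULL);
	return 0;
}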
@@ -2357,6 +3101,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SGCR"; case MLXSW_REG_SPAD_ID: return "SPAD"; + case MLXSW_REG_SMID_ID: + return "SMID"; case MLXSW_REG_SSPR_ID: return "SSPR"; case MLXSW_REG_SFDAT_ID: @@ -2375,6 +3121,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; + case MLXSW_REG_SLDR_ID: + return "SLDR"; + case MLXSW_REG_SLCR_ID: + return "SLCR"; + case MLXSW_REG_SLCOR_ID: + return "SLCOR"; case MLXSW_REG_SPMLR_ID: return "SPMLR"; case MLXSW_REG_SVFA_ID: @@ -2405,6 +3157,18 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_MFCR_ID: + return "MFCR"; + case MLXSW_REG_MFSC_ID: + return "MFSC"; + case MLXSW_REG_MFSM_ID: + return "MFSM"; + case MLXSW_REG_MTCAP_ID: + return "MTCAP"; + case MLXSW_REG_MTMP_ID: + return "MTMP"; + case MLXSW_REG_MLCR_ID: + return "MLCR"; case MLXSW_REG_SBPR_ID: return "SBPR"; case MLXSW_REG_SBCM_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3be4a2355ead..ce6845d534a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -48,6 +48,7 @@ #include <linux/workqueue.h> #include <linux/jiffies.h> #include <linux/bitops.h> +#include <linux/list.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -186,33 +187,6 @@ static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } -static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - int err; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, - MLXSW_SP_VFID_BASE + vfid, 0); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - - if (err) - return err; - - set_bit(vfid, mlxsw_sp->active_vfids); - return 0; -} - -static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - clear_bit(vfid, mlxsw_sp->active_vfids); - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, - MLXSW_SP_VFID_BASE + vfid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, unsigned char *addr) { @@ -417,6 +391,10 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static void mlxsw_sp_set_rx_mode(struct net_device *dev) +{ +} + static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -545,12 +523,132 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static struct mlxsw_sp_vfid * +mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) +{ + struct mlxsw_sp_vfid *vfid; + + list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { + if (vfid->vid == vid) + return vfid; + } + + return NULL; +} + +static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) +{ + return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, + MLXSW_SP_VFID_PORT_MAX); +} + +static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) +{ + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 
u16 vfid) +{ + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + u16 vid) +{ + struct device *dev = mlxsw_sp->bus_info->dev; + struct mlxsw_sp_vfid *vfid; + u16 n_vfid; + int err; + + n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { + dev_err(dev, "No available vFIDs\n"); + return ERR_PTR(-ERANGE); + } + + err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + if (err) { + dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + return ERR_PTR(err); + } + + vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); + if (!vfid) + goto err_allocate_vfid; + + vfid->vfid = n_vfid; + vfid->vid = vid; + + list_add(&vfid->list, &mlxsw_sp->port_vfids.list); + set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); + + return vfid; + +err_allocate_vfid: + __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + return ERR_PTR(-ENOMEM); +} + +static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vfid *vfid) +{ + clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); + list_del(&vfid->list); + + __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + + kfree(vfid); +} + +static struct mlxsw_sp_port * +mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_vfid *vfid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL); + if (!mlxsw_sp_vport) + return NULL; + + /* dev will be set correctly after the VLAN device is linked + * with the real device. In case of bridge SELF invocation, dev + * will remain as is. + */ + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; + mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port; + mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; + mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; + mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; + mlxsw_sp_vport->vport.vfid = vfid; + mlxsw_sp_vport->vport.vid = vfid->vid; + + list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); + + return mlxsw_sp_vport; +} + +static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + list_del(&mlxsw_sp_vport->vport.list); + kfree(mlxsw_sp_vport); +} + int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *sftr_pl; + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_vfid *vfid; int err; /* VLAN 0 is added to HW filter when device goes up, but it is @@ -559,100 +657,105 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, if (!vid) return 0; - if (test_bit(vid, mlxsw_sp_port->active_vfids)) { + if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { netdev_warn(dev, "VID=%d already configured\n", vid); return 0; } - if (!test_bit(vid, mlxsw_sp->active_vfids)) { - err = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (err) { - netdev_err(dev, "Failed to create vFID=%d\n", - MLXSW_SP_VFID_BASE + vid); - return err; + vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); + if (!vfid) { + vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (IS_ERR(vfid)) { + netdev_err(dev, "Failed to create vFID for VID=%d\n", + vid); + return PTR_ERR(vfid); } + } - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) { - err = 
-ENOMEM; - goto err_flood_table_alloc; - } - mlxsw_reg_sftr_pack(sftr_pl, 0, vid, - MLXSW_REG_SFGC_TABLE_TYPE_FID, 0, - MLXSW_PORT_CPU_PORT, true); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); - kfree(sftr_pl); + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); + if (!mlxsw_sp_vport) { + netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); + err = -ENOMEM; + goto err_port_vport_create; + } + + if (!vfid->nr_vports) { + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, + true, false); if (err) { - netdev_err(dev, "Failed to configure flood table\n"); - goto err_flood_table_config; + netdev_err(dev, "Failed to setup flooding for vFID=%d\n", + vfid->vfid); + goto err_vport_flood_set; } } - /* In case we fail in the following steps, we intentionally do not - * destroy the associated vFID. - */ - /* When adding the first VLAN interface on a bridged port we need to * transition all the active 802.1Q bridge VLANs to use explicit * {Port, VID} to FID mappings and set the port's mode to Virtual mode. */ - if (!mlxsw_sp_port->nr_vfids) { + if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); if (err) { netdev_err(dev, "Failed to set to Virtual mode\n"); - return err; + goto err_port_vp_mode_trans; } } - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, MLXSW_SP_VFID_BASE + vid, vid); + true, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); if (err) { netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, MLXSW_SP_VFID_BASE + vid); + vid, vfid->vfid); goto err_port_vid_to_fid_set; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); if (err) { netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); goto err_port_vid_learning_set; } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); goto err_port_add_vid; } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_FORWARDING); if (err) { netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); goto err_port_stp_state_set; } - mlxsw_sp_port->nr_vfids++; - set_bit(vid, mlxsw_sp_port->active_vfids); + vfid->nr_vports++; return 0; -err_flood_table_config: -err_flood_table_alloc: - mlxsw_sp_vfid_destroy(mlxsw_sp, vid); - return err; - err_port_stp_state_set: - mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); err_port_add_vid: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - MLXSW_SP_VFID_BASE + vid, vid); + mlxsw_sp_vfid_to_fid(vfid->vfid), vid); err_port_vid_to_fid_set: - mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + if (list_is_singular(&mlxsw_sp_port->vports_list)) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); +err_port_vp_mode_trans: + if (!vfid->nr_vports) + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, + false); +err_vport_flood_set: + mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 
+err_port_vport_create: + if (!vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; } @@ -660,6 +763,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_vfid *vfid; int err; /* VLAN 0 is removed from HW filter when device goes down, but @@ -668,38 +773,42 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, if (!vid) return 0; - if (!test_bit(vid, mlxsw_sp_port->active_vfids)) { + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { netdev_warn(dev, "VID=%d does not exist\n", vid); return 0; } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + vfid = mlxsw_sp_vport->vport.vfid; + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_DISCARDING); if (err) { netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); if (err) { netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, MLXSW_SP_VFID_BASE + vid, + false, + mlxsw_sp_vfid_to_fid(vfid->vfid), vid); if (err) { netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, MLXSW_SP_VFID_BASE + vid); + vid, vfid->vfid); return err; } @@ -707,7 +816,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, * transition all active 802.1Q bridge VLANs to use VID to FID * mappings and set port's mode to VLAN mode. */ - if (mlxsw_sp_port->nr_vfids == 1) { + if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); if (err) { netdev_err(dev, "Failed to set to VLAN mode\n"); @@ -715,8 +824,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, } } - mlxsw_sp_port->nr_vfids--; - clear_bit(vid, mlxsw_sp_port->active_vfids); + vfid->nr_vports--; + mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); + + /* Destroy the vFID if no vPorts are assigned to it anymore. 
*/ + if (!vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); return 0; } @@ -725,6 +838,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, .ndo_change_mtu = mlxsw_sp_port_change_mtu, .ndo_get_stats64 = mlxsw_sp_port_get_stats64, @@ -859,6 +973,29 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, } } +static int mlxsw_sp_port_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mlcr_pl[MLXSW_REG_MLCR_LEN]; + bool active; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + active = true; + break; + case ETHTOOL_ID_INACTIVE: + active = false; + break; + default: + return -EOPNOTSUPP; + } + + mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); +} + static void mlxsw_sp_port_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -1205,6 +1342,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_drvinfo = mlxsw_sp_port_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = mlxsw_sp_port_get_strings, + .set_phys_id = mlxsw_sp_port_set_phys_id, .get_ethtool_stats = mlxsw_sp_port_get_stats, .get_sset_count = mlxsw_sp_port_get_sset_count, .get_settings = mlxsw_sp_port_get_settings, @@ -1216,6 +1354,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *dev; bool usable; + size_t bytes; int err; dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); @@ -1225,10 +1364,18 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; mlxsw_sp_port->local_port = local_port; - mlxsw_sp_port->learning = 1; - mlxsw_sp_port->learning_sync = 1; - mlxsw_sp_port->uc_flood = 1; - mlxsw_sp_port->pvid = 1; + bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); + mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->active_vlans) { + err = -ENOMEM; + goto err_port_active_vlans_alloc; + } + mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->untagged_vlans) { + err = -ENOMEM; + goto err_port_untagged_vlans_alloc; + } + INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); mlxsw_sp_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); @@ -1330,16 +1477,29 @@ err_port_module_check: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: + kfree(mlxsw_sp_port->untagged_vlans); +err_port_untagged_vlans_alloc: + kfree(mlxsw_sp_port->active_vlans); +err_port_active_vlans_alloc: free_netdev(dev); return err; } -static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) { - u16 vfid; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; - for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); + list_for_each_entry_safe(mlxsw_sp_vport, tmp, + &mlxsw_sp_port->vports_list, vport.list) { + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + /* vPorts created for VLAN devices should already be gone + * by now, since we 
unregistered the port netdev. + */ + WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); + mlxsw_sp_port_kill_vid(dev, 0, vid); + } } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) @@ -1348,10 +1508,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) if (!mlxsw_sp_port) return; - mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); + kfree(mlxsw_sp_port->untagged_vlans); + kfree(mlxsw_sp_port->active_vlans); free_netdev(mlxsw_sp_port->dev); } @@ -1633,16 +1795,15 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, enum mlxsw_sp_flood_table flood_table; char sfgc_pl[MLXSW_REG_SFGC_LEN]; - if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) { + if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; - flood_table = 0; - } else { + else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) - flood_table = MLXSW_SP_FLOOD_TABLE_UC; - else - flood_table = MLXSW_SP_FLOOD_TABLE_BM; - } + + if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + flood_table = MLXSW_SP_FLOOD_TABLE_UC; + else + flood_table = MLXSW_SP_FLOOD_TABLE_BM; mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, flood_table); @@ -1653,9 +1814,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) { int type, err; - /* For non-offloaded netdevs, flood all traffic types to CPU - * port. - */ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { if (type == MLXSW_REG_SFGC_TYPE_RESERVED) continue; @@ -1664,15 +1822,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); if (err) return err; - } - - /* For bridged ports, use one flooding table for unknown unicast - * traffic and a second table for unregistered multicast and - * broadcast. 
- */ - for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { - if (type == MLXSW_REG_SFGC_TYPE_RESERVED) - continue; err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); @@ -1683,6 +1832,22 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) +{ + char slcr_pl[MLXSW_REG_SLCR_LEN]; + + mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | + MLXSW_REG_SLCR_LAG_HASH_DMAC | + MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | + MLXSW_REG_SLCR_LAG_HASH_VLANID | + MLXSW_REG_SLCR_LAG_HASH_SIP | + MLXSW_REG_SLCR_LAG_HASH_DIP | + MLXSW_REG_SLCR_LAG_HASH_SPORT | + MLXSW_REG_SLCR_LAG_HASH_DPORT | + MLXSW_REG_SLCR_LAG_HASH_IPPROTO); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); +} + static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -1691,6 +1856,9 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); err = mlxsw_sp_base_mac_get(mlxsw_sp); if (err) { @@ -1701,7 +1869,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); - goto err_ports_create; + return err; } err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); @@ -1728,6 +1896,12 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, goto err_buffers_init; } + err = mlxsw_sp_lag_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); + goto err_lag_init; + } + err = mlxsw_sp_switchdev_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); @@ -1737,6 +1911,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, return 0; err_switchdev_init: +err_lag_init: err_buffers_init: err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); @@ -1744,8 +1919,6 @@ err_rx_listener_register: mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); err_event_register: mlxsw_sp_ports_remove(mlxsw_sp); -err_ports_create: - mlxsw_sp_vfids_fini(mlxsw_sp); return err; } @@ -1757,18 +1930,17 @@ static void mlxsw_sp_fini(void *priv) mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); - mlxsw_sp_vfids_fini(mlxsw_sp); } static struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_max_vepa_channels = 1, .max_vepa_channels = 0, .used_max_lag = 1, - .max_lag = 64, + .max_lag = MLXSW_SP_LAG_MAX, .used_max_port_per_lag = 1, - .max_port_per_lag = 16, + .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, .used_max_mid = 1, - .max_mid = 7000, + .max_mid = MLXSW_SP_MID_MAX, .used_max_pgt = 1, .max_pgt = 0, .used_max_system_port = 1, @@ -1782,8 +1954,8 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .flood_mode = 3, .max_fid_offset_flood_tables = 2, .fid_offset_flood_table_size = VLAN_N_VID - 1, - .max_fid_flood_tables = 1, - .fid_flood_table_size = VLAN_N_VID, + .max_fid_flood_tables = 2, + .fid_flood_table_size = MLXSW_SP_VFID_MAX, .used_max_ib_mc = 1, .max_ib_mc = 0, .used_max_pkey = 1, @@ -1824,24 +1996,29 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) */ err = mlxsw_sp_port_kill_vid(dev, 0, 1); if (err) - netdev_err(dev, 
"Failed to remove VID 1\n"); + return err; - return err; + mlxsw_sp_port->learning = 1; + mlxsw_sp_port->learning_sync = 1; + mlxsw_sp_port->uc_flood = 1; + mlxsw_sp_port->bridged = 1; + + return 0; } static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; - int err; + + mlxsw_sp_port->learning = 0; + mlxsw_sp_port->learning_sync = 0; + mlxsw_sp_port->uc_flood = 0; + mlxsw_sp_port->bridged = 0; /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. */ - err = mlxsw_sp_port_add_vid(dev, 0, 1); - if (err) - netdev_err(dev, "Failed to add VID 1\n"); - - return err; + return mlxsw_sp_port_add_vid(dev, 0, 1); } static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, @@ -1865,19 +2042,293 @@ static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->master_bridge.dev = NULL; } -static int mlxsw_sp_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id, u8 port_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id, port_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *lag_dev, + u16 *p_lag_id) +{ + struct mlxsw_sp_upper *lag; + int free_lag_id = -1; + int i; + + for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { + lag = mlxsw_sp_lag_get(mlxsw_sp, i); + if (lag->ref_count) { + if (lag->dev == lag_dev) { + *p_lag_id = i; + return 0; + } + } else if (free_lag_id < 0) { + free_lag_id = i; + } + } + if (free_lag_id < 0) + return -EBUSY; + *p_lag_id = free_lag_id; + return 0; +} + +static bool +mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *lag_dev, + struct netdev_lag_upper_info 
*lag_upper_info) +{ + u16 lag_id; + + if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) + return false; + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + return false; + return true; +} + +static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, + u16 lag_id, u8 *p_port_index) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { + *p_port_index = i; + return 0; + } + } + return -EBUSY; +} + +static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_upper *lag; + u16 lag_id; + u8 port_index; + int err; + + err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); + if (err) + return err; + lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); + if (!lag->ref_count) { + err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); + if (err) + return err; + lag->dev = lag_dev; + } + + err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); + if (err) + return err; + err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); + if (err) + goto err_col_port_add; + err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); + if (err) + goto err_col_port_enable; + + mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, + mlxsw_sp_port->local_port); + mlxsw_sp_port->lag_id = lag_id; + mlxsw_sp_port->lagged = 1; + lag->ref_count++; + return 0; + +err_col_port_add: + if (!lag->ref_count) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); +err_col_port_enable: + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); + return err; +} + +static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_upper *lag; + u16 lag_id = mlxsw_sp_port->lag_id; + int err; + + if (!mlxsw_sp_port->lagged) + return 0; + lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); + WARN_ON(lag->ref_count == 0); + + err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + if (err) + return err; + err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); + if (err) + return err; + + if (lag->ref_count == 1) { + err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); + if (err) + return err; + } + + mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, + mlxsw_sp_port->local_port); + mlxsw_sp_port->lagged = 0; + lag->ref_count--; + return 0; +} + +static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, + mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, + mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool lag_tx_enabled) +{ + if (lag_tx_enabled) + return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + else + return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, + mlxsw_sp_port->lag_id); +} + +static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, + struct netdev_lag_lower_state_info *info) +{ 
+ return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); +} + +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev); + +static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid = vlan_dev_vlan_id(vlan_dev); + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return -EINVAL; + } + + mlxsw_sp_vport->dev = vlan_dev; + + return 0; +} + +static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid = vlan_dev_vlan_id(vlan_dev); + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return -EINVAL; + } + + /* When removing a VLAN device while still bridged we should first + * remove it from the bridge, as we receive the bridge's notification + * when the vPort is already gone. + */ + if (mlxsw_sp_vport->bridged) { + struct net_device *br_dev; + + br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); + } + + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; + + return 0; +} + +static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, + unsigned long event, void *ptr) { - struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; int err; - if (!mlxsw_sp_port_dev_check(dev)) - return NOTIFY_DONE; - mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; info = ptr; @@ -1885,28 +2336,66 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; + if (!info->master || !info->linking) + break; /* HW limitation forbids to put ports to multiple bridges. 
*/ - if (info->master && info->linking && - netif_is_bridge_master(upper_dev) && + if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) return NOTIFY_BAD; + if (netif_is_lag_master(upper_dev) && + !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, + info->upper_info)) + return NOTIFY_BAD; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (info->master && - netif_is_bridge_master(upper_dev)) { + if (is_vlan_dev(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to link VLAN device\n"); + return NOTIFY_BAD; + } + } else { + err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to unlink VLAN device\n"); + return NOTIFY_BAD; + } + } + } else if (netif_is_bridge_master(upper_dev)) { if (info->linking) { err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) + if (err) { netdev_err(dev, "Failed to join bridge\n"); + return NOTIFY_BAD; + } mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - mlxsw_sp_port->bridged = 1; } else { err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); - if (err) - netdev_err(dev, "Failed to leave bridge\n"); - mlxsw_sp_port->bridged = 0; mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); + if (err) { + netdev_err(dev, "Failed to leave bridge\n"); + return NOTIFY_BAD; + } + } + } else if (netif_is_lag_master(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_lag_join(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to join link aggregation\n"); + return NOTIFY_BAD; + } + } else { + err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to leave link aggregation\n"); + return NOTIFY_BAD; + } } } break; @@ -1915,6 +2404,443 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } +static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changelowerstate_info *info; + struct mlxsw_sp_port *mlxsw_sp_port; + int err; + + mlxsw_sp_port = netdev_priv(dev); + info = ptr; + + switch (event) { + case NETDEV_CHANGELOWERSTATE: + if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { + err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, + info->lower_state_info); + if (err) + netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); + } + break; + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_port_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + switch (event) { + case NETDEV_PRECHANGEUPPER: + case NETDEV_CHANGEUPPER: + return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr); + case NETDEV_CHANGELOWERSTATE: + return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct list_head *iter; + int ret; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (mlxsw_sp_port_dev_check(dev)) { + ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); + if (ret == NOTIFY_BAD) + return ret; + } + } + + return NOTIFY_DONE; +} + +static struct mlxsw_sp_vfid * +mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + struct mlxsw_sp_vfid *vfid; + + list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { + if (vfid->br_dev == br_dev) + return vfid; + } + + return NULL; +} + 
+static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) +{ + return vfid - MLXSW_SP_VFID_PORT_MAX; +} + +static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) +{ + return MLXSW_SP_VFID_PORT_MAX + br_vfid; +} + +static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) +{ + return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, + MLXSW_SP_VFID_BR_MAX); +} + +static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + struct device *dev = mlxsw_sp->bus_info->dev; + struct mlxsw_sp_vfid *vfid; + u16 n_vfid; + int err; + + n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); + if (n_vfid == MLXSW_SP_VFID_MAX) { + dev_err(dev, "No available vFIDs\n"); + return ERR_PTR(-ERANGE); + } + + err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + if (err) { + dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + return ERR_PTR(err); + } + + vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); + if (!vfid) + goto err_allocate_vfid; + + vfid->vfid = n_vfid; + vfid->br_dev = br_dev; + + list_add(&vfid->list, &mlxsw_sp->br_vfids.list); + set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + + return vfid; + +err_allocate_vfid: + __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + return ERR_PTR(-ENOMEM); +} + +static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vfid *vfid) +{ + u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + + clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); + list_del(&vfid->list); + + __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + + kfree(vfid); +} + +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct net_device *dev = mlxsw_sp_vport->dev; + struct mlxsw_sp_vfid *vfid, *new_vfid; + int err; + + vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); + if (!vfid) { + WARN_ON(!vfid); + return -EINVAL; + } + + /* We need a vFID to go back to after leaving the bridge's vFID. */ + new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); + if (!new_vfid) { + new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (IS_ERR(new_vfid)) { + netdev_err(dev, "Failed to create vFID for VID=%d\n", + vid); + return PTR_ERR(new_vfid); + } + } + + /* Invalidate existing {Port, VID} to vFID mapping and create a new + * one for the new vFID. + */ + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", + vfid->vfid); + goto err_port_vid_to_fid_invalidate; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, + mlxsw_sp_vfid_to_fid(new_vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", + new_vfid->vfid); + goto err_port_vid_to_fid_validate; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + if (err) { + netdev_err(dev, "Failed to disable learning\n"); + goto err_port_vid_learning_set; + } + + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, + false); + if (err) { + netdev_err(dev, "Failed clear to clear flooding\n"); + goto err_vport_flood_set; + } + + /* Switch between the vFIDs and destroy the old one if needed. 
*/ + new_vfid->nr_vports++; + mlxsw_sp_vport->vport.vfid = new_vfid; + vfid->nr_vports--; + if (!vfid->nr_vports) + mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; + + return 0; + +err_vport_flood_set: +err_port_vid_learning_set: +err_port_vid_to_fid_validate: +err_port_vid_to_fid_invalidate: + /* Rollback vFID only if new. */ + if (!new_vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); + return err; +} + +static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct net_device *dev = mlxsw_sp_vport->dev; + struct mlxsw_sp_vfid *vfid; + int err; + + vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); + if (!vfid) { + vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); + if (IS_ERR(vfid)) { + netdev_err(dev, "Failed to create bridge vFID\n"); + return PTR_ERR(vfid); + } + } + + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + if (err) { + netdev_err(dev, "Failed to setup flooding for vFID=%d\n", + vfid->vfid); + goto err_port_flood_set; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); + if (err) { + netdev_err(dev, "Failed to enable learning\n"); + goto err_port_vid_learning_set; + } + + /* We need to invalidate existing {Port, VID} to vFID mapping and + * create a new one for the bridge's vFID. + */ + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, + mlxsw_sp_vfid_to_fid(old_vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", + old_vfid->vfid); + goto err_port_vid_to_fid_invalidate; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", + vfid->vfid); + goto err_port_vid_to_fid_validate; + } + + /* Switch between the vFIDs and destroy the old one if needed. 
*/ + vfid->nr_vports++; + mlxsw_sp_vport->vport.vfid = vfid; + old_vfid->nr_vports--; + if (!old_vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); + + mlxsw_sp_vport->learning = 1; + mlxsw_sp_vport->learning_sync = 1; + mlxsw_sp_vport->uc_flood = 1; + mlxsw_sp_vport->bridged = 1; + + return 0; + +err_port_vid_to_fid_validate: + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, + mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); +err_port_vid_to_fid_invalidate: + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); +err_port_vid_learning_set: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); +err_port_flood_set: + if (!vfid->nr_vports) + mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + return err; +} + +static bool +mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, + const struct net_device *br_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + return false; + } + + return true; +} + +static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct mlxsw_sp_port *mlxsw_sp_vport; + struct net_device *upper_dev; + int err; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!info->master || !info->linking) + break; + if (!netif_is_bridge_master(upper_dev)) + return NOTIFY_BAD; + /* We can't have multiple VLAN interfaces configured on + * the same port and being members in the same bridge. + */ + if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, + upper_dev)) + return NOTIFY_BAD; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (!info->master) + break; + if (info->linking) { + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return NOTIFY_BAD; + } + err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, + upper_dev); + if (err) { + netdev_err(dev, "Failed to join bridge\n"); + return NOTIFY_BAD; + } + } else { + /* We ignore bridge's unlinking notifications if vPort + * is gone, since we already left the bridge when the + * VLAN device was unlinked from the real device. 
+ */ + if (!mlxsw_sp_vport) + return NOTIFY_DONE; + err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, + upper_dev); + if (err) { + netdev_err(dev, "Failed to leave bridge\n"); + return NOTIFY_BAD; + } + } + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct net_device *dev; + struct list_head *iter; + int ret; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (mlxsw_sp_port_dev_check(dev)) { + ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, + vid); + if (ret == NOTIFY_BAD) + return ret; + } + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, + unsigned long event, void *ptr) +{ + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr, + vid); + else if (netif_is_lag_master(real_dev)) + return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, + vid); + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (mlxsw_sp_port_dev_check(dev)) + return mlxsw_sp_netdevice_port_event(dev, event, ptr); + + if (netif_is_lag_master(dev)) + return mlxsw_sp_netdevice_lag_event(dev, event, ptr); + + if (is_vlan_dev(dev)) + return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + + return NOTIFY_DONE; +} + static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { .notifier_call = mlxsw_sp_netdevice_event, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4365c8bccc6d..a23dc610d259 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -41,16 +41,73 @@ #include <linux/netdevice.h> #include <linux/bitops.h> #include <linux/if_vlan.h> +#include <linux/list.h> #include <net/switchdev.h> +#include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID +#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ +#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) + +#define MLXSW_SP_LAG_MAX 64 +#define MLXSW_SP_PORT_PER_LAG_MAX 16 + +#define MLXSW_SP_MID_MAX 7000 struct mlxsw_sp_port; +struct mlxsw_sp_upper { + struct net_device *dev; + unsigned int ref_count; +}; + +struct mlxsw_sp_vfid { + struct list_head list; + u16 nr_vports; + u16 vfid; /* Starting at 0 */ + struct net_device *br_dev; + u16 vid; +}; + +struct mlxsw_sp_mid { + struct list_head list; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 mid; + unsigned int ref_count; +}; + +static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid) +{ + return MLXSW_SP_VFID_BASE + vfid; +} + +static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) +{ + return fid - MLXSW_SP_VFID_BASE; +} + +static inline bool mlxsw_sp_fid_is_vfid(u16 fid) +{ + return fid >= MLXSW_SP_VFID_BASE; +} + struct mlxsw_sp { - unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)]; + } port_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)]; + } br_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)]; + 
} br_mids; unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; struct mlxsw_sp_port **ports; struct mlxsw_core *core; @@ -63,12 +120,17 @@ struct mlxsw_sp { } fdb_notify; #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; - struct { - struct net_device *dev; - unsigned int ref_count; - } master_bridge; + struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */ + struct mlxsw_sp_upper master_bridge; + struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; }; +static inline struct mlxsw_sp_upper * +mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + return &mlxsw_sp->lags[lag_id]; +} + struct mlxsw_sp_port_pcpu_stats { u64 rx_packets; u64 rx_bytes; @@ -87,15 +149,87 @@ struct mlxsw_sp_port { u8 learning:1, learning_sync:1, uc_flood:1, - bridged:1; + bridged:1, + lagged:1; u16 pvid; + u16 lag_id; + struct { + struct list_head list; + struct mlxsw_sp_vfid *vfid; + u16 vid; + } vport; /* 802.1Q bridge VLANs */ - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long *active_vlans; + unsigned long *untagged_vlans; /* VLAN interfaces */ - unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; - u16 nr_vfids; + struct list_head vports_list; }; +static inline struct mlxsw_sp_port * +mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u8 local_port; + + local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, + lag_id, port_index); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL; +} + +static inline bool +mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port->vport.vfid; +} + +static inline struct net_device * +mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vfid->br_dev; +} + +static inline u16 +mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vid; +} + +static inline u16 +mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vfid->vfid; +} + +static inline struct mlxsw_sp_port * +mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid) + return mlxsw_sp_vport; + } + + return NULL; +} + +static inline struct mlxsw_sp_port * +mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 vfid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid) + return mlxsw_sp_vport; + } + + return NULL; +} + enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BM, @@ -118,5 +252,7 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, + bool set, bool only_uc); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 617fb22b5d81..ffe894e6d287 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -51,12 +51,50 @@ #include 
"core.h" #include "reg.h" +static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid) +{ + u16 fid = vid; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + fid = mlxsw_sp_vfid_to_fid(vfid); + } + + if (!fid) + fid = mlxsw_sp_port->pvid; + + return fid; +} + +static struct mlxsw_sp_port * +mlxsw_sp_port_orig_get(struct net_device *dev, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid; + + if (!is_vlan_dev(dev)) + return mlxsw_sp_port; + + vid = vlan_dev_vlan_id(dev); + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + WARN_ON(!mlxsw_sp_vport); + + return mlxsw_sp_vport; +} + static int mlxsw_sp_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); @@ -105,8 +143,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!spms_pl) return -ENOMEM; mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + } else { + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + } err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); kfree(spms_pl); @@ -124,22 +168,38 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } +static bool mlxsw_sp_vfid_is_vport_br(u16 vfid) +{ + return vfid >= MLXSW_SP_VFID_PORT_MAX; +} + static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid_begin, u16 fid_end, bool set, + u16 idx_begin, u16 idx_end, bool set, bool only_uc) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 range = fid_end - fid_begin + 1; + u16 local_port = mlxsw_sp_port->local_port; + enum mlxsw_flood_table_type table_type; + u16 range = idx_end - idx_begin + 1; char *sftr_pl; int err; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; + if (mlxsw_sp_vfid_is_vport_br(idx_begin)) + local_port = mlxsw_sp_port->local_port; + else + local_port = MLXSW_PORT_CPU_PORT; + } else { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + } + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin, - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, - mlxsw_sp_port->local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, + table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto buffer_out; @@ -150,9 +210,8 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, if (only_uc) goto buffer_out; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin, - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, - mlxsw_sp_port->local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, + table_type, range, local_port, set); err = 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: @@ -167,6 +226,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, last_visited_vid; int err; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, + set, true); + } + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set, true); @@ -185,6 +251,16 @@ err_port_flood_set: return err; } +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, + bool set, bool only_uc) +{ + /* In case of vFIDs, index into the flooding table is relative to + * the start of the vFIDs range. + */ + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, + only_uc); +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, unsigned long brport_flags) @@ -193,6 +269,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, bool set; int err; + if (!mlxsw_sp_port->bridged) + return -EINVAL; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -237,6 +316,22 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } +static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool vlan_enabled) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + /* SWITCHDEV_TRANS_PREPARE phase */ + if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) { + netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n"); + return -EINVAL; + } + + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -244,6 +339,10 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, @@ -257,6 +356,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.vlan_filtering); + break; default: err = -EOPNOTSUPP; break; @@ -304,7 +408,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { enum mlxsw_reg_svfa_mt mt; - if (mlxsw_sp_port->nr_vfids) + if (!list_empty(&mlxsw_sp_port->vports_list)) mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; else mt = MLXSW_REG_SVFA_MT_VID_TO_FID; @@ -316,7 +420,7 @@ static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { enum mlxsw_reg_svfa_mt mt; - if (!mlxsw_sp_port->nr_vfids) + if (list_empty(&mlxsw_sp_port->vports_list)) return 0; mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; @@ -342,14 +446,35 @@ err_port_add_vid: return err; } +static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, bool is_member, + bool untagged) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), + vid_end); + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, + is_member, untagged); + if (err) + return err; + } + + return 0; +} + static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool flag_untagged, bool flag_pvid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; + u16 vid, last_visited_vid, old_pvid; enum mlxsw_reg_svfa_mt mt; - u16 vid, vid_e; int err; /* In case this is invoked with BRIDGE_FLAGS_SELF and port is @@ -377,15 +502,18 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, if (err) { netdev_err(dev, "Failed to create FID=VID=%d mapping\n", vid); - return err; + goto err_port_vid_to_fid_set; } } + } - /* Set FID mapping according to port's mode */ + /* Set FID mapping according to port's mode */ + for (vid = vid_begin; vid <= vid_end; vid++) { err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid); if (err) { netdev_err(dev, "Failed to map FID=%d", vid); - return err; + last_visited_vid = --vid; + goto err_port_fid_map; } } @@ -393,83 +521,133 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, true, false); if (err) { netdev_err(dev, "Failed to configure flooding\n"); - return err; + goto err_port_flood_set; } - for (vid = vid_begin; vid <= vid_end; - vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { - vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), - vid_end); - - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true, - flag_untagged); - if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n", - vid, vid_e); - return err; - } + err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, + true, flag_untagged); + if (err) { + netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin, + vid_end); + goto err_port_vlans_set; } - vid = vid_begin; - if (flag_pvid && mlxsw_sp_port->pvid != vid) { - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); + old_pvid = mlxsw_sp_port->pvid; + if (flag_pvid && old_pvid != vid_begin) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin); if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n", - vid); - return err; + netdev_err(dev, "Unable to add PVID %d\n", vid_begin); + goto err_port_pvid_set; } - mlxsw_sp_port->pvid = vid; + mlxsw_sp_port->pvid = vid_begin; } /* Changing activity bits only if HW operation succeded */ - for (vid = vid_begin; vid <= vid_end; vid++) + for (vid = vid_begin; vid <= vid_end; vid++) { set_bit(vid, mlxsw_sp_port->active_vlans); + if (flag_untagged) + set_bit(vid, mlxsw_sp_port->untagged_vlans); + else + clear_bit(vid, mlxsw_sp_port->untagged_vlans); + } - return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, - mlxsw_sp_port->stp_state); + /* STP state change must be done after we set active VLANs */ + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, + mlxsw_sp_port->stp_state); + if (err) { + netdev_err(dev, "Failed to set STP state\n"); + goto err_port_stp_state_set; + } + + return 0; + +err_port_vid_to_fid_set: + mlxsw_sp_fid_destroy(mlxsw_sp, vid); + return err; + +err_port_stp_state_set: + for (vid = vid_begin; vid <= vid_end; vid++) + clear_bit(vid, mlxsw_sp_port->active_vlans); + if (old_pvid != mlxsw_sp_port->pvid) + mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); +err_port_pvid_set: + __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, + false); +err_port_vlans_set: + __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 
false, + false); +err_port_flood_set: + last_visited_vid = vid_end; +err_port_fid_map: + for (vid = last_visited_vid; vid >= vid_begin; vid--) + mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); + return err; } static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { - bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; if (switchdev_trans_ph_prepare(trans)) return 0; return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, - untagged_flag, pvid_flag); + flag_untagged, flag_pvid); +} + +static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) +{ + return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; +} + +static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) +{ + return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT : + MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port, - const char *mac, u16 vid, bool adding, - bool dynamic) +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + bool dynamic) { - enum mlxsw_reg_sfd_rec_policy policy; - enum mlxsw_reg_sfd_op op; char *sfd_pl; int err; - if (!vid) - vid = mlxsw_sp_port->pvid; + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), + mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, + local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + + return err; +} + +static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, + const char *mac, u16 fid, u16 lag_vid, + bool adding, bool dynamic) +{ + char *sfd_pl; + int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; - policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; - op = adding ? 
MLXSW_REG_SFD_OP_WRITE_EDIT : - MLXSW_REG_SFD_OP_WRITE_REMOVE; - mlxsw_reg_sfd_pack(sfd_pl, op, 0); - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, - mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, - mlxsw_sp_port->local_port); - err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), - sfd_pl); + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), + mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, + lag_vid, lag_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; @@ -480,11 +658,162 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); + u16 lag_vid = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + + if (!mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + fdb->addr, fid, true, false); + else + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->lag_id, + fdb->addr, fid, lag_vid, + true, false); +} + +static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, + u16 fid, u16 mid, bool adding) +{ + char *sfd_pl; + int err; + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, mid); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, + bool add, bool clear_all_ports) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *smid_pl; + int err, i; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); + if (clear_all_ports) { + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + if (mlxsw_sp->ports[i]) + mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + + list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { + if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) + return mid; + } + return NULL; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + u16 mid_idx; + + mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped, + MLXSW_SP_MID_MAX); + if (mid_idx == MLXSW_SP_MID_MAX) + return NULL; + + mid = kzalloc(sizeof(*mid), GFP_KERNEL); + if (!mid) + return NULL; + + set_bit(mid_idx, mlxsw_sp->br_mids.mapped); + ether_addr_copy(mid->addr, addr); + mid->vid = vid; + mid->mid = mid_idx; + mid->ref_count = 0; + list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); + + return mid; +} + +static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + if (--mid->ref_count == 0) { + list_del(&mid->list); + clear_bit(mid->mid, mlxsw_sp->br_mids.mapped); + kfree(mid); + return 1; + } + return 0; +} + +static int mlxsw_sp_port_mdb_add(struct 
mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_mid *mid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + int err = 0; + if (switchdev_trans_ph_prepare(trans)) return 0; - return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, - true, false); + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + netdev_err(dev, "Unable to allocate MC group\n"); + return -ENOMEM; + } + } + mid->ref_count++; + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, + mid->ref_count == 1); + if (err) { + netdev_err(dev, "Unable to set SMID\n"); + goto err_out; + } + + if (mid->ref_count == 1) { + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid, + true); + if (err) { + netdev_err(dev, "Unable to set MC SFD\n"); + goto err_out; + } + } + + return 0; + +err_out: + __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid); + return err; } static int mlxsw_sp_port_obj_add(struct net_device *dev, @@ -494,8 +823,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) + return 0; + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj), trans); @@ -505,6 +841,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, SWITCHDEV_OBJ_PORT_FDB(obj), trans); break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_MDB(obj), + trans); + break; default: err = -EOPNOTSUPP; break; @@ -532,7 +873,7 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool init) { struct net_device *dev = mlxsw_sp_port->dev; - u16 vid, vid_e; + u16 vid, pvid; int err; /* In case this is invoked with BRIDGE_FLAGS_SELF and port is @@ -542,30 +883,23 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, if (!init && !mlxsw_sp_port->bridged) return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end); - for (vid = vid_begin; vid <= vid_end; - vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { - vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), - vid_end); - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false, - false); - if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n", - vid, vid_e); - return err; - } + err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, + false, false); + if (err) { + netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin, + vid_end); + return err; } - if ((mlxsw_sp_port->pvid >= vid_begin) && - (mlxsw_sp_port->pvid <= vid_end)) { + pvid = mlxsw_sp_port->pvid; + if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { /* Default VLAN is always 1 */ - mlxsw_sp_port->pvid = 1; - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, - mlxsw_sp_port->pvid); + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n", - vid); + netdev_err(dev, "Unable to del PVID %d\n", pvid); return err; } + mlxsw_sp_port->pvid = 1; } if (init) @@ -606,8 +940,54 @@ static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port 
*mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb) { - return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, - false, false); + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); + u16 lag_vid = 0; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + + if (!mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + fdb->addr, fid, + false, false); + else + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->lag_id, + fdb->addr, fid, lag_vid, + false, false); +} + +static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_mid *mid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + u16 mid_idx; + int err = 0; + + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + netdev_err(dev, "Unable to remove port from MC DB\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + + mid_idx = mid->mid; + if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx, + false); + if (err) + netdev_err(dev, "Unable to remove MC SFD\n"); + } + + return err; } static int mlxsw_sp_port_obj_del(struct net_device *dev, @@ -616,8 +996,15 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) + return 0; + err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; @@ -625,6 +1012,9 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj)); break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_MDB(obj)); default: err = -EOPNOTSUPP; break; @@ -633,14 +1023,31 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, return err; } +static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, + u16 lag_id) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + int i; + + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (mlxsw_sp_port) + return mlxsw_sp_port; + } + return NULL; +} + static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_fdb *fdb, switchdev_obj_dump_cb_t *cb) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u16 vport_vid = 0, vport_fid = 0; char *sfd_pl; char mac[ETH_ALEN]; - u16 vid; + u16 fid; u8 local_port; + u16 lag_id; u8 num_rec; int stored_err = 0; int i; @@ -650,11 +1057,19 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, if (!sfd_pl) return -ENOMEM; + mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 tmp; + + tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + vport_fid = mlxsw_sp_vfid_to_fid(tmp); + vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); do { 
mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); - err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, - MLXSW_REG(sfd), sfd_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) goto out; @@ -669,21 +1084,46 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, for (i = 0; i < num_rec; i++) { switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { case MLXSW_REG_SFD_REC_TYPE_UNICAST: - mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid, + mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, &local_port); if (local_port == mlxsw_sp_port->local_port) { + if (vport_fid && vport_fid != fid) + continue; + else if (vport_fid) + fdb->vid = vport_vid; + else + fdb->vid = fid; + ether_addr_copy(fdb->addr, mac); + fdb->ndm_state = NUD_REACHABLE; + err = cb(&fdb->obj); + if (err) + stored_err = err; + } + break; + case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: + mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, + mac, &fid, &lag_id); + if (mlxsw_sp_port == + mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { + if (vport_fid && vport_fid != fid) + continue; + else if (vport_fid) + fdb->vid = vport_vid; + else + fdb->vid = fid; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; - fdb->vid = vid; err = cb(&fdb->obj); if (err) stored_err = err; } + break; } } } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); out: + mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); kfree(sfd_pl); return stored_err ? stored_err : err; } @@ -695,10 +1135,19 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid; int err = 0; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vlan->flags = 0; + vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + return cb(&vlan->obj); + } + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { vlan->flags = 0; if (vid == mlxsw_sp_port->pvid) vlan->flags |= BRIDGE_VLAN_INFO_PVID; + if (test_bit(vid, mlxsw_sp_port->untagged_vlans)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; vlan->vid_begin = vid; vlan->vid_end = vid; err = cb(&vlan->obj); @@ -715,6 +1164,10 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, @@ -740,6 +1193,21 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, }; +static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, + bool adding, char *mac, u16 vid, + struct net_device *dev) +{ + struct switchdev_notifier_fdb_info info; + unsigned long notifier_type; + + if (learning && learning_sync) { + info.addr = mac; + info.vid = vid; + notifier_type = adding ? 
SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; + call_switchdev_notifiers(notifier_type, dev, &info.info); + } +} + static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, char *sfn_pl, int rec_index, bool adding) @@ -747,34 +1215,119 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port *mlxsw_sp_port; char mac[ETH_ALEN]; u8 local_port; - u16 vid; + u16 vid, fid; + bool do_notification = true; int err; - mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port); + mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) { dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); - return; + goto just_remove; + } + + if (mlxsw_sp_fid_is_vfid(fid)) { + u16 vfid = mlxsw_sp_fid_to_vfid(fid); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, + vfid); + if (!mlxsw_sp_vport) { + netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); + goto just_remove; + } + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + /* Override the physical port with the vPort. */ + mlxsw_sp_port = mlxsw_sp_vport; + } else { + vid = fid; } - err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid, - adding && mlxsw_sp_port->learning, true); + adding = adding && mlxsw_sp_port->learning; + +do_fdb_op: + err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, + adding, true); if (err) { if (net_ratelimit()) netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); return; } - if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) { - struct switchdev_notifier_fdb_info info; - unsigned long notifier_type; + if (!do_notification) + return; + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, + mlxsw_sp_port->learning_sync, + adding, mac, vid, mlxsw_sp_port->dev); + return; + +just_remove: + adding = false; + do_notification = false; + goto do_fdb_op; +} - info.addr = mac; - info.vid = vid; - notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; - call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev, - &info.info); +static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, int rec_index, + bool adding) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char mac[ETH_ALEN]; + u16 lag_vid = 0; + u16 lag_id; + u16 vid, fid; + bool do_notification = true; + int err; + + mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id); + mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); + if (!mlxsw_sp_port) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n"); + goto just_remove; } + + if (mlxsw_sp_fid_is_vfid(fid)) { + u16 vfid = mlxsw_sp_fid_to_vfid(fid); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, + vfid); + if (!mlxsw_sp_vport) { + netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); + goto just_remove; + } + + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + lag_vid = vid; + /* Override the physical port with the vPort. 
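For orientation, the constants added in spectrum.h give the FID layout this notification handling relies on: FIDs 0..4095 are the 802.1Q FIDs (FID == VID), FIDs 4096..4607 (MLXSW_SP_VFID_BASE plus a port vFID) back non-bridged VLAN interfaces, and FIDs 4608..12799 back VLAN interfaces enslaved to a bridge. A condensed sketch of the FID-to-VLAN resolution performed above, not part of the commit and with a hypothetical function name, using only the helpers the patch adds:

static int mlxsw_sp_fdb_fid_to_vid(struct mlxsw_sp_port *mlxsw_sp_port,
				   u16 fid, u16 *p_vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vfid;

	if (!mlxsw_sp_fid_is_vfid(fid)) {
		*p_vid = fid;		/* 802.1Q bridge: FID == VID */
		return 0;
	}

	vfid = mlxsw_sp_fid_to_vfid(fid);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport)		/* VLAN device is already gone */
		return -ENOENT;

	*p_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	return 0;
}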
*/ + mlxsw_sp_port = mlxsw_sp_vport; + } else { + vid = fid; + } + + adding = adding && mlxsw_sp_port->learning; + +do_fdb_op: + err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, + adding, true); + if (err) { + if (net_ratelimit()) + netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); + return; + } + + if (!do_notification) + return; + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, + mlxsw_sp_port->learning_sync, + adding, mac, vid, + mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev); + return; + +just_remove: + adding = false; + do_notification = false; + goto do_fdb_op; } static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, @@ -789,6 +1342,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, rec_index, false); break; + case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG: + mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, + rec_index, true); + break; + case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG: + mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, + rec_index, false); + break; } } @@ -812,6 +1373,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); + mutex_lock(&mlxsw_sp->fdb_lock); do { mlxsw_reg_sfn_pack(sfn_pl); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); @@ -824,6 +1386,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); } while (num_rec); + mutex_unlock(&mlxsw_sp->fdb_lock); kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -838,6 +1401,7 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); return err; } + mutex_init(&mlxsw_sp->fdb_lock); INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -877,7 +1441,8 @@ int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) * with VID 1. 
*/ mlxsw_sp_port->pvid = 1; - err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true); + err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, + true); if (err) { netdev_err(dev, "Unable to init VLANs\n"); return err; diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 2056b719c262..7df318346b05 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -600,22 +600,11 @@ static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv) static int encx24j600_hw_init(struct encx24j600_priv *priv) { - struct net_device *dev = priv->ndev; int ret = 0; - u16 eidled; u16 macon2; priv->hw_enabled = false; - eidled = encx24j600_read_reg(priv, EIDLED); - if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) { - ret = -EINVAL; - goto err_out; - } - - netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n", - (eidled & REVID_MASK) >> REVID_SHIFT); - /* PHY Leds: link status, * LEDA: Link State + collision events * LEDB: Link State + transmit/receive events @@ -655,7 +644,6 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv) if (netif_msg_hw(priv)) encx24j600_dump_config(priv, "Hw is initialized"); -err_out: return ret; } @@ -1004,6 +992,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) struct net_device *ndev; struct encx24j600_priv *priv; + u16 eidled; ndev = alloc_etherdev(sizeof(struct encx24j600_priv)); @@ -1072,10 +1061,21 @@ static int encx24j600_spi_probe(struct spi_device *spi) goto out_free; } + eidled = encx24j600_read_reg(priv, EIDLED); + if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) { + ret = -EINVAL; + goto out_unregister; + } + + netif_info(priv, probe, ndev, "Silicon rev ID: 0x%02x\n", + (eidled & REVID_MASK) >> REVID_SHIFT); + netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr); return ret; +out_unregister: + unregister_netdev(priv->ndev); out_free: free_netdev(ndev); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 83651ac8ddb9..270c9eeb7ab6 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1488,7 +1488,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) } myri10ge_vlan_rx(mgp->dev, va, skb); skb_record_rx_queue(skb, ss - &mgp->ss[0]); - skb_mark_napi_id(skb, &ss->napi); if (polling) { int hlen; @@ -1506,6 +1505,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) skb->data_len -= hlen; skb->tail += hlen; skb->protocol = eth_type_trans(skb, dev); + skb_mark_napi_id(skb, &ss->napi); netif_receive_skb(skb); } else @@ -3814,7 +3814,6 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) ss->dev = mgp->dev; netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, myri10ge_napi_weight); - napi_hash_add(&ss->napi); } return 0; abort: diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index b83f7c0fcf99..122c2ee3dfe2 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -1937,6 +1937,12 @@ static void refill_rx(struct net_device *dev) break; /* Better luck next round. */ np->rx_dma[entry] = pci_map_single(np->pci_dev, skb->data, buflen, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->rx_dma[entry])) { + dev_kfree_skb_any(skb); + np->rx_skbuff[entry] = NULL; + break; /* Better luck next round. 
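The natsemi hunks here add the previously missing error checks after pci_map_single(); a streaming DMA mapping can fail, and the returned handle must be validated with pci_dma_mapping_error() before it is written into a descriptor. A generic sketch of the pattern, not taken from the driver and with a hypothetical function name:

static int example_map_rx_buf(struct pci_dev *pdev, struct sk_buff *skb,
			      unsigned int buflen, dma_addr_t *p_dma)
{
	*p_dma = pci_map_single(pdev, skb->data, buflen, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *p_dma)) {
		dev_kfree_skb_any(skb);		/* drop; retry on next refill */
		return -ENOMEM;
	}
	return 0;
}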
*/ + } np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); } np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); @@ -2093,6 +2099,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) np->tx_skbuff[entry] = skb; np->tx_dma[entry] = pci_map_single(np->pci_dev, skb->data,skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) { + np->tx_skbuff[entry] = NULL; + dev_kfree_skb_irq(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig new file mode 100644 index 000000000000..9508ad782c30 --- /dev/null +++ b/drivers/net/ethernet/netronome/Kconfig @@ -0,0 +1,36 @@ +# +# Netronome device configuration +# + +config NET_VENDOR_NETRONOME + bool "Netronome(R) devices" + default y + ---help--- + If you have a Netronome(R) network (Ethernet) card or device, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Netronome(R) cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_NETRONOME + +config NFP_NETVF + tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver" + depends on PCI && PCI_MSI + depends on VXLAN || VXLAN=n + ---help--- + This driver supports SR-IOV virtual functions of + the Netronome(R) NFP4000/NFP6000 cards working as + a advanced Ethernet NIC. + +config NFP_NET_DEBUG + bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers" + depends on NFP_NET || NFP_NETVF + ---help--- + Enable extra sanity checks and debugfs support in + Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers. + Note: selecting this option may adversely impact + performance. + +endif diff --git a/drivers/net/ethernet/netronome/Makefile b/drivers/net/ethernet/netronome/Makefile new file mode 100644 index 000000000000..dcb7b383f634 --- /dev/null +++ b/drivers/net/ethernet/netronome/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Netronome network device drivers +# + +obj-$(CONFIG_NFP_NETVF) += nfp/ diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile new file mode 100644 index 000000000000..68178819ff12 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o + +nfp_netvf-objs := \ + nfp_net_common.o \ + nfp_net_ethtool.o \ + nfp_netvf_main.o + +nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h new file mode 100644 index 000000000000..ab264e1bccd0 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -0,0 +1,748 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_net.h + * Declarations for Netronome network device driver. + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#ifndef _NFP_NET_H_ +#define _NFP_NET_H_ + +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <asm-generic/io-64-nonatomic-hi-lo.h> + +#include "nfp_net_ctrl.h" + +#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args) +#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args) +#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args) +#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args) +#define nn_warn_ratelimit(nn, fmt, args...) \ + do { \ + if (unlikely(net_ratelimit())) \ + netdev_warn((nn)->netdev, fmt, ## args); \ + } while (0) + +/* Max time to wait for NFP to respond on updates (in ms) */ +#define NFP_NET_POLL_TIMEOUT 5000 + +/* Bar allocation */ +#define NFP_NET_CRTL_BAR 0 +#define NFP_NET_Q0_BAR 2 +#define NFP_NET_Q1_BAR 4 /* OBSOLETE */ + +/* Max bits in DMA address */ +#define NFP_NET_MAX_DMA_BITS 40 + +/* Default size for MTU and freelist buffer sizes */ +#define NFP_NET_DEFAULT_MTU 1500 +#define NFP_NET_DEFAULT_RX_BUFSZ 2048 + +/* Maximum number of bytes prepended to a packet */ +#define NFP_NET_MAX_PREPEND 64 + +/* Interrupt definitions */ +#define NFP_NET_NON_Q_VECTORS 2 +#define NFP_NET_IRQ_LSC_IDX 0 +#define NFP_NET_IRQ_EXN_IDX 1 + +/* Queue/Ring definitions */ +#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */ +#define NFP_NET_MAX_RX_RINGS 64 /* Max. # of Rx rings per device */ + +#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */ +#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */ +#define NFP_NET_MAX_TX_DESCS (256 * 1024) /* Max. # of Tx descs per ring */ +#define NFP_NET_MAX_RX_DESCS (256 * 1024) /* Max. 
# of Rx descs per ring */ + +#define NFP_NET_TX_DESCS_DEFAULT 4096 /* Default # of Tx descs per ring */ +#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */ + +#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */ + +/* Offload definitions */ +#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16)) + +/* Forward declarations */ +struct nfp_net; +struct nfp_net_r_vector; + +/* Convenience macro for writing dma address into RX/TX descriptors */ +#define nfp_desc_set_dma_addr(desc, dma_addr) \ + do { \ + __typeof(desc) __d = (desc); \ + dma_addr_t __addr = (dma_addr); \ + \ + __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \ + __d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \ + } while (0) + +/* TX descriptor format */ + +#define PCIE_DESC_TX_EOP BIT(7) +#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0) +#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0) + +/* Flags in the host TX descriptor */ +#define PCIE_DESC_TX_CSUM BIT(7) +#define PCIE_DESC_TX_IP4_CSUM BIT(6) +#define PCIE_DESC_TX_TCP_CSUM BIT(5) +#define PCIE_DESC_TX_UDP_CSUM BIT(4) +#define PCIE_DESC_TX_VLAN BIT(3) +#define PCIE_DESC_TX_LSO BIT(2) +#define PCIE_DESC_TX_ENCAP BIT(1) +#define PCIE_DESC_TX_O_IP4_CSUM BIT(0) + +struct nfp_net_tx_desc { + union { + struct { + u8 dma_addr_hi; /* High bits of host buf address */ + __le16 dma_len; /* Length to DMA for this desc */ + u8 offset_eop; /* Offset in buf where pkt starts + + * highest bit is eop flag. + */ + __le32 dma_addr_lo; /* Low 32bit of host buf addr */ + + __le16 mss; /* MSS to be used for LSO */ + u8 l4_offset; /* LSO, where the L4 data starts */ + u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */ + + __le16 vlan; /* VLAN tag to add if indicated */ + __le16 data_len; /* Length of frame + meta data */ + } __packed; + __le32 vals[4]; + }; +}; + +/** + * struct nfp_net_tx_buf - software TX buffer descriptor + * @skb: sk_buff associated with this buffer + * @dma_addr: DMA mapping address of the buffer + * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags) + * @pkt_cnt: Number of packets to be produced out of the skb associated + * with this buffer (valid only on the head's buffer). + * Will be 1 for all non-TSO packets. + * @real_len: Number of bytes which to be produced out of the skb (valid only + * on the head's buffer). Equal to skb->len for non-TSO packets. 
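The nfp_desc_set_dma_addr() macro above packs a host bus address into the 40-bit descriptor layout (NFP_NET_MAX_DMA_BITS): the low 32 bits land in dma_addr_lo and the next 8 bits in dma_addr_hi. A quick worked example, not part of the commit:

	struct nfp_net_tx_desc desc;
	dma_addr_t addr = 0x123456789aULL;	/* example 40-bit bus address */

	nfp_desc_set_dma_addr(&desc, addr);
	/* desc.dma_addr_lo == cpu_to_le32(0x3456789a)	(lower_32_bits)
	 * desc.dma_addr_hi == 0x12			(upper_32_bits & 0xff)
	 */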
+ */ +struct nfp_net_tx_buf { + struct sk_buff *skb; + dma_addr_t dma_addr; + short int fidx; + u16 pkt_cnt; + u32 real_len; +}; + +/** + * struct nfp_net_tx_ring - TX ring structure + * @r_vec: Back pointer to ring vector structure + * @idx: Ring index from Linux's perspective + * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue + * @qcp_q: Pointer to base of the QCP TX queue + * @cnt: Size of the queue in number of descriptors + * @wr_p: TX ring write pointer (free running) + * @rd_p: TX ring read pointer (free running) + * @qcp_rd_p: Local copy of QCP TX queue read pointer + * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer + * (used for .xmit_more delayed kick) + * @txbufs: Array of transmitted TX buffers, to free on transmit + * @txds: Virtual address of TX ring in host memory + * @dma: DMA address of the TX ring + * @size: Size, in bytes, of the TX ring (needed to free) + */ +struct nfp_net_tx_ring { + struct nfp_net_r_vector *r_vec; + + u32 idx; + int qcidx; + u8 __iomem *qcp_q; + + u32 cnt; + u32 wr_p; + u32 rd_p; + u32 qcp_rd_p; + + u32 wr_ptr_add; + + struct nfp_net_tx_buf *txbufs; + struct nfp_net_tx_desc *txds; + + dma_addr_t dma; + unsigned int size; +} ____cacheline_aligned; + +/* RX and freelist descriptor format */ + +#define PCIE_DESC_RX_DD BIT(7) +#define PCIE_DESC_RX_META_LEN_MASK GENMASK(6, 0) + +/* Flags in the RX descriptor */ +#define PCIE_DESC_RX_RSS cpu_to_le16(BIT(15)) +#define PCIE_DESC_RX_I_IP4_CSUM cpu_to_le16(BIT(14)) +#define PCIE_DESC_RX_I_IP4_CSUM_OK cpu_to_le16(BIT(13)) +#define PCIE_DESC_RX_I_TCP_CSUM cpu_to_le16(BIT(12)) +#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11)) +#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10)) +#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9)) +#define PCIE_DESC_RX_SPARE cpu_to_le16(BIT(8)) +#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7)) +#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6)) +#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5)) +#define PCIE_DESC_RX_TCP_CSUM cpu_to_le16(BIT(4)) +#define PCIE_DESC_RX_TCP_CSUM_OK cpu_to_le16(BIT(3)) +#define PCIE_DESC_RX_UDP_CSUM cpu_to_le16(BIT(2)) +#define PCIE_DESC_RX_UDP_CSUM_OK cpu_to_le16(BIT(1)) +#define PCIE_DESC_RX_VLAN cpu_to_le16(BIT(0)) + +#define PCIE_DESC_RX_CSUM_ALL (PCIE_DESC_RX_IP4_CSUM | \ + PCIE_DESC_RX_TCP_CSUM | \ + PCIE_DESC_RX_UDP_CSUM | \ + PCIE_DESC_RX_I_IP4_CSUM | \ + PCIE_DESC_RX_I_TCP_CSUM | \ + PCIE_DESC_RX_I_UDP_CSUM) +#define PCIE_DESC_RX_CSUM_OK_SHIFT 1 +#define __PCIE_DESC_RX_CSUM_ALL le16_to_cpu(PCIE_DESC_RX_CSUM_ALL) +#define __PCIE_DESC_RX_CSUM_ALL_OK (__PCIE_DESC_RX_CSUM_ALL >> \ + PCIE_DESC_RX_CSUM_OK_SHIFT) + +struct nfp_net_rx_desc { + union { + struct { + u8 dma_addr_hi; /* High bits of the buf address */ + __le16 reserved; /* Must be zero */ + u8 meta_len_dd; /* Must be zero */ + + __le32 dma_addr_lo; /* Low bits of the buffer address */ + } __packed fld; + + struct { + __le16 data_len; /* Length of the frame + meta data */ + u8 reserved; + u8 meta_len_dd; /* Length of meta data prepended + + * descriptor done flag. + */ + + __le16 flags; /* RX flags. 
See @PCIE_DESC_RX_* */ + __le16 vlan; /* VLAN if stripped */ + } __packed rxd; + + __le32 vals[2]; + }; +}; + +struct nfp_net_rx_hash { + __be32 hash_type; + __be32 hash; +}; + +/** + * struct nfp_net_rx_buf - software RX buffer descriptor + * @skb: sk_buff associated with this buffer + * @dma_addr: DMA mapping address of the buffer + */ +struct nfp_net_rx_buf { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +/** + * struct nfp_net_rx_ring - RX ring structure + * @r_vec: Back pointer to ring vector structure + * @cnt: Size of the queue in number of descriptors + * @wr_p: FL/RX ring write pointer (free running) + * @rd_p: FL/RX ring read pointer (free running) + * @idx: Ring index from Linux's perspective + * @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist + * @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue + * @qcp_fl: Pointer to base of the QCP freelist queue + * @qcp_rx: Pointer to base of the QCP RX queue + * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer + * (used for free list batching) + * @rxbufs: Array of transmitted FL/RX buffers + * @rxds: Virtual address of FL/RX ring in host memory + * @dma: DMA address of the FL/RX ring + * @size: Size, in bytes, of the FL/RX ring (needed to free) + */ +struct nfp_net_rx_ring { + struct nfp_net_r_vector *r_vec; + + u32 cnt; + u32 wr_p; + u32 rd_p; + + u16 idx; + u16 wr_ptr_add; + + int fl_qcidx; + int rx_qcidx; + u8 __iomem *qcp_fl; + u8 __iomem *qcp_rx; + + struct nfp_net_rx_buf *rxbufs; + struct nfp_net_rx_desc *rxds; + + dma_addr_t dma; + unsigned int size; +} ____cacheline_aligned; + +/** + * struct nfp_net_r_vector - Per ring interrupt vector configuration + * @nfp_net: Backpointer to nfp_net structure + * @napi: NAPI structure for this ring vec + * @tx_ring: Pointer to TX ring + * @rx_ring: Pointer to RX ring + * @irq_idx: Index into MSI-X table + * @rx_sync: Seqlock for atomic updates of RX stats + * @rx_pkts: Number of received packets + * @rx_bytes: Number of received bytes + * @rx_drops: Number of packets dropped on RX due to lack of resources + * @hw_csum_rx_ok: Counter of packets where the HW checksum was OK + * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK + * @hw_csum_rx_error: Counter of packets with bad checksums + * @tx_sync: Seqlock for atomic updates of TX stats + * @tx_pkts: Number of Transmitted packets + * @tx_bytes: Number of Transmitted bytes + * @hw_csum_tx: Counter of packets with TX checksum offload requested + * @hw_csum_tx_inner: Counter of inner TX checksum offload requests + * @tx_gather: Counter of packets with Gather DMA + * @tx_lso: Counter of LSO packets sent + * @tx_errors: How many TX errors were encountered + * @tx_busy: How often was TX busy (no space)? + * @handler: Interrupt handler for this ring vector + * @name: Name of the interrupt vector + * @affinity_mask: SMP affinity mask for this vector + * + * This structure ties RX and TX rings to interrupt vectors and a NAPI + * context. This currently only supports one RX and TX ring per + * interrupt vector but might be extended in the future to allow + * association of multiple rings per vector. 
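The rx_sync/tx_sync members documented here are u64_stats_sync seqcounts, which let the datapath bump 64-bit counters while stats readers (ethtool, ndo_get_stats64) see consistent values even on 32-bit hosts. A minimal sketch of the usual writer/reader pairing, not taken from this driver and with hypothetical function names:

static void example_count_rx(struct nfp_net_r_vector *r_vec, unsigned int len)
{
	u64_stats_update_begin(&r_vec->rx_sync);	/* writer: NAPI poll */
	r_vec->rx_pkts++;
	r_vec->rx_bytes += len;
	u64_stats_update_end(&r_vec->rx_sync);
}

static void example_read_rx(struct nfp_net_r_vector *r_vec,
			    u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {						/* reader: stats dump */
		start = u64_stats_fetch_begin(&r_vec->rx_sync);
		*pkts = r_vec->rx_pkts;
		*bytes = r_vec->rx_bytes;
	} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
}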
+ */ +struct nfp_net_r_vector { + struct nfp_net *nfp_net; + struct napi_struct napi; + + struct nfp_net_tx_ring *tx_ring; + struct nfp_net_rx_ring *rx_ring; + + int irq_idx; + + struct u64_stats_sync rx_sync; + u64 rx_pkts; + u64 rx_bytes; + u64 rx_drops; + u64 hw_csum_rx_ok; + u64 hw_csum_rx_inner_ok; + u64 hw_csum_rx_error; + + struct u64_stats_sync tx_sync; + u64 tx_pkts; + u64 tx_bytes; + u64 hw_csum_tx; + u64 hw_csum_tx_inner; + u64 tx_gather; + u64 tx_lso; + u64 tx_errors; + u64 tx_busy; + + irq_handler_t handler; + char name[IFNAMSIZ + 8]; + cpumask_t affinity_mask; +} ____cacheline_aligned; + +/* Firmware version as it is written in the 32bit value in the BAR */ +struct nfp_net_fw_version { + u8 minor; + u8 major; + u8 class; + u8 resv; +} __packed; + +static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver, + u8 resv, u8 class, u8 major, u8 minor) +{ + return fw_ver->resv == resv && + fw_ver->class == class && + fw_ver->major == major && + fw_ver->minor == minor; +} + +/** + * struct nfp_net - NFP network device structure + * @pdev: Backpointer to PCI device + * @netdev: Backpointer to net_device structure + * @nfp_fallback: Is the driver used in fallback mode? + * @is_vf: Is the driver attached to a VF? + * @is_nfp3200: Is the driver for a NFP-3200 card? + * @fw_loaded: Is the firmware loaded? + * @ctrl: Local copy of the control register/word. + * @fl_bufsz: Currently configured size of the freelist buffers + * @rx_offset: Offset in the RX buffers where packet data starts + * @cpp: Pointer to the CPP handle + * @nfp_dev_cpp: Pointer to the NFP Device handle + * @ctrl_area: Pointer to the CPP area for the control BAR + * @tx_area: Pointer to the CPP area for the TX queues + * @rx_area: Pointer to the CPP area for the FL/RX queues + * @fw_ver: Firmware version + * @cap: Capabilities advertised by the Firmware + * @max_mtu: Maximum support MTU advertised by the Firmware + * @rss_cfg: RSS configuration + * @rss_key: RSS secret key + * @rss_itbl: RSS indirection table + * @max_tx_rings: Maximum number of TX rings supported by the Firmware + * @max_rx_rings: Maximum number of RX rings supported by the Firmware + * @num_tx_rings: Currently configured number of TX rings + * @num_rx_rings: Currently configured number of RX rings + * @txd_cnt: Size of the TX ring in number of descriptors + * @rxd_cnt: Size of the RX ring in number of descriptors + * @tx_rings: Array of pre-allocated TX ring structures + * @rx_rings: Array of pre-allocated RX ring structures + * @num_irqs: Number of allocated interrupt vectors + * @num_r_vecs: Number of used ring vectors + * @r_vecs: Pre-allocated array of ring vectors + * @irq_entries: Pre-allocated array of MSI-X entries + * @lsc_handler: Handler for Link State Change interrupt + * @lsc_name: Name for Link State Change interrupt + * @exn_handler: Handler for Exception interrupt + * @exn_name: Name for Exception interrupt + * @shared_handler: Handler for shared interrupts + * @shared_name: Name for shared interrupt + * @me_freq_mhz: ME clock_freq (MHz) + * @reconfig_lock: Protects HW reconfiguration request regs/machinery + * @link_up: Is the link up? 
+ * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading + * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter + * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter + * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter + * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter + * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW + * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts + * @qcp_cfg: Pointer to QCP queue used for configuration notification + * @ctrl_bar: Pointer to mapped control BAR + * @tx_bar: Pointer to mapped TX queues + * @rx_bar: Pointer to mapped FL/RX queues + * @debugfs_dir: Device directory in debugfs + */ +struct nfp_net { + struct pci_dev *pdev; + struct net_device *netdev; + + unsigned nfp_fallback:1; + unsigned is_vf:1; + unsigned is_nfp3200:1; + unsigned fw_loaded:1; + + u32 ctrl; + u32 fl_bufsz; + + u32 rx_offset; + +#ifdef CONFIG_PCI_IOV + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; +#endif + + struct nfp_cpp *cpp; + struct platform_device *nfp_dev_cpp; + struct nfp_cpp_area *ctrl_area; + struct nfp_cpp_area *tx_area; + struct nfp_cpp_area *rx_area; + + struct nfp_net_fw_version fw_ver; + u32 cap; + u32 max_mtu; + + u32 rss_cfg; + u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; + u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; + + int max_tx_rings; + int max_rx_rings; + + int num_tx_rings; + int num_rx_rings; + + int stride_tx; + int stride_rx; + + int txd_cnt; + int rxd_cnt; + + struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS]; + struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS]; + + u8 num_irqs; + u8 num_r_vecs; + struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS]; + struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS + + NFP_NET_MAX_TX_RINGS]; + + irq_handler_t lsc_handler; + char lsc_name[IFNAMSIZ + 8]; + + irq_handler_t exn_handler; + char exn_name[IFNAMSIZ + 8]; + + irq_handler_t shared_handler; + char shared_name[IFNAMSIZ + 8]; + + u32 me_freq_mhz; + + bool link_up; + spinlock_t link_status_lock; + + spinlock_t reconfig_lock; + + u32 rx_coalesce_usecs; + u32 rx_coalesce_max_frames; + u32 tx_coalesce_usecs; + u32 tx_coalesce_max_frames; + + __be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS]; + u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS]; + + u8 __iomem *qcp_cfg; + + u8 __iomem *ctrl_bar; + u8 __iomem *q_bar; + u8 __iomem *tx_bar; + u8 __iomem *rx_bar; + + struct dentry *debugfs_dir; +}; + +/* Functions to read/write from/to a BAR + * Performs any endian conversion necessary. + */ +static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) +{ + writeb(val, nn->ctrl_bar + off); +} + +/* NFP-3200 can't handle 16-bit accesses too well - hence no readw/writew */ + +static inline u32 nn_readl(struct nfp_net *nn, int off) +{ + return readl(nn->ctrl_bar + off); +} + +static inline void nn_writel(struct nfp_net *nn, int off, u32 val) +{ + writel(val, nn->ctrl_bar + off); +} + +static inline u64 nn_readq(struct nfp_net *nn, int off) +{ + return readq(nn->ctrl_bar + off); +} + +static inline void nn_writeq(struct nfp_net *nn, int off, u64 val) +{ + writeq(val, nn->ctrl_bar + off); +} + +/* Flush posted PCI writes by reading something without side effects */ +static inline void nn_pci_flush(struct nfp_net *nn) +{ + nn_readl(nn, NFP_NET_CFG_VERSION); +} + +/* Queue Controller Peripheral access functions and definitions. 
+ * + * Some of the BARs of the NFP are mapped to portions of the Queue + * Controller Peripheral (QCP) address space on the NFP. A QCP queue + * has a read and a write pointer (as well as a size and flags, + * indicating overflow etc). The QCP offers a number of different + * operation on queue pointers, but here we only offer function to + * either add to a pointer or to read the pointer value. + */ +#define NFP_QCP_QUEUE_ADDR_SZ 0x800 +#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ) +#define NFP_QCP_QUEUE_ADD_RPTR 0x0000 +#define NFP_QCP_QUEUE_ADD_WPTR 0x0004 +#define NFP_QCP_QUEUE_STS_LO 0x0008 +#define NFP_QCP_QUEUE_STS_LO_READPTR_mask 0x3ffff +#define NFP_QCP_QUEUE_STS_HI 0x000c +#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff + +/* The offset of a QCP queues in the PCIe Target (same on NFP3200 and NFP6000 */ +#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff))) + +/* nfp_qcp_ptr - Read or Write Pointer of a queue */ +enum nfp_qcp_ptr { + NFP_QCP_READ_PTR = 0, + NFP_QCP_WRITE_PTR +}; + +/* There appear to be an *undocumented* upper limit on the value which + * one can add to a queue and that value is either 0x3f or 0x7f. We + * go with 0x3f as a conservative measure. + */ +#define NFP_QCP_MAX_ADD 0x3f + +static inline void _nfp_qcp_ptr_add(u8 __iomem *q, + enum nfp_qcp_ptr ptr, u32 val) +{ + u32 off; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_ADD_RPTR; + else + off = NFP_QCP_QUEUE_ADD_WPTR; + + while (val > NFP_QCP_MAX_ADD) { + writel(NFP_QCP_MAX_ADD, q + off); + val -= NFP_QCP_MAX_ADD; + } + + writel(val, q + off); +} + +/** + * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue + * + * @q: Base address for queue structure + * @val: Value to add to the queue pointer + * + * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. + */ +static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val) +{ + _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val); +} + +/** + * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue + * + * @q: Base address for queue structure + * @val: Value to add to the queue pointer + * + * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. + */ +static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val) +{ + _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val); +} + +static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr) +{ + u32 off; + u32 val; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_STS_LO; + else + off = NFP_QCP_QUEUE_STS_HI; + + val = readl(q + off); + + if (ptr == NFP_QCP_READ_PTR) + return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask; + else + return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask; +} + +/** + * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue + * @q: Base address for queue structure + * + * Return: Value read. + */ +static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q) +{ + return _nfp_qcp_read(q, NFP_QCP_READ_PTR); +} + +/** + * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue + * @q: Base address for queue structure + * + * Return: Value read. 
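+ * Only the low 18 bits (NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask) of the
+ * status word are valid.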
+ */ +static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q) +{ + return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR); +} + +/* Globals */ +extern const char nfp_net_driver_name[]; +extern const char nfp_net_driver_version[]; + +/* Prototypes */ +void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, + void __iomem *ctrl_bar); + +struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, + int max_tx_rings, int max_rx_rings); +void nfp_net_netdev_free(struct nfp_net *nn); +int nfp_net_netdev_init(struct net_device *netdev); +void nfp_net_netdev_clean(struct net_device *netdev); +void nfp_net_set_ethtool_ops(struct net_device *netdev); +void nfp_net_info(struct nfp_net *nn); +int nfp_net_reconfig(struct nfp_net *nn, u32 update); +void nfp_net_rss_write_itbl(struct nfp_net *nn); +void nfp_net_rss_write_key(struct nfp_net *nn); +void nfp_net_coalesce_write_cfg(struct nfp_net *nn); +int nfp_net_irqs_alloc(struct nfp_net *nn); +void nfp_net_irqs_disable(struct nfp_net *nn); + +#ifdef CONFIG_NFP_NET_DEBUG +void nfp_net_debugfs_create(void); +void nfp_net_debugfs_destroy(void); +void nfp_net_debugfs_adapter_add(struct nfp_net *nn); +void nfp_net_debugfs_adapter_del(struct nfp_net *nn); +#else +static inline void nfp_net_debugfs_create(void) +{ +} + +static inline void nfp_net_debugfs_destroy(void) +{ +} + +static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +{ +} + +static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +{ +} +#endif /* CONFIG_NFP_NET_DEBUG */ + +#endif /* _NFP_NET_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c new file mode 100644 index 000000000000..43c618bafdb6 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -0,0 +1,2435 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_net_common.c + * Netronome network device driver: Common functions between PF and VF + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * Brad Petrus <brad.petrus@netronome.com> + * Chris Telfer <chris.telfer@netronome.com> + */ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/msi.h> +#include <linux/ethtool.h> +#include <linux/log2.h> +#include <linux/if_vlan.h> +#include <linux/random.h> + +#include <linux/ktime.h> + +#include <net/vxlan.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +/** + * nfp_net_get_fw_version() - Read and parse the FW version + * @fw_ver: Output fw_version structure to read to + * @ctrl_bar: Mapped address of the control BAR + */ +void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, + void __iomem *ctrl_bar) +{ + u32 reg; + + reg = readl(ctrl_bar + NFP_NET_CFG_VERSION); + put_unaligned_le32(reg, fw_ver); +} + +/** + * nfp_net_reconfig() - Reconfigure the firmware + * @nn: NFP Net device to reconfigure + * @update: The value for the update field in the BAR config + * + * Write the update word to the BAR and ping the reconfig queue. The + * poll until the firmware has acknowledged the update by zeroing the + * update word. + * + * Return: Negative errno on error, 0 on success + */ +int nfp_net_reconfig(struct nfp_net *nn, u32 update) +{ + int cnt, ret = 0; + u32 new; + + spin_lock_bh(&nn->reconfig_lock); + + nn_writel(nn, NFP_NET_CFG_UPDATE, update); + /* ensure update is written before pinging HW */ + nn_pci_flush(nn); + nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); + + /* Poll update field, waiting for NFP to ack the config */ + for (cnt = 0; ; cnt++) { + new = nn_readl(nn, NFP_NET_CFG_UPDATE); + if (new == 0) + break; + if (new & NFP_NET_CFG_UPDATE_ERR) { + nn_err(nn, "Reconfig error: 0x%08x\n", new); + ret = -EIO; + break; + } else if (cnt >= NFP_NET_POLL_TIMEOUT) { + nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n", + update, cnt); + ret = -EIO; + break; + } + mdelay(1); + } + + spin_unlock_bh(&nn->reconfig_lock); + return ret; +} + +/* Interrupt configuration and handling + */ + +/** + * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking + * @nn: NFP Network structure + * @entry_nr: MSI-X table entry + * + * Clear the MSI-X table mask bit for the given entry bypassing Linux irq + * handling subsystem. Use *only* to reenable automasked vectors. + */ +static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr) +{ + struct list_head *msi_head = &nn->pdev->dev.msi_list; + struct msi_desc *entry; + u32 off; + + /* All MSI-Xs have the same mask_base */ + entry = list_first_entry(msi_head, struct msi_desc, list); + + off = (PCI_MSIX_ENTRY_SIZE * entry_nr) + + PCI_MSIX_ENTRY_VECTOR_CTRL; + writel(0, entry->mask_base + off); + readl(entry->mask_base); +} + +/** + * nfp_net_irq_unmask() - Unmask automasked interrupt + * @nn: NFP Network structure + * @entry_nr: MSI-X table entry + * + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. 
+ */ +static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) +{ + if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { + nfp_net_irq_unmask_msix(nn, entry_nr); + return; + } + + nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED); + nn_pci_flush(nn); +} + +/** + * nfp_net_msix_alloc() - Try to allocate MSI-X irqs + * @nn: NFP Network structure + * @nr_vecs: Number of MSI-X vectors to allocate + * + * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors. + * + * Return: Number of MSI-X vectors obtained or 0 on error. + */ +static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs) +{ + struct pci_dev *pdev = nn->pdev; + int nvecs; + int i; + + for (i = 0; i < nr_vecs; i++) + nn->irq_entries[i].entry = i; + + nvecs = pci_enable_msix_range(pdev, nn->irq_entries, + NFP_NET_NON_Q_VECTORS + 1, nr_vecs); + if (nvecs < 0) { + nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n", + NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs); + return 0; + } + + return nvecs; +} + +/** + * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want + * @nn: NFP Network structure + * + * We want a vector per CPU (or ring), whatever is smaller plus + * NFP_NET_NON_Q_VECTORS for LSC etc. + * + * Return: Number of interrupts wanted + */ +static int nfp_net_irqs_wanted(struct nfp_net *nn) +{ + int ncpus; + int vecs; + + ncpus = num_online_cpus(); + + vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings); + vecs = min_t(int, vecs, ncpus); + + return vecs + NFP_NET_NON_Q_VECTORS; +} + +/** + * nfp_net_irqs_alloc() - allocates MSI-X irqs + * @nn: NFP Network structure + * + * Return: Number of irqs obtained or 0 on error. + */ +int nfp_net_irqs_alloc(struct nfp_net *nn) +{ + int wanted_irqs; + + wanted_irqs = nfp_net_irqs_wanted(nn); + + nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs); + if (nn->num_irqs == 0) { + nn_err(nn, "Failed to allocate MSI-X IRQs\n"); + return 0; + } + + nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS; + + if (nn->num_irqs < wanted_irqs) + nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n", + wanted_irqs, nn->num_irqs); + + return nn->num_irqs; +} + +/** + * nfp_net_irqs_disable() - Disable interrupts + * @nn: NFP Network structure + * + * Undoes what @nfp_net_irqs_alloc() does. + */ +void nfp_net_irqs_disable(struct nfp_net *nn) +{ + pci_disable_msix(nn->pdev); +} + +/** + * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings. + * @irq: Interrupt + * @data: Opaque data structure + * + * Return: Indicate if the interrupt has been handled. + */ +static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) +{ + struct nfp_net_r_vector *r_vec = data; + + napi_schedule_irqoff(&r_vec->napi); + + /* The FW auto-masks any interrupt, either via the MASK bit in + * the MSI-X table or via the per entry ICR field. So there + * is no need to disable interrupts here. 
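+ * The vector is unmasked again from nfp_net_poll() via
+ * nfp_net_irq_unmask() once the NAPI budget is not exhausted.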
+ */ + return IRQ_HANDLED; +} + +/** + * nfp_net_read_link_status() - Reread link status from control BAR + * @nn: NFP Network structure + */ +static void nfp_net_read_link_status(struct nfp_net *nn) +{ + unsigned long flags; + bool link_up; + u32 sts; + + spin_lock_irqsave(&nn->link_status_lock, flags); + + sts = nn_readl(nn, NFP_NET_CFG_STS); + link_up = !!(sts & NFP_NET_CFG_STS_LINK); + + if (nn->link_up == link_up) + goto out; + + nn->link_up = link_up; + + if (nn->link_up) { + netif_carrier_on(nn->netdev); + netdev_info(nn->netdev, "NIC Link is Up\n"); + } else { + netif_carrier_off(nn->netdev); + netdev_info(nn->netdev, "NIC Link is Down\n"); + } +out: + spin_unlock_irqrestore(&nn->link_status_lock, flags); +} + +/** + * nfp_net_irq_lsc() - Interrupt service routine for link state changes + * @irq: Interrupt + * @data: Opaque data structure + * + * Return: Indicate if the interrupt has been handled. + */ +static irqreturn_t nfp_net_irq_lsc(int irq, void *data) +{ + struct nfp_net *nn = data; + + nfp_net_read_link_status(nn); + + nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX); + + return IRQ_HANDLED; +} + +/** + * nfp_net_irq_exn() - Interrupt service routine for exceptions + * @irq: Interrupt + * @data: Opaque data structure + * + * Return: Indicate if the interrupt has been handled. + */ +static irqreturn_t nfp_net_irq_exn(int irq, void *data) +{ + struct nfp_net *nn = data; + + nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__); + /* XXX TO BE IMPLEMENTED */ + return IRQ_HANDLED; +} + +/** + * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring + * @tx_ring: TX ring structure + */ +static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + + tx_ring->qcidx = tx_ring->idx * nn->stride_tx; + tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); +} + +/** + * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring + * @rx_ring: RX ring structure + */ +static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + + rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; + rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); + + rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); + rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx); +} + +/** + * nfp_net_irqs_assign() - Assign IRQs and setup rvecs. 
+ * @netdev: netdev structure + */ +static void nfp_net_irqs_assign(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + struct nfp_net_r_vector *r_vec; + int r; + + /* Assumes nn->num_tx_rings == nn->num_rx_rings */ + if (nn->num_tx_rings > nn->num_r_vecs) { + nn_warn(nn, "More rings (%d) than vectors (%d).\n", + nn->num_tx_rings, nn->num_r_vecs); + nn->num_tx_rings = nn->num_r_vecs; + nn->num_rx_rings = nn->num_r_vecs; + } + + nn->lsc_handler = nfp_net_irq_lsc; + nn->exn_handler = nfp_net_irq_exn; + + for (r = 0; r < nn->num_r_vecs; r++) { + r_vec = &nn->r_vecs[r]; + r_vec->nfp_net = nn; + r_vec->handler = nfp_net_irq_rxtx; + r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; + + cpumask_set_cpu(r, &r_vec->affinity_mask); + + r_vec->tx_ring = &nn->tx_rings[r]; + nn->tx_rings[r].idx = r; + nn->tx_rings[r].r_vec = r_vec; + nfp_net_tx_ring_init(r_vec->tx_ring); + + r_vec->rx_ring = &nn->rx_rings[r]; + nn->rx_rings[r].idx = r; + nn->rx_rings[r].r_vec = r_vec; + nfp_net_rx_ring_init(r_vec->rx_ring); + } +} + +/** + * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN) + * @nn: NFP Network structure + * @ctrl_offset: Control BAR offset where IRQ configuration should be written + * @format: printf-style format to construct the interrupt name + * @name: Pointer to allocated space for interrupt name + * @name_sz: Size of space for interrupt name + * @vector_idx: Index of MSI-X vector used for this interrupt + * @handler: IRQ handler to register for this interrupt + */ +static int +nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, + const char *format, char *name, size_t name_sz, + unsigned int vector_idx, irq_handler_t handler) +{ + struct msix_entry *entry; + int err; + + entry = &nn->irq_entries[vector_idx]; + + snprintf(name, name_sz, format, netdev_name(nn->netdev)); + err = request_irq(entry->vector, handler, 0, name, nn); + if (err) { + nn_err(nn, "Failed to request IRQ %d (err=%d).\n", + entry->vector, err); + return err; + } + nn_writeb(nn, ctrl_offset, vector_idx); + + return 0; +} + +/** + * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN) + * @nn: NFP Network structure + * @ctrl_offset: Control BAR offset where IRQ configuration should be written + * @vector_idx: Index of MSI-X vector used for this interrupt + */ +static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset, + unsigned int vector_idx) +{ + nn_writeb(nn, ctrl_offset, 0xff); + free_irq(nn->irq_entries[vector_idx].vector, nn); +} + +/* Transmit + * + * One queue controller peripheral queue is used for transmit. The + * driver en-queues packets for transmit by advancing the write + * pointer. The device indicates that packets have transmitted by + * advancing the read pointer. The driver maintains a local copy of + * the read and write pointer in @struct nfp_net_tx_ring. The driver + * keeps @wr_p in sync with the queue controller write pointer and can + * determine how many packets have been transmitted by comparing its + * copy of the read pointer @rd_p with the read pointer maintained by + * the queue controller peripheral. + */ + +/** + * nfp_net_tx_full() - Check if the TX ring is full + * @tx_ring: TX ring to check + * @dcnt: Number of descriptors that need to be enqueued (must be >= 1) + * + * This function checks, based on the *host copy* of read/write + * pointer if a given TX ring is full. The real TX queue may have + * some newly made available slots. + * + * Return: True if the ring is full. 
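+ *
+ * E.g. with cnt == 256, wr_p == 1000 and rd_p == 746 the host view is
+ * 254 descriptors in flight, so the check only accepts a further
+ * single-descriptor request (dcnt == 1).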
+ */ +static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt) +{ + return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt); +} + +/* Wrappers for deciding when to stop and restart TX queues */ +static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring) +{ + return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4); +} + +static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring) +{ + return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1); +} + +/** + * nfp_net_tx_ring_stop() - stop tx ring + * @nd_q: netdev queue + * @tx_ring: driver tx queue structure + * + * Safely stop TX ring. Remember that while we are running .start_xmit() + * someone else may be cleaning the TX ring completions so we need to be + * extra careful here. + */ +static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, + struct nfp_net_tx_ring *tx_ring) +{ + netif_tx_stop_queue(nd_q); + + /* We can race with the TX completion out of NAPI so recheck */ + smp_mb(); + if (unlikely(nfp_net_tx_ring_should_wake(tx_ring))) + netif_tx_start_queue(nd_q); +} + +/** + * nfp_net_tx_tso() - Set up Tx descriptor for LSO + * @nn: NFP Net device + * @r_vec: per-ring structure + * @txbuf: Pointer to driver soft TX descriptor + * @txd: Pointer to HW TX descriptor + * @skb: Pointer to SKB + * + * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. + * Return error on packet header greater than maximum supported LSO header size. + */ +static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + struct nfp_net_tx_buf *txbuf, + struct nfp_net_tx_desc *txd, struct sk_buff *skb) +{ + u32 hdrlen; + u16 mss; + + if (!skb_is_gso(skb)) + return; + + if (!skb->encapsulation) + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + else + hdrlen = skb_inner_transport_header(skb) - skb->data + + inner_tcp_hdrlen(skb); + + txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs; + txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1); + + mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK; + txd->l4_offset = hdrlen; + txd->mss = cpu_to_le16(mss); + txd->flags |= PCIE_DESC_TX_LSO; + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_lso++; + u64_stats_update_end(&r_vec->tx_sync); +} + +/** + * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor + * @nn: NFP Net device + * @r_vec: per-ring structure + * @txbuf: Pointer to driver soft TX descriptor + * @txd: Pointer to TX descriptor + * @skb: Pointer to SKB + * + * This function sets the TX checksum flags in the TX descriptor based + * on the configuration and the protocol of the packet to be transmitted. + */ +static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + struct nfp_net_tx_buf *txbuf, + struct nfp_net_tx_desc *txd, struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct iphdr *iph; + u8 l4_hdr; + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) + return; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return; + + txd->flags |= PCIE_DESC_TX_CSUM; + if (skb->encapsulation) + txd->flags |= PCIE_DESC_TX_ENCAP; + + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? 
inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { + txd->flags |= PCIE_DESC_TX_IP4_CSUM; + l4_hdr = iph->protocol; + } else if (ipv6h->version == 6) { + l4_hdr = ipv6h->nexthdr; + } else { + nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n", + iph->version); + return; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + txd->flags |= PCIE_DESC_TX_TCP_CSUM; + break; + case IPPROTO_UDP: + txd->flags |= PCIE_DESC_TX_UDP_CSUM; + break; + default: + nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n", + l4_hdr); + return; + } + + u64_stats_update_begin(&r_vec->tx_sync); + if (skb->encapsulation) + r_vec->hw_csum_tx_inner += txbuf->pkt_cnt; + else + r_vec->hw_csum_tx += txbuf->pkt_cnt; + u64_stats_update_end(&r_vec->tx_sync); +} + +/** + * nfp_net_tx() - Main transmit entry point + * @skb: SKB to transmit + * @netdev: netdev structure + * + * Return: NETDEV_TX_OK on success. + */ +static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + const struct skb_frag_struct *frag; + struct nfp_net_r_vector *r_vec; + struct nfp_net_tx_desc *txd, txdg; + struct nfp_net_tx_buf *txbuf; + struct nfp_net_tx_ring *tx_ring; + struct netdev_queue *nd_q; + dma_addr_t dma_addr; + unsigned int fsize; + int f, nr_frags; + int wr_idx; + u16 qidx; + + qidx = skb_get_queue_mapping(skb); + tx_ring = &nn->tx_rings[qidx]; + r_vec = tx_ring->r_vec; + nd_q = netdev_get_tx_queue(nn->netdev, qidx); + + nr_frags = skb_shinfo(skb)->nr_frags; + + if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { + nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", + qidx, tx_ring->wr_p, tx_ring->rd_p); + netif_tx_stop_queue(nd_q); + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_busy++; + u64_stats_update_end(&r_vec->tx_sync); + return NETDEV_TX_BUSY; + } + + /* Start with the head skbuf */ + dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + goto err_free; + + wr_idx = tx_ring->wr_p % tx_ring->cnt; + + /* Stash the soft descriptor of the head then initialize it */ + txbuf = &tx_ring->txbufs[wr_idx]; + txbuf->skb = skb; + txbuf->dma_addr = dma_addr; + txbuf->fidx = -1; + txbuf->pkt_cnt = 1; + txbuf->real_len = skb->len; + + /* Build TX descriptor */ + txd = &tx_ring->txds[wr_idx]; + txd->offset_eop = (nr_frags == 0) ? 
PCIE_DESC_TX_EOP : 0; + txd->dma_len = cpu_to_le16(skb_headlen(skb)); + nfp_desc_set_dma_addr(txd, dma_addr); + txd->data_len = cpu_to_le16(skb->len); + + txd->flags = 0; + txd->mss = 0; + txd->l4_offset = 0; + + nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); + + nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); + + if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { + txd->flags |= PCIE_DESC_TX_VLAN; + txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + } + + /* Gather DMA */ + if (nr_frags > 0) { + /* all descs must match except for in addr, length and eop */ + txdg = *txd; + + for (f = 0; f < nr_frags; f++) { + frag = &skb_shinfo(skb)->frags[f]; + fsize = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0, + fsize, DMA_TO_DEVICE); + if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + goto err_unmap; + + wr_idx = (wr_idx + 1) % tx_ring->cnt; + tx_ring->txbufs[wr_idx].skb = skb; + tx_ring->txbufs[wr_idx].dma_addr = dma_addr; + tx_ring->txbufs[wr_idx].fidx = f; + + txd = &tx_ring->txds[wr_idx]; + *txd = txdg; + txd->dma_len = cpu_to_le16(fsize); + nfp_desc_set_dma_addr(txd, dma_addr); + txd->offset_eop = + (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0; + } + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_gather++; + u64_stats_update_end(&r_vec->tx_sync); + } + + netdev_tx_sent_queue(nd_q, txbuf->real_len); + + tx_ring->wr_p += nr_frags + 1; + if (nfp_net_tx_ring_should_stop(tx_ring)) + nfp_net_tx_ring_stop(nd_q, tx_ring); + + tx_ring->wr_ptr_add += nr_frags + 1; + if (!skb->xmit_more || netif_xmit_stopped(nd_q)) { + /* force memory write before we let HW know */ + wmb(); + nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add); + tx_ring->wr_ptr_add = 0; + } + + skb_tx_timestamp(skb); + + return NETDEV_TX_OK; + +err_unmap: + --f; + while (f >= 0) { + frag = &skb_shinfo(skb)->frags[f]; + dma_unmap_page(&nn->pdev->dev, + tx_ring->txbufs[wr_idx].dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); + tx_ring->txbufs[wr_idx].skb = NULL; + tx_ring->txbufs[wr_idx].dma_addr = 0; + tx_ring->txbufs[wr_idx].fidx = -2; + wr_idx = wr_idx - 1; + if (wr_idx < 0) + wr_idx += tx_ring->cnt; + } + dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + tx_ring->txbufs[wr_idx].skb = NULL; + tx_ring->txbufs[wr_idx].dma_addr = 0; + tx_ring->txbufs[wr_idx].fidx = -2; +err_free: + nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_errors++; + u64_stats_update_end(&r_vec->tx_sync); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * nfp_net_tx_complete() - Handled completed TX packets + * @tx_ring: TX ring structure + * + * Return: Number of completed TX descriptors + */ +static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + const struct skb_frag_struct *frag; + struct netdev_queue *nd_q; + u32 done_pkts = 0, done_bytes = 0; + struct sk_buff *skb; + int todo, nr_frags; + u32 qcp_rd_p; + int fidx; + int idx; + + /* Work out how many descriptors have been transmitted */ + qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); + + if (qcp_rd_p == tx_ring->qcp_rd_p) + return; + + if (qcp_rd_p > tx_ring->qcp_rd_p) + todo = qcp_rd_p - tx_ring->qcp_rd_p; + else + todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p; + + while (todo--) { + idx = tx_ring->rd_p % tx_ring->cnt; + tx_ring->rd_p++; + + skb = tx_ring->txbufs[idx].skb; + if (!skb) + continue; + + nr_frags = 
skb_shinfo(skb)->nr_frags; + fidx = tx_ring->txbufs[idx].fidx; + + if (fidx == -1) { + /* unmap head */ + dma_unmap_single(&nn->pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + + done_pkts += tx_ring->txbufs[idx].pkt_cnt; + done_bytes += tx_ring->txbufs[idx].real_len; + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[fidx]; + dma_unmap_page(&nn->pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); + } + + /* check for last gather fragment */ + if (fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + + tx_ring->txbufs[idx].dma_addr = 0; + tx_ring->txbufs[idx].skb = NULL; + tx_ring->txbufs[idx].fidx = -2; + } + + tx_ring->qcp_rd_p = qcp_rd_p; + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_bytes += done_bytes; + r_vec->tx_pkts += done_pkts; + u64_stats_update_end(&r_vec->tx_sync); + + nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + netdev_tx_completed_queue(nd_q, done_pkts, done_bytes); + if (nfp_net_tx_ring_should_wake(tx_ring)) { + /* Make sure TX thread will see updated tx_ring->rd_p */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(nd_q))) + netif_tx_wake_queue(nd_q); + } + + WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, + "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", + tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); +} + +/** + * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring + * @tx_ring: TX ring structure + * + * Assumes that the device is stopped + */ +static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + const struct skb_frag_struct *frag; + struct netdev_queue *nd_q; + struct sk_buff *skb; + int nr_frags; + int fidx; + int idx; + + while (tx_ring->rd_p != tx_ring->wr_p) { + idx = tx_ring->rd_p % tx_ring->cnt; + + skb = tx_ring->txbufs[idx].skb; + if (skb) { + nr_frags = skb_shinfo(skb)->nr_frags; + fidx = tx_ring->txbufs[idx].fidx; + + if (fidx == -1) { + /* unmap head */ + dma_unmap_single(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_headlen(skb), + DMA_TO_DEVICE); + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[fidx]; + dma_unmap_page(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_frag_size(frag), + DMA_TO_DEVICE); + } + + /* check for last gather fragment */ + if (fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + + tx_ring->txbufs[idx].dma_addr = 0; + tx_ring->txbufs[idx].skb = NULL; + tx_ring->txbufs[idx].fidx = -2; + } + + memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx])); + + tx_ring->qcp_rd_p++; + tx_ring->rd_p++; + } + + nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + netdev_tx_reset_queue(nd_q); +} + +static void nfp_net_tx_timeout(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + for (i = 0; i < nn->num_tx_rings; i++) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) + continue; + nn_warn(nn, "TX timeout on ring: %d\n", i); + } + nn_warn(nn, "TX watchdog timeout\n"); +} + +/* Receive processing + */ + +/** + * nfp_net_rx_space() - return the number of free slots on the RX ring + * @rx_ring: RX ring structure + * + * Make sure we leave at least one slot free. 
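+ * (I.e. for a ring of cnt descriptors at most cnt - 1 buffers are ever
+ * placed on the freelist.)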
+ * + * Return: True if there is space on the RX ring + */ +static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring) +{ + return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p); +} + +/** + * nfp_net_rx_alloc_one() - Allocate and map skb for RX + * @rx_ring: RX ring structure of the skb + * @dma_addr: Pointer to storage for DMA address (output param) + * + * This function will allcate a new skb, map it for DMA. + * + * Return: allocated skb or NULL on failure. + */ +static struct sk_buff * +nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr) +{ + struct nfp_net *nn = rx_ring->r_vec->nfp_net; + struct sk_buff *skb; + + skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz); + if (!skb) { + nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n"); + return NULL; + } + + *dma_addr = dma_map_single(&nn->pdev->dev, skb->data, + nn->fl_bufsz, DMA_FROM_DEVICE); + if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { + dev_kfree_skb_any(skb); + nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + return NULL; + } + + return skb; +} + +/** + * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings + * @rx_ring: RX ring structure + * @skb: Skb to put on rings + * @dma_addr: DMA address of skb mapping + */ +static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + unsigned int wr_idx; + + wr_idx = rx_ring->wr_p % rx_ring->cnt; + + /* Stash SKB and DMA address away */ + rx_ring->rxbufs[wr_idx].skb = skb; + rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; + + /* Fill freelist descriptor */ + rx_ring->rxds[wr_idx].fld.reserved = 0; + rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; + nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr); + + rx_ring->wr_p++; + rx_ring->wr_ptr_add++; + if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) { + /* Update write pointer of the freelist queue. Make + * sure all writes are flushed before telling the hardware. + */ + wmb(); + nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add); + rx_ring->wr_ptr_add = 0; + } +} + +/** + * nfp_net_rx_flush() - Free any buffers currently on the RX ring + * @rx_ring: RX ring to remove buffers from + * + * Assumes that the device is stopped + */ +static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net *nn = rx_ring->r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + int idx; + + while (rx_ring->rd_p != rx_ring->wr_p) { + idx = rx_ring->rd_p % rx_ring->cnt; + + if (rx_ring->rxbufs[idx].skb) { + dma_unmap_single(&pdev->dev, + rx_ring->rxbufs[idx].dma_addr, + nn->fl_bufsz, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_ring->rxbufs[idx].skb); + rx_ring->rxbufs[idx].dma_addr = 0; + rx_ring->rxbufs[idx].skb = NULL; + } + + memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx])); + + rx_ring->rd_p++; + } +} + +/** + * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers + * @rx_ring: RX ring to fill + * + * Try to fill as many buffers as possible into freelist. Return + * number of buffers added. + * + * Return: Number of freelist buffers added. 
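+ * (In the current implementation the return value is 0 on success; if an
+ * allocation fails the ring is flushed and -ENOMEM is returned.)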
+ */ +static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring) +{ + struct sk_buff *skb; + dma_addr_t dma_addr; + + while (nfp_net_rx_space(rx_ring)) { + skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr); + if (!skb) { + nfp_net_rx_flush(rx_ring); + return -ENOMEM; + } + nfp_net_rx_give_one(rx_ring, skb, dma_addr); + } + + return 0; +} + +/** + * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors + * @flags: RX descriptor flags field in CPU byte order + */ +static int nfp_net_rx_csum_has_errors(u16 flags) +{ + u16 csum_all_checked, csum_all_ok; + + csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL; + csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK; + + return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT); +} + +/** + * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags + * @nn: NFP Net device + * @r_vec: per-ring structure + * @rxd: Pointer to RX descriptor + * @skb: Pointer to SKB + */ +static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + struct nfp_net_rx_desc *rxd, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + if (!(nn->netdev->features & NETIF_F_RXCSUM)) + return; + + if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->hw_csum_rx_error++; + u64_stats_update_end(&r_vec->rx_sync); + return; + } + + /* Assume that the firmware will never report inner CSUM_OK unless outer + * L4 headers were successfully parsed. FW will always report zero UDP + * checksum as CSUM_OK. + */ + if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK || + rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) { + __skb_incr_checksum_unnecessary(skb); + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->hw_csum_rx_ok++; + u64_stats_update_end(&r_vec->rx_sync); + } + + if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK || + rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) { + __skb_incr_checksum_unnecessary(skb); + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->hw_csum_rx_inner_ok++; + u64_stats_update_end(&r_vec->rx_sync); + } +} + +/** + * nfp_net_set_hash() - Set SKB hash data + * @netdev: adapter's net_device structure + * @skb: SKB to set the hash data on + * @rxd: RX descriptor + * + * The RSS hash and hash-type are pre-pended to the packet data. + * Extract and decode it and set the skb fields. + */ +static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, + struct nfp_net_rx_desc *rxd) +{ + struct nfp_net_rx_hash *rx_hash; + + if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) || + !(netdev->features & NETIF_F_RXHASH)) + return; + + rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash)); + + switch (be32_to_cpu(rx_hash->hash_type)) { + case NFP_NET_RSS_IPV4: + case NFP_NET_RSS_IPV6: + case NFP_NET_RSS_IPV6_EX: + skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3); + break; + default: + skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4); + break; + } +} + +/** + * nfp_net_rx() - receive up to @budget packets on @rx_ring + * @rx_ring: RX ring to receive from + * @budget: NAPI budget + * + * Note, this function is separated out from the napi poll function to + * more cleanly separate packet receive code from other bookkeeping + * functions performed in the napi poll function. + * + * There are differences between the NFP-3200 firmware and the + * NFP-6000 firmware. The NFP-3200 firmware uses a dedicated RX queue + * to indicate that new packets have arrived. 
The NFP-6000 does not + * have this queue and uses the DD bit in the RX descriptor. This + * method cannot be used on the NFP-3200 as it causes a race + * condition: The RX ring write pointer on the NFP-3200 is updated + * after packets (and descriptors) have been DMAed. If the DD bit is + * used and subsequently the read pointer is updated this may lead to + * the RX queue to underflow (if the firmware has not yet update the + * write pointer). Therefore we use slightly ugly conditional code + * below to handle the differences. We may, in the future update the + * NFP-3200 firmware to behave the same as the firmware on the + * NFP-6000. + * + * Return: Number of packets received. + */ +static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + unsigned int data_len, meta_len; + int avail = 0, pkts_polled = 0; + struct sk_buff *skb, *new_skb; + struct nfp_net_rx_desc *rxd; + dma_addr_t new_dma_addr; + u32 qcp_wr_p; + int idx; + + if (nn->is_nfp3200) { + /* Work out how many packets arrived */ + qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx); + idx = rx_ring->rd_p % rx_ring->cnt; + + if (qcp_wr_p == idx) + /* No new packets */ + return 0; + + if (qcp_wr_p > idx) + avail = qcp_wr_p - idx; + else + avail = qcp_wr_p + rx_ring->cnt - idx; + } else { + avail = budget + 1; + } + + while (avail > 0 && pkts_polled < budget) { + idx = rx_ring->rd_p % rx_ring->cnt; + + rxd = &rx_ring->rxds[idx]; + if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) { + if (nn->is_nfp3200) + nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n", + rx_ring->idx, idx, + rxd->vals[0], rxd->vals[1]); + break; + } + /* Memory barrier to ensure that we won't do other reads + * before the DD bit. 
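+ * (I.e. the data_len, meta_len and flags fields of the descriptor must
+ * not be read until the DD bit has been observed.)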
+ */ + dma_rmb(); + + rx_ring->rd_p++; + pkts_polled++; + avail--; + + skb = rx_ring->rxbufs[idx].skb; + + new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr); + if (!new_skb) { + nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb, + rx_ring->rxbufs[idx].dma_addr); + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->rx_drops++; + u64_stats_update_end(&r_vec->rx_sync); + continue; + } + + dma_unmap_single(&nn->pdev->dev, + rx_ring->rxbufs[idx].dma_addr, + nn->fl_bufsz, DMA_FROM_DEVICE); + + nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr); + + meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; + data_len = le16_to_cpu(rxd->rxd.data_len); + + if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) { + dev_kfree_skb_any(skb); + continue; + } + + if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) { + /* The packet data starts after the metadata */ + skb_reserve(skb, meta_len); + } else { + /* The packet data starts at a fixed offset */ + skb_reserve(skb, nn->rx_offset); + } + + /* Adjust the SKB for the dynamic meta data pre-pended */ + skb_put(skb, data_len - meta_len); + + nfp_net_set_hash(nn->netdev, skb, rxd); + + /* Pad small frames to minimum */ + if (skb_put_padto(skb, 60)) + break; + + /* Stats update */ + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->rx_pkts++; + r_vec->rx_bytes += skb->len; + u64_stats_update_end(&r_vec->rx_sync); + + skb_record_rx_queue(skb, rx_ring->idx); + skb->protocol = eth_type_trans(skb, nn->netdev); + + nfp_net_rx_csum(nn, r_vec, rxd, skb); + + if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rxd->rxd.vlan)); + + napi_gro_receive(&rx_ring->r_vec->napi, skb); + } + + if (nn->is_nfp3200) + nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled); + + return pkts_polled; +} + +/** + * nfp_net_poll() - napi poll function + * @napi: NAPI structure + * @budget: NAPI budget + * + * Return: number of packets polled. 
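+ *
+ * TX completions for the paired TX ring are processed in the same poll
+ * call, before any RX work (see nfp_net_tx_complete()).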
+ */ +static int nfp_net_poll(struct napi_struct *napi, int budget) +{ + struct nfp_net_r_vector *r_vec = + container_of(napi, struct nfp_net_r_vector, napi); + struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; + struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring; + struct nfp_net *nn = r_vec->nfp_net; + struct netdev_queue *txq; + unsigned int pkts_polled; + + tx_ring = &nn->tx_rings[rx_ring->idx]; + txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nfp_net_tx_complete(tx_ring); + + pkts_polled = nfp_net_rx(rx_ring, budget); + + if (pkts_polled < budget) { + napi_complete_done(napi, pkts_polled); + nfp_net_irq_unmask(nn, r_vec->irq_idx); + } + + return pkts_polled; +} + +/* Setup and Configuration + */ + +/** + * nfp_net_tx_ring_free() - Free resources allocated to a TX ring + * @tx_ring: TX ring to free + */ +static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0); + + kfree(tx_ring->txbufs); + + if (tx_ring->txds) + dma_free_coherent(&pdev->dev, tx_ring->size, + tx_ring->txds, tx_ring->dma); + + tx_ring->cnt = 0; + tx_ring->wr_p = 0; + tx_ring->rd_p = 0; + tx_ring->qcp_rd_p = 0; + tx_ring->wr_ptr_add = 0; + + tx_ring->txbufs = NULL; + tx_ring->txds = NULL; + tx_ring->dma = 0; + tx_ring->size = 0; +} + +/** + * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring + * @tx_ring: TX Ring structure to allocate + * + * Return: 0 on success, negative errno otherwise. + */ +static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + int sz; + + tx_ring->cnt = nn->txd_cnt; + + tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; + tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->txds) + goto err_alloc; + + sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt; + tx_ring->txbufs = kzalloc(sz, GFP_KERNEL); + if (!tx_ring->txbufs) + goto err_alloc; + + /* Write the DMA address, size and MSI-X info to the device */ + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx); + + netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx); + + nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n", + tx_ring->idx, tx_ring->qcidx, + tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds); + + return 0; + +err_alloc: + nfp_net_tx_ring_free(tx_ring); + return -ENOMEM; +} + +/** + * nfp_net_rx_ring_free() - Free resources allocated to a RX ring + * @rx_ring: RX ring to free + */ +static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0); + + kfree(rx_ring->rxbufs); + + if (rx_ring->rxds) + dma_free_coherent(&pdev->dev, rx_ring->size, + rx_ring->rxds, rx_ring->dma); + + rx_ring->cnt = 0; + rx_ring->wr_p = 0; + rx_ring->rd_p = 0; + rx_ring->wr_ptr_add = 
0; + + rx_ring->rxbufs = NULL; + rx_ring->rxds = NULL; + rx_ring->dma = 0; + rx_ring->size = 0; +} + +/** + * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring + * @rx_ring: RX ring to allocate + * + * Return: 0 on success, negative errno otherwise. + */ +static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + int sz; + + rx_ring->cnt = nn->rxd_cnt; + + rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; + rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->rxds) + goto err_alloc; + + sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt; + rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL); + if (!rx_ring->rxbufs) + goto err_alloc; + + /* Write the DMA address, size and MSI-X info to the device */ + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx); + + nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", + rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, + rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); + + return 0; + +err_alloc: + nfp_net_rx_ring_free(rx_ring); + return -ENOMEM; +} + +static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free) +{ + struct nfp_net_r_vector *r_vec; + struct msix_entry *entry; + + while (n_free--) { + r_vec = &nn->r_vecs[n_free]; + entry = &nn->irq_entries[r_vec->irq_idx]; + + nfp_net_rx_ring_free(r_vec->rx_ring); + nfp_net_tx_ring_free(r_vec->tx_ring); + + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, r_vec); + + netif_napi_del(&r_vec->napi); + } +} + +/** + * nfp_net_free_rings() - Free all ring resources + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_free_rings(struct nfp_net *nn) +{ + __nfp_net_free_rings(nn, nn->num_r_vecs); +} + +/** + * nfp_net_alloc_rings() - Allocate resources for RX and TX rings + * @nn: NFP Net device to reconfigure + * + * Return: 0 on success or negative errno on error. 
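+ *
+ * On error the partially initialized vector is cleaned up and any rings,
+ * IRQs and NAPI contexts set up for earlier vectors are released via
+ * __nfp_net_free_rings().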
+ */ +static int nfp_net_alloc_rings(struct nfp_net *nn) +{ + struct nfp_net_r_vector *r_vec; + struct msix_entry *entry; + int err; + int r; + + for (r = 0; r < nn->num_r_vecs; r++) { + r_vec = &nn->r_vecs[r]; + entry = &nn->irq_entries[r_vec->irq_idx]; + + /* Setup NAPI */ + netif_napi_add(nn->netdev, &r_vec->napi, + nfp_net_poll, NAPI_POLL_WEIGHT); + + snprintf(r_vec->name, sizeof(r_vec->name), + "%s-rxtx-%d", nn->netdev->name, r); + err = request_irq(entry->vector, r_vec->handler, 0, + r_vec->name, r_vec); + if (err) { + nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector); + goto err_napi_del; + } + + irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + + nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", + r, entry->vector, entry->entry); + + /* Allocate TX ring resources */ + err = nfp_net_tx_ring_alloc(r_vec->tx_ring); + if (err) + goto err_free_irq; + + /* Allocate RX ring resources */ + err = nfp_net_rx_ring_alloc(r_vec->rx_ring); + if (err) + goto err_free_tx; + } + + return 0; + +err_free_tx: + nfp_net_tx_ring_free(r_vec->tx_ring); +err_free_irq: + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, r_vec); +err_napi_del: + netif_napi_del(&r_vec->napi); + __nfp_net_free_rings(nn, r); + return err; +} + +/** + * nfp_net_rss_write_itbl() - Write RSS indirection table to device + * @nn: NFP Net device to reconfigure + */ +void nfp_net_rss_write_itbl(struct nfp_net *nn) +{ + int i; + + for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4) + nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i, + get_unaligned_le32(nn->rss_itbl + i)); +} + +/** + * nfp_net_rss_write_key() - Write RSS hash key to device + * @nn: NFP Net device to reconfigure + */ +void nfp_net_rss_write_key(struct nfp_net *nn) +{ + int i; + + for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4) + nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, + get_unaligned_le32(nn->rss_key + i)); +} + +/** + * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW + * @nn: NFP Net device to reconfigure + */ +void nfp_net_coalesce_write_cfg(struct nfp_net *nn) +{ + u8 i; + u32 factor; + u32 value; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->me_freq_mhz / 16; + + /* copy RX interrupt coalesce parameters */ + value = (nn->rx_coalesce_max_frames << 16) | + (factor * nn->rx_coalesce_usecs); + for (i = 0; i < nn->num_r_vecs; i++) + nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); + + /* copy TX interrupt coalesce parameters */ + value = (nn->tx_coalesce_max_frames << 16) | + (factor * nn->tx_coalesce_usecs); + for (i = 0; i < nn->num_r_vecs; i++) + nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); +} + +/** + * nfp_net_write_mac_addr() - Write mac address to device registers + * @nn: NFP Net device to reconfigure + * @mac: Six-byte MAC address to be written + * + * We do a bit of byte swapping dance because firmware is LE. 
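+ *
+ * For example, for a MAC address of aa:bb:cc:dd:ee:ff the 32-bit values
+ * written are 0xaabbccdd (NFP_NET_CFG_MACADDR) and 0xeeff0000
+ * (NFP_NET_CFG_MACADDR + 4).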
+ */ +static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) +{ + nn_writel(nn, NFP_NET_CFG_MACADDR + 0, + get_unaligned_be32(nn->netdev->dev_addr)); + /* We can't do writew for NFP-3200 compatibility */ + nn_writel(nn, NFP_NET_CFG_MACADDR + 4, + get_unaligned_be16(nn->netdev->dev_addr + 4) << 16); +} + +/** + * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_clear_config_and_disable(struct nfp_net *nn) +{ + u32 new_ctrl, update; + int err; + + new_ctrl = nn->ctrl; + new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; + update = NFP_NET_CFG_UPDATE_GEN; + update |= NFP_NET_CFG_UPDATE_MSIX; + update |= NFP_NET_CFG_UPDATE_RING; + + if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; + + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, update); + if (err) { + nn_err(nn, "Could not disable device: %d\n", err); + return; + } + + nn->ctrl = new_ctrl; +} + +/** + * nfp_net_start_vec() - Start ring vector + * @nn: NFP Net device structure + * @r_vec: Ring vector to be started + */ +static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) +{ + unsigned int irq_vec; + int err = 0; + + irq_vec = nn->irq_entries[r_vec->irq_idx].vector; + + disable_irq(irq_vec); + + err = nfp_net_rx_fill_freelist(r_vec->rx_ring); + if (err) { + nn_err(nn, "RV%02d: couldn't allocate enough buffers\n", + r_vec->irq_idx); + goto out; + } + + napi_enable(&r_vec->napi); +out: + enable_irq(irq_vec); + + return err; +} + +static int nfp_net_netdev_open(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err, r; + u32 update = 0; + u32 new_ctrl; + + if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { + nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); + return -EBUSY; + } + + new_ctrl = nn->ctrl; + + /* Step 1: Allocate resources for rings and the like + * - Request interrupts + * - Allocate RX and TX ring resources + * - Setup initial RSS table + */ + err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", + nn->exn_name, sizeof(nn->exn_name), + NFP_NET_IRQ_EXN_IDX, nn->exn_handler); + if (err) + return err; + + err = nfp_net_alloc_rings(nn); + if (err) + goto err_free_exn; + + err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); + if (err) + goto err_free_rings; + + err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); + if (err) + goto err_free_rings; + + if (nn->cap & NFP_NET_CFG_CTRL_RSS) { + nfp_net_rss_write_key(nn); + nfp_net_rss_write_itbl(nn); + nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg); + update |= NFP_NET_CFG_UPDATE_RSS; + } + + if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { + nfp_net_coalesce_write_cfg(nn); + + new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + update |= NFP_NET_CFG_UPDATE_IRQMOD; + } + + /* Step 2: Configure the NFP + * - Enable rings from 0 to tx_rings/rx_rings - 1. + * - Write MAC address (in case it changed) + * - Set the MTU + * - Set the Freelist buffer size + * - Enable the FW + */ + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); + + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 
+ 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); + + nfp_net_write_mac_addr(nn, netdev->dev_addr); + + nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu); + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); + + /* Enable device */ + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; + update |= NFP_NET_CFG_UPDATE_GEN; + update |= NFP_NET_CFG_UPDATE_MSIX; + update |= NFP_NET_CFG_UPDATE_RING; + if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, update); + if (err) + goto err_clear_config; + + nn->ctrl = new_ctrl; + + /* Since reconfiguration requests while NFP is down are ignored we + * have to wipe the entire VXLAN configuration and reinitialize it. + */ + if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { + memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); + memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); + vxlan_get_rx_port(netdev); + } + + /* Step 3: Enable for kernel + * - put some freelist descriptors on each RX ring + * - enable NAPI on each ring + * - enable all TX queues + * - set link state + */ + for (r = 0; r < nn->num_r_vecs; r++) { + err = nfp_net_start_vec(nn, &nn->r_vecs[r]); + if (err) + goto err_disable_napi; + } + + netif_tx_wake_all_queues(netdev); + + err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", + nn->lsc_name, sizeof(nn->lsc_name), + NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); + if (err) + goto err_stop_tx; + nfp_net_read_link_status(nn); + + return 0; + +err_stop_tx: + netif_tx_disable(netdev); + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_tx_flush(nn->r_vecs[r].tx_ring); +err_disable_napi: + while (r--) { + napi_disable(&nn->r_vecs[r].napi); + nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + } +err_clear_config: + nfp_net_clear_config_and_disable(nn); +err_free_rings: + nfp_net_free_rings(nn); +err_free_exn: + nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); + return err; +} + +/** + * nfp_net_netdev_close() - Called when the device is downed + * @netdev: netdev structure + */ +static int nfp_net_netdev_close(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int r; + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { + nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); + return 0; + } + + /* Step 1: Disable RX and TX rings from the Linux kernel perspective + */ + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + netif_carrier_off(netdev); + nn->link_up = false; + + for (r = 0; r < nn->num_r_vecs; r++) + napi_disable(&nn->r_vecs[r].napi); + + netif_tx_disable(netdev); + + /* Step 2: Tell NFP + */ + nfp_net_clear_config_and_disable(nn); + + /* Step 3: Free resources + */ + for (r = 0; r < nn->num_r_vecs; r++) { + nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + nfp_net_tx_flush(nn->r_vecs[r].tx_ring); + } + + nfp_net_free_rings(nn); + nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); + + nn_dbg(nn, "%s down", netdev->name); + return 0; +} + +static void nfp_net_set_rx_mode(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 new_ctrl; + + new_ctrl = nn->ctrl; + + if (netdev->flags & IFF_PROMISC) { + if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) + new_ctrl |= NFP_NET_CFG_CTRL_PROMISC; + else + nn_warn(nn, "FW does not support promiscuous mode\n"); + } else { + new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; + } + + if (new_ctrl == nn->ctrl) + return; + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN)) + return; + + nn->ctrl = new_ctrl; +} + +static 
int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 tmp; + + nn_dbg(nn, "New MTU = %d\n", new_mtu); + + if (new_mtu < 68 || new_mtu > nn->max_mtu) { + nn_err(nn, "New MTU (%d) is not valid\n", new_mtu); + return -EINVAL; + } + + netdev->mtu = new_mtu; + + /* Freelist buffer size rounded up to the nearest 1K */ + tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND; + nn->fl_bufsz = roundup(tmp, 1024); + + /* restart if running */ + if (netif_running(netdev)) { + nfp_net_netdev_close(netdev); + nfp_net_netdev_open(netdev); + } + + return 0; +} + +static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct nfp_net *nn = netdev_priv(netdev); + int r; + + for (r = 0; r < nn->num_r_vecs; r++) { + struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; + u64 data[3]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&r_vec->rx_sync); + data[0] = r_vec->rx_pkts; + data[1] = r_vec->rx_bytes; + data[2] = r_vec->rx_drops; + } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); + stats->rx_packets += data[0]; + stats->rx_bytes += data[1]; + stats->rx_dropped += data[2]; + + do { + start = u64_stats_fetch_begin(&r_vec->tx_sync); + data[0] = r_vec->tx_pkts; + data[1] = r_vec->tx_bytes; + data[2] = r_vec->tx_errors; + } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); + stats->tx_packets += data[0]; + stats->tx_bytes += data[1]; + stats->tx_errors += data[2]; + } + + return stats; +} + +static int nfp_net_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct nfp_net *nn = netdev_priv(netdev); + u32 new_ctrl; + int err; + + /* Assume this is not called with features we have not advertised */ + + new_ctrl = nn->ctrl; + + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) + new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM; + } + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) + new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM; + } + + if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + new_ctrl |= NFP_NET_CFG_CTRL_LSO; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_LSO; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) { + if (features & NETIF_F_HW_VLAN_CTAG_TX) + new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN; + } + + if (changed & NETIF_F_SG) { + if (features & NETIF_F_SG) + new_ctrl |= NFP_NET_CFG_CTRL_GATHER; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; + } + + nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", + netdev->features, features, changed); + + if (new_ctrl == nn->ctrl) + return 0; + + nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl); + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); + if (err) + return err; + + nn->ctrl = new_ctrl; + + return 0; +} + +static netdev_features_t +nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u8 l4_hdr; + + /* We can't do TSO over double tagged packets (802.1AD) */ + features &= vlan_features_check(skb, features); + + 
if (!skb->encapsulation) + return features; + + /* Ensure that inner L4 header offset fits into TX descriptor field */ + if (skb_is_gso(skb)) { + u32 hdrlen; + + hdrlen = skb_inner_transport_header(skb) - skb->data + + inner_tcp_hdrlen(skb); + + if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ)) + features &= ~NETIF_F_GSO_MASK; + } + + /* VXLAN/GRE check */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) || + (l4_hdr == IPPROTO_UDP && + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; +} + +/** + * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW + * @nn: NFP Net device to reconfigure + * @idx: Index into the port table where new port should be written + * @port: UDP port to configure (pass zero to remove VXLAN port) + */ +static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) +{ + int i; + + nn->vxlan_ports[idx] = port; + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN)) + return; + + BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); + for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) + nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port), + be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | + be16_to_cpu(nn->vxlan_ports[i])); + + nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN); +} + +/** + * nfp_net_find_vxlan_idx() - find table entry of the port or a free one + * @nn: NFP Network structure + * @port: UDP port to look for + * + * Return: if the port is already in the table -- it's position; + * if the port is not in the table -- free position to use; + * if the table is full -- -ENOSPC. 
+ */ +static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) +{ + int i, free_idx = -ENOSPC; + + for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { + if (nn->vxlan_ports[i] == port) + return i; + if (!nn->vxlan_usecnt[i]) + free_idx = i; + } + + return free_idx; +} + +static void nfp_net_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct nfp_net *nn = netdev_priv(netdev); + int idx; + + idx = nfp_net_find_vxlan_idx(nn, port); + if (idx == -ENOSPC) + return; + + if (!nn->vxlan_usecnt[idx]++) + nfp_net_set_vxlan_port(nn, idx, port); +} + +static void nfp_net_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct nfp_net *nn = netdev_priv(netdev); + int idx; + + idx = nfp_net_find_vxlan_idx(nn, port); + if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC) + return; + + if (!--nn->vxlan_usecnt[idx]) + nfp_net_set_vxlan_port(nn, idx, 0); +} + +static const struct net_device_ops nfp_net_netdev_ops = { + .ndo_open = nfp_net_netdev_open, + .ndo_stop = nfp_net_netdev_close, + .ndo_start_xmit = nfp_net_tx, + .ndo_get_stats64 = nfp_net_stat64, + .ndo_tx_timeout = nfp_net_tx_timeout, + .ndo_set_rx_mode = nfp_net_set_rx_mode, + .ndo_change_mtu = nfp_net_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = nfp_net_set_features, + .ndo_features_check = nfp_net_features_check, + .ndo_add_vxlan_port = nfp_net_add_vxlan_port, + .ndo_del_vxlan_port = nfp_net_del_vxlan_port, +}; + +/** + * nfp_net_info() - Print general info about the NIC + * @nn: NFP Net device to reconfigure + */ +void nfp_net_info(struct nfp_net *nn) +{ + nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", + nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx", + nn->is_vf ? "VF " : "", + nn->num_tx_rings, nn->max_tx_rings, + nn->num_rx_rings, nn->max_rx_rings); + nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", + nn->fw_ver.resv, nn->fw_ver.class, + nn->fw_ver.major, nn->fw_ver.minor, + nn->max_mtu); + nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + nn->cap, + nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", + nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", + nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", + nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", + nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", + nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", + nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", + nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", + nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", + nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", + nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "", + nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "", + nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "", + nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "", + nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : ""); +} + +/** + * nfp_net_netdev_alloc() - Allocate netdev and related structure + * @pdev: PCI device + * @max_tx_rings: Maximum number of TX rings supported by device + * @max_rx_rings: Maximum number of RX rings supported by device + * + * This function allocates a netdev device and fills in the initial + * part of the @struct nfp_net structure. + * + * Return: NFP Net device structure, or ERR_PTR on error. 
+ */ +struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, + int max_tx_rings, int max_rx_rings) +{ + struct net_device *netdev; + struct nfp_net *nn; + int nqs; + + netdev = alloc_etherdev_mqs(sizeof(struct nfp_net), + max_tx_rings, max_rx_rings); + if (!netdev) + return ERR_PTR(-ENOMEM); + + SET_NETDEV_DEV(netdev, &pdev->dev); + nn = netdev_priv(netdev); + + nn->netdev = netdev; + nn->pdev = pdev; + + nn->max_tx_rings = max_tx_rings; + nn->max_rx_rings = max_rx_rings; + + nqs = netif_get_num_default_rss_queues(); + nn->num_tx_rings = min_t(int, nqs, max_tx_rings); + nn->num_rx_rings = min_t(int, nqs, max_rx_rings); + + nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT; + nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; + + spin_lock_init(&nn->reconfig_lock); + spin_lock_init(&nn->link_status_lock); + + return nn; +} + +/** + * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did + * @nn: NFP Net device to reconfigure + */ +void nfp_net_netdev_free(struct nfp_net *nn) +{ + free_netdev(nn->netdev); +} + +/** + * nfp_net_rss_init() - Set the initial RSS parameters + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_rss_init(struct nfp_net *nn) +{ + int i; + + netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); + + for (i = 0; i < sizeof(nn->rss_itbl); i++) + nn->rss_itbl[i] = + ethtool_rxfh_indir_default(i, nn->num_rx_rings); + + /* Enable IPv4/IPv6 TCP by default */ + nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | + NFP_NET_CFG_RSS_IPV6_TCP | + NFP_NET_CFG_RSS_TOEPLITZ | + NFP_NET_CFG_RSS_MASK; +} + +/** + * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_irqmod_init(struct nfp_net *nn) +{ + nn->rx_coalesce_usecs = 50; + nn->rx_coalesce_max_frames = 64; + nn->tx_coalesce_usecs = 50; + nn->tx_coalesce_max_frames = 64; +} + +/** + * nfp_net_netdev_init() - Initialise/finalise the netdev structure + * @netdev: netdev structure + * + * Return: 0 on success or negative errno on error. + */ +int nfp_net_netdev_init(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err; + + /* Get some of the read-only fields from the BAR */ + nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); + nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); + + nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + + /* Set default MTU and Freelist buffer size */ + if (nn->max_mtu < NFP_NET_DEFAULT_MTU) + netdev->mtu = nn->max_mtu; + else + netdev->mtu = NFP_NET_DEFAULT_MTU; + nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ; + + /* Advertise/enable offloads based on capabilities + * + * Note: netdev->features show the currently enabled features + * and netdev->hw_features advertises which features are + * supported. By default we enable most features. 
+ */ + netdev->hw_features = NETIF_F_HIGHDMA; + if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) { + netdev->hw_features |= NETIF_F_RXCSUM; + nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + } + if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + } + if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { + netdev->hw_features |= NETIF_F_SG; + nn->ctrl |= NFP_NET_CFG_CTRL_GATHER; + } + if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) { + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + nn->ctrl |= NFP_NET_CFG_CTRL_LSO; + } + if (nn->cap & NFP_NET_CFG_CTRL_RSS) { + netdev->hw_features |= NETIF_F_RXHASH; + nfp_net_rss_init(nn); + nn->ctrl |= NFP_NET_CFG_CTRL_RSS; + } + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && + nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL; + nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; + + netdev->hw_enc_features = netdev->hw_features; + } + + netdev->vlan_features = netdev->hw_features; + + if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + } + if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + } + + netdev->features = netdev->hw_features; + + /* Advertise but disable TSO by default. */ + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + /* Allow L2 Broadcast and Multicast through by default, if supported */ + if (nn->cap & NFP_NET_CFG_CTRL_L2BC) + nn->ctrl |= NFP_NET_CFG_CTRL_L2BC; + if (nn->cap & NFP_NET_CFG_CTRL_L2MC) + nn->ctrl |= NFP_NET_CFG_CTRL_L2MC; + + /* Allow IRQ moderation, if supported */ + if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { + nfp_net_irqmod_init(nn); + nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + } + + /* On NFP-3200 enable MSI-X auto-masking, if supported and the + * interrupts are not shared. + */ + if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO) + nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO; + + /* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */ + if (nn->fw_ver.major >= 2) + nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); + else + nn->rx_offset = NFP_NET_RX_OFFSET; + + /* Stash the re-configuration queue away. First odd queue in TX Bar */ + nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; + + /* Make sure the FW knows the netdev is supposed to be disabled here */ + nn_writel(nn, NFP_NET_CFG_CTRL, 0); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING | + NFP_NET_CFG_UPDATE_GEN); + if (err) + return err; + + /* Finalise the netdev setup */ + ether_setup(netdev); + netdev->netdev_ops = &nfp_net_netdev_ops; + netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); + netif_carrier_off(netdev); + + nfp_net_set_ethtool_ops(netdev); + nfp_net_irqs_assign(netdev); + + return register_netdev(netdev); +} + +/** + * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did. + * @netdev: netdev structure + */ +void nfp_net_netdev_clean(struct net_device *netdev) +{ + unregister_netdev(netdev); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h new file mode 100644 index 000000000000..8692003aeed8 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_net_ctrl.h + * Netronome network device driver: Control BAR layout + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * Brad Petrus <brad.petrus@netronome.com> + */ + +#ifndef _NFP_NET_CTRL_H_ +#define _NFP_NET_CTRL_H_ + +/* IMPORTANT: This header file is shared with the FW, + * no OS specific constructs, please! + */ + +/** + * Configuration BAR size. + * + * The configuration BAR is 8K in size, but on the NFP6000, due to + * THB-350, 32k needs to be reserved. 
+ */ +#define NFP_NET_CFG_BAR_SZ (32 * 1024) + +/** + * Offset in Freelist buffer where packet starts on RX + */ +#define NFP_NET_RX_OFFSET 32 + +/** + * Maximum header size supported for LSO frames + */ +#define NFP_NET_LSO_MAX_HDR_SZ 255 + +/** + * Hash type pre-pended when a RSS hash was computed + */ +#define NFP_NET_RSS_NONE 0 +#define NFP_NET_RSS_IPV4 1 +#define NFP_NET_RSS_IPV6 2 +#define NFP_NET_RSS_IPV6_EX 3 +#define NFP_NET_RSS_IPV4_TCP 4 +#define NFP_NET_RSS_IPV6_TCP 5 +#define NFP_NET_RSS_IPV6_EX_TCP 6 +#define NFP_NET_RSS_IPV4_UDP 7 +#define NFP_NET_RSS_IPV6_UDP 8 +#define NFP_NET_RSS_IPV6_EX_UDP 9 + +/** + * @NFP_NET_TXR_MAX: Maximum number of TX rings + * @NFP_NET_TXR_MASK: Mask for TX rings + * @NFP_NET_RXR_MAX: Maximum number of RX rings + * @NFP_NET_RXR_MASK: Mask for RX rings + */ +#define NFP_NET_TXR_MAX 64 +#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1) +#define NFP_NET_RXR_MAX 64 +#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1) + +/** + * Read/Write config words (0x0000 - 0x002c) + * @NFP_NET_CFG_CTRL: Global control + * @NFP_NET_CFG_UPDATE: Indicate which fields are updated + * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings + * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings + * @NFP_NET_CFG_MTU: Set MTU size + * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) + * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions + * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes + * @NFP_NET_CFG_MACADDR: MAC address + * + * TODO: + * - define Error details in UPDATE + */ +#define NFP_NET_CFG_CTRL 0x0000 +#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */ +#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */ +#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */ +#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */ +#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */ +#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */ +#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */ +#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */ +#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */ +#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ +#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */ +#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ +#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */ +#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ +#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ +#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ +#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ +#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ +#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */ +#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */ +#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */ +#define NFP_NET_CFG_UPDATE 0x0004 +#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ +#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ +#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */ +#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ +#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ +#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ +#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ +#define NFP_NET_CFG_UPDATE_RESET (0x1 << 
7) /* Update due to FLR */ +#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ +#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ +#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */ +#define NFP_NET_CFG_TXRS_ENABLE 0x0008 +#define NFP_NET_CFG_RXRS_ENABLE 0x0010 +#define NFP_NET_CFG_MTU 0x0018 +#define NFP_NET_CFG_FLBUFSZ 0x001c +#define NFP_NET_CFG_EXN 0x001f +#define NFP_NET_CFG_LSC 0x0020 +#define NFP_NET_CFG_MACADDR 0x0024 + +/** + * Read-only words (0x0030 - 0x0050): + * @NFP_NET_CFG_VERSION: Firmware version number + * @NFP_NET_CFG_STS: Status + * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) + * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings + * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings + * @NFP_NET_MAX_MTU: Maximum supported MTU + * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) + * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) + * + * TODO: + * - define more STS bits + */ +#define NFP_NET_CFG_VERSION 0x0030 +#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24) +#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16) +#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16) +#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0 +#define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8) +#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8) +#define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0) +#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) +#define NFP_NET_CFG_STS 0x0034 +#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +#define NFP_NET_CFG_CAP 0x0038 +#define NFP_NET_CFG_MAX_TXRINGS 0x003c +#define NFP_NET_CFG_MAX_RXRINGS 0x0040 +#define NFP_NET_CFG_MAX_MTU 0x0044 +/* Next two words are being used by VFs for solving THB350 issue */ +#define NFP_NET_CFG_START_TXQ 0x0048 +#define NFP_NET_CFG_START_RXQ 0x004c + +/** + * NFP-3200 workaround (0x0050 - 0x0058) + * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. 
YDS-155 fix) + */ +#define NFP_NET_CFG_SPARE_ADDR 0x0050 +/** + * NFP6000/NFP4000 - Prepend configuration + */ +#define NFP_NET_CFG_RX_OFFSET 0x0050 +#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ + +/** + * NFP6000/NFP4000 - VXLAN/UDP encap configuration + * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports + * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes + */ +#define NFP_NET_CFG_VXLAN_PORT 0x0060 +#define NFP_NET_CFG_VXLAN_SZ 0x0008 + +/** + * 64B reserved for future use (0x0080 - 0x00c0) + */ +#define NFP_NET_CFG_RESERVED 0x0080 +#define NFP_NET_CFG_RESERVED_SZ 0x0040 + +/** + * RSS configuration (0x0100 - 0x01ac): + * Used only when NFP_NET_CFG_CTRL_RSS is enabled + * @NFP_NET_CFG_RSS_CFG: RSS configuration word + * @NFP_NET_CFG_RSS_KEY: RSS "secret" key + * @NFP_NET_CFG_RSS_ITBL: RSS indirection table + */ +#define NFP_NET_CFG_RSS_BASE 0x0100 +#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE +#define NFP_NET_CFG_RSS_MASK (0x7f) +#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f) +#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */ +#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */ +#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */ +#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ +#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ +#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) +#define NFP_NET_CFG_RSS_KEY_SZ 0x28 +#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ + NFP_NET_CFG_RSS_KEY_SZ) +#define NFP_NET_CFG_RSS_ITBL_SZ 0x80 + +/** + * TX ring configuration (0x200 - 0x800) + * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration + * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) + * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) + * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) + * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet + */ +#define NFP_NET_CFG_TXR_BASE 0x0200 +#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \ + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x)) +#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x)) +#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x)) +#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \ + ((_x) * 0x4)) + +/** + * RX ring configuration (0x0800 - 0x0c00) + * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration + * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries) + * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries) + * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) + * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) + */ +#define NFP_NET_CFG_RXR_BASE 0x0800 +#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x)) +#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x)) +#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x)) +#define NFP_NET_CFG_RXR_IRQ_MOD(_x) 
(NFP_NET_CFG_RXR_BASE + 0x300 + \ + ((_x) * 0x4)) + +/** + * Interrupt Control/Cause registers (0x0c00 - 0x0d00) + * These registers are only used when MSI-X auto-masking is not + * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index + * by MSI-X entry and are 1B in size. If an entry is zero, the + * corresponding entry is enabled. If the FW generates an interrupt, + * it writes a cause into the corresponding field. This also masks + * the MSI-X entry and the host driver must clear the register to + * re-enable the interrupt. + */ +#define NFP_NET_CFG_ICR_BASE 0x0c00 +#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x)) +#define NFP_NET_CFG_ICR_UNMASKED 0x0 +#define NFP_NET_CFG_ICR_RXTX 0x1 +#define NFP_NET_CFG_ICR_LSC 0x2 + +/** + * General device stats (0x0d00 - 0x0d90) + * all counters are 64bit. + */ +#define NFP_NET_CFG_STATS_BASE 0x0d00 +#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) +#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08) +#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10) +#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18) +#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20) +#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28) +#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30) +#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38) +#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40) + +#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48) +#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50) +#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58) +#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60) +#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68) +#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70) +#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78) +#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80) +#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88) + +/** + * Per ring stats (0x1000 - 0x1800) + * options, 64bit per entry + * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) + * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) + */ +#define NFP_NET_CFG_TXR_STATS_BASE 0x1000 +#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ + ((_x) * 0x10)) +#define NFP_NET_CFG_RXR_STATS_BASE 0x1400 +#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \ + ((_x) * 0x10)) + +#endif /* _NFP_NET_CTRL_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c new file mode 100644 index 000000000000..4c97c713121c --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. 
Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/rtnetlink.h> + +#include "nfp_net.h" + +static struct dentry *nfp_dir; + +static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) +{ + struct nfp_net_rx_ring *rx_ring = file->private; + int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; + struct nfp_net_rx_desc *rxd; + struct sk_buff *skb; + struct nfp_net *nn; + int i; + + rtnl_lock(); + + if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net) + goto out; + nn = rx_ring->r_vec->nfp_net; + if (!netif_running(nn->netdev)) + goto out; + + rxd_cnt = rx_ring->cnt; + + fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); + fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); + rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx); + rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx); + + seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n", + rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p, + fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p); + + for (i = 0; i < rxd_cnt; i++) { + rxd = &rx_ring->rxds[i]; + seq_printf(file, "%04d: 0x%08x 0x%08x", i, + rxd->vals[0], rxd->vals[1]); + + skb = READ_ONCE(rx_ring->rxbufs[i].skb); + if (skb) + seq_printf(file, " skb->head=%p skb->data=%p", + skb->head, skb->data); + + if (rx_ring->rxbufs[i].dma_addr) + seq_printf(file, " dma_addr=%pad", + &rx_ring->rxbufs[i].dma_addr); + + if (i == rx_ring->rd_p % rxd_cnt) + seq_puts(file, " H_RD "); + if (i == rx_ring->wr_p % rxd_cnt) + seq_puts(file, " H_WR "); + if (i == fl_rd_p % rxd_cnt) + seq_puts(file, " FL_RD"); + if (i == fl_wr_p % rxd_cnt) + seq_puts(file, " FL_WR"); + if (i == rx_rd_p % rxd_cnt) + seq_puts(file, " RX_RD"); + if (i == rx_wr_p % rxd_cnt) + seq_puts(file, " RX_WR"); + + seq_putc(file, '\n'); + } +out: + rtnl_unlock(); + return 0; +} + +static int nfp_net_debugfs_rx_q_open(struct inode *inode, struct file *f) +{ + return single_open(f, nfp_net_debugfs_rx_q_read, inode->i_private); +} + +static const struct file_operations nfp_rx_q_fops = { + .owner = THIS_MODULE, + .open = nfp_net_debugfs_rx_q_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) +{ + struct nfp_net_tx_ring *tx_ring = file->private; + struct nfp_net_tx_desc *txd; + int d_rd_p, d_wr_p, txd_cnt; + struct sk_buff *skb; + struct nfp_net *nn; + int i; + + rtnl_lock(); + + if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net) + goto out; + nn = tx_ring->r_vec->nfp_net; + if (!netif_running(nn->netdev)) + goto out; + + txd_cnt = tx_ring->cnt; + + d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); + d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); + + seq_printf(file, "TX[%02d]: H_RD=%d 
H_WR=%d D_RD=%d D_WR=%d\n", + tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); + + for (i = 0; i < txd_cnt; i++) { + txd = &tx_ring->txds[i]; + seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i, + txd->vals[0], txd->vals[1], + txd->vals[2], txd->vals[3]); + + skb = READ_ONCE(tx_ring->txbufs[i].skb); + if (skb) + seq_printf(file, " skb->head=%p skb->data=%p", + skb->head, skb->data); + if (tx_ring->txbufs[i].dma_addr) + seq_printf(file, " dma_addr=%pad", + &tx_ring->txbufs[i].dma_addr); + + if (i == tx_ring->rd_p % txd_cnt) + seq_puts(file, " H_RD"); + if (i == tx_ring->wr_p % txd_cnt) + seq_puts(file, " H_WR"); + if (i == d_rd_p % txd_cnt) + seq_puts(file, " D_RD"); + if (i == d_wr_p % txd_cnt) + seq_puts(file, " D_WR"); + + seq_putc(file, '\n'); + } +out: + rtnl_unlock(); + return 0; +} + +static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f) +{ + return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private); +} + +static const struct file_operations nfp_tx_q_fops = { + .owner = THIS_MODULE, + .open = nfp_net_debugfs_tx_q_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +{ + static struct dentry *queues, *tx, *rx; + char int_name[16]; + int i; + + if (IS_ERR_OR_NULL(nfp_dir)) + return; + + nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir); + if (IS_ERR_OR_NULL(nn->debugfs_dir)) + return; + + /* Create queue debugging sub-tree */ + queues = debugfs_create_dir("queue", nn->debugfs_dir); + if (IS_ERR_OR_NULL(nn->debugfs_dir)) + return; + + rx = debugfs_create_dir("rx", queues); + tx = debugfs_create_dir("tx", queues); + if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx)) + return; + + for (i = 0; i < nn->num_rx_rings; i++) { + sprintf(int_name, "%d", i); + debugfs_create_file(int_name, S_IRUSR, rx, + &nn->rx_rings[i], &nfp_rx_q_fops); + } + + for (i = 0; i < nn->num_tx_rings; i++) { + sprintf(int_name, "%d", i); + debugfs_create_file(int_name, S_IRUSR, tx, + &nn->tx_rings[i], &nfp_tx_q_fops); + } +} + +void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +{ + debugfs_remove_recursive(nn->debugfs_dir); + nn->debugfs_dir = NULL; +} + +void nfp_net_debugfs_create(void) +{ + nfp_dir = debugfs_create_dir("nfp_net", NULL); +} + +void nfp_net_debugfs_destroy(void) +{ + debugfs_remove_recursive(nfp_dir); + nfp_dir = NULL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c new file mode 100644 index 000000000000..9a4084a68db5 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -0,0 +1,640 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_net_ethtool.c + * Netronome network device driver: ethtool support + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * Brad Petrus <brad.petrus@netronome.com> + */ + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/ethtool.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +/* Support for stats. Returns netdev, driver, and device stats */ +enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS }; +struct _nfp_net_et_stats { + char name[ETH_GSTRING_LEN]; + int type; + int sz; + int off; +}; + +#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS, \ + FIELD_SIZEOF(struct net_device_stats, m), \ + offsetof(struct net_device_stats, m) +/* For stats in the control BAR (other than Q stats) */ +#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS, \ + sizeof(u64), \ + (m) +static const struct _nfp_net_et_stats nfp_net_et_stats[] = { + /* netdev stats */ + {"rx_packets", NN_ET_NETDEV_STAT(rx_packets)}, + {"tx_packets", NN_ET_NETDEV_STAT(tx_packets)}, + {"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)}, + {"rx_errors", NN_ET_NETDEV_STAT(rx_errors)}, + {"tx_errors", NN_ET_NETDEV_STAT(tx_errors)}, + {"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)}, + {"multicast", NN_ET_NETDEV_STAT(multicast)}, + {"collisions", NN_ET_NETDEV_STAT(collisions)}, + {"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)}, + {"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)}, + /* Stats from the device */ + {"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)}, + {"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)}, + {"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)}, + {"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)}, + {"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)}, + {"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)}, + {"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)}, + {"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)}, + {"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)}, + + {"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)}, + {"dev_tx_errors", 
NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)}, + {"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)}, + {"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)}, + {"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)}, + {"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)}, + {"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)}, + {"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)}, + {"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)}, +}; + +#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) +#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3) +#define NN_ET_RVEC_GATHER_STATS 7 +#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2) +#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ + NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) + +static void nfp_net_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct nfp_net *nn = netdev_priv(netdev); + + strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, nfp_net_driver_version, + sizeof(drvinfo->version)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d.%d", + nn->fw_ver.resv, nn->fw_ver.class, + nn->fw_ver.major, nn->fw_ver.minor); + strlcpy(drvinfo->bus_info, pci_name(nn->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_stats = NN_ET_STATS_LEN; + drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ; +} + +static void nfp_net_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nfp_net *nn = netdev_priv(netdev); + + ring->rx_max_pending = NFP_NET_MAX_RX_DESCS; + ring->tx_max_pending = NFP_NET_MAX_TX_DESCS; + ring->rx_pending = nn->rxd_cnt; + ring->tx_pending = nn->txd_cnt; +} + +static int nfp_net_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 rxd_cnt, txd_cnt; + + if (netif_running(netdev)) { + /* Some NIC drivers allow reconfiguration on the fly, + * some down the interface, change and then up it + * again. For now we don't allow changes when the + * device is up. + */ + nn_warn(nn, "Can't change rings while device is up\n"); + return -EBUSY; + } + + /* We don't have separate queues/rings for small/large frames. 
*/ + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + /* Round up to supported values */ + rxd_cnt = roundup_pow_of_two(ring->rx_pending); + rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS); + rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS); + + txd_cnt = roundup_pow_of_two(ring->tx_pending); + txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS); + txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS); + + if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt) + nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", + nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + + nn->rxd_cnt = rxd_cnt; + nn->txd_cnt = txd_cnt; + + return 0; +} + +static void nfp_net_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct nfp_net *nn = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { + memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < nn->num_r_vecs; i++) { + sprintf(p, "rvec_%u_rx_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rvec_%u_tx_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rvec_%u_tx_busy", i); + p += ETH_GSTRING_LEN; + } + strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "tx_gather", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "tx_lso", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + for (i = 0; i < nn->num_tx_rings; i++) { + sprintf(p, "txq_%u_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "txq_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < nn->num_rx_rings; i++) { + sprintf(p, "rxq_%u_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rxq_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void nfp_net_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; + struct nfp_net *nn = netdev_priv(netdev); + struct rtnl_link_stats64 *netdev_stats; + struct rtnl_link_stats64 temp = {}; + u64 tmp[NN_ET_RVEC_GATHER_STATS]; + u8 __iomem *io_p; + int i, j, k; + u8 *p; + + netdev_stats = dev_get_stats(netdev, &temp); + + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { + switch (nfp_net_et_stats[i].type) { + case NETDEV_ET_STATS: + p = (char *)netdev_stats + nfp_net_et_stats[i].off; + data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ? 
+ *(u64 *)p : *(u32 *)p; + break; + + case NFP_NET_DEV_ET_STATS: + io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; + data[i] = readq(io_p); + break; + } + } + for (j = 0; j < nn->num_r_vecs; j++) { + unsigned int start; + + do { + start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); + data[i++] = nn->r_vecs[j].rx_pkts; + tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; + tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; + tmp[2] = nn->r_vecs[j].hw_csum_rx_error; + } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start)); + + do { + start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); + data[i++] = nn->r_vecs[j].tx_pkts; + data[i++] = nn->r_vecs[j].tx_busy; + tmp[3] = nn->r_vecs[j].hw_csum_tx; + tmp[4] = nn->r_vecs[j].hw_csum_tx_inner; + tmp[5] = nn->r_vecs[j].tx_gather; + tmp[6] = nn->r_vecs[j].tx_lso; + } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start)); + + for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++) + gathered_stats[k] += tmp[k]; + } + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + data[i++] = gathered_stats[j]; + for (j = 0; j < nn->num_tx_rings; j++) { + io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); + data[i++] = readq(io_p); + io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; + data[i++] = readq(io_p); + } + for (j = 0; j < nn->num_rx_rings; j++) { + io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); + data[i++] = readq(io_p); + io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; + data[i++] = readq(io_p); + } +} + +static int nfp_net_get_sset_count(struct net_device *netdev, int sset) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + return NN_ET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +/* RX network flow classification (RSS, filters, etc) + */ +static u32 ethtool_flow_to_nfp_flag(u32 flow_type) +{ + static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = { + [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP, + [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP, + [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP, + [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP, + [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4, + [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6, + }; + + if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp)) + return 0; + + return xlate_ethtool_to_nfp[flow_type]; +} + +static int nfp_net_get_rss_hash_opts(struct nfp_net *nn, + struct ethtool_rxnfc *cmd) +{ + u32 nfp_rss_flag; + + cmd->data = 0; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type); + if (!nfp_rss_flag) + return -EINVAL; + + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + if (nn->rss_cfg & nfp_rss_flag) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + + return 0; +} + +static int nfp_net_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nn->num_rx_rings; + return 0; + case ETHTOOL_GRXFH: + return nfp_net_get_rss_hash_opts(nn, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int nfp_net_set_rss_hash_opt(struct nfp_net *nn, + struct ethtool_rxnfc *nfc) +{ + u32 new_rss_cfg = nn->rss_cfg; + u32 nfp_rss_flag; + int err; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + /* RSS only supports IP SA/DA and L4 src/dst ports */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SA/DA fields for hashing */ + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + nfp_rss_flag = 
ethtool_flow_to_nfp_flag(nfc->flow_type); + if (!nfp_rss_flag) + return -EINVAL; + + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + new_rss_cfg &= ~nfp_rss_flag; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + new_rss_cfg |= nfp_rss_flag; + break; + default: + return -EINVAL; + } + + new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ; + new_rss_cfg |= NFP_NET_CFG_RSS_MASK; + + if (new_rss_cfg == nn->rss_cfg) + return 0; + + writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL); + err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); + if (err) + return err; + + nn->rss_cfg = new_rss_cfg; + + nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg); + return 0; +} + +static int nfp_net_set_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return nfp_net_set_rss_hash_opt(nn, cmd); + default: + return -EOPNOTSUPP; + } +} + +static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return 0; + + return ARRAY_SIZE(nn->rss_itbl); +} + +static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) +{ + return NFP_NET_CFG_RSS_KEY_SZ; +} + +static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + if (indir) + for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) + indir[i] = nn->rss_itbl[i]; + if (key) + memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int nfp_net_set_rxfh(struct net_device *netdev, + const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) || + !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!key && !indir) + return 0; + + if (key) { + memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ); + nfp_net_rss_write_key(nn); + } + if (indir) { + for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) + nn->rss_itbl[i] = indir[i]; + + nfp_net_rss_write_itbl(nn); + } + + return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); +} + +/* Dump BAR registers + */ +static int nfp_net_get_regs_len(struct net_device *netdev) +{ + return NFP_NET_CFG_BAR_SZ; +} + +static void nfp_net_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 *regs_buf = p; + int i; + + regs->version = nn_readl(nn, NFP_NET_CFG_VERSION); + + for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++) + regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32))); +} + +static int nfp_net_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) + return -EINVAL; + + ec->rx_coalesce_usecs = nn->rx_coalesce_usecs; + ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames; + ec->tx_coalesce_usecs = nn->tx_coalesce_usecs; + ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames; + + return 0; +} + +static int nfp_net_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct nfp_net *nn = netdev_priv(netdev); + unsigned int factor; + + if (ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + 
ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->me_freq_mhz / 16; + + /* Each pair of (usecs, max_frames) fields specifies that interrupts + * should be coalesced until + * (usecs > 0 && time_since_first_completion >= usecs) || + * (max_frames > 0 && completed_frames >= max_frames) + * + * It is illegal to set both usecs and max_frames to zero as this would + * cause interrupts to never be generated. To disable coalescing, set + * usecs = 0 and max_frames = 1. + * + * Some implementations ignore the value of max_frames and use the + * condition time_since_first_completion >= usecs + */ + + if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) + return -EINVAL; + + /* ensure valid configuration */ + if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames) + return -EINVAL; + + if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) + return -EINVAL; + + if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1)) + return -EINVAL; + + if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1)) + return -EINVAL; + + if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1)) + return -EINVAL; + + if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1)) + return -EINVAL; + + /* configuration is valid */ + nn->rx_coalesce_usecs = ec->rx_coalesce_usecs; + nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames; + nn->tx_coalesce_usecs = ec->tx_coalesce_usecs; + nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames; + + /* write configuration to device */ + nfp_net_coalesce_write_cfg(nn); + return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); +} + +static const struct ethtool_ops nfp_net_ethtool_ops = { + .get_drvinfo = nfp_net_get_drvinfo, + .get_ringparam = nfp_net_get_ringparam, + .set_ringparam = nfp_net_set_ringparam, + .get_strings = nfp_net_get_strings, + .get_ethtool_stats = nfp_net_get_stats, + .get_sset_count = nfp_net_get_sset_count, + .get_rxnfc = nfp_net_get_rxnfc, + .set_rxnfc = nfp_net_set_rxnfc, + .get_rxfh_indir_size = nfp_net_get_rxfh_indir_size, + .get_rxfh_key_size = nfp_net_get_rxfh_key_size, + .get_rxfh = nfp_net_get_rxfh, + .set_rxfh = nfp_net_set_rxfh, + .get_regs_len = nfp_net_get_regs_len, + .get_regs = nfp_net_get_regs, + .get_coalesce = nfp_net_get_coalesce, + .set_coalesce = nfp_net_set_coalesce, +}; + +void nfp_net_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &nfp_net_ethtool_ops; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c new file mode 100644 index 000000000000..e2b22b8a20f1 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -0,0 +1,385 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. 
You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_netvf_main.c + * Netronome virtual function network device driver: Main entry point + * Author: Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/etherdevice.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +const char nfp_net_driver_name[] = "nfp_netvf"; +const char nfp_net_driver_version[] = "0.1"; +#define PCI_DEVICE_NFP6000VF 0x6003 +static const struct pci_device_id nfp_netvf_pci_device_ids[] = { + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, + { 0, } /* Required last entry. */ +}; +MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids); + +static void nfp_netvf_get_mac_addr(struct nfp_net *nn) +{ + u8 mac_addr[ETH_ALEN]; + + put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]); + /* We can't do readw for NFP-3200 compatibility */ + put_unaligned_be16(nn_readl(nn, NFP_NET_CFG_MACADDR + 4) >> 16, + &mac_addr[4]); + + if (!is_valid_ether_addr(mac_addr)) { + eth_hw_addr_random(nn->netdev); + return; + } + + ether_addr_copy(nn->netdev->dev_addr, mac_addr); + ether_addr_copy(nn->netdev->perm_addr, mac_addr); +} + +static int nfp_netvf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct nfp_net_fw_version fw_ver; + int max_tx_rings, max_rx_rings; + u32 tx_bar_off, rx_bar_off; + u32 tx_bar_sz, rx_bar_sz; + int tx_bar_no, rx_bar_no; + u8 __iomem *ctrl_bar; + struct nfp_net *nn; + int is_nfp3200; + u32 startq; + int stride; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, nfp_net_driver_name); + if (err) { + dev_err(&pdev->dev, "Unable to allocate device memory.\n"); + goto err_pci_disable; + } + + switch (pdev->device) { + case PCI_DEVICE_NFP6000VF: + is_nfp3200 = 0; + break; + default: + err = -ENODEV; + goto err_pci_regions; + } + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS)); + if (err) + goto err_pci_regions; + + /* Map the Control BAR. + * + * Irrespective of the advertised BAR size we only map the + * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code + * the identical for PF and VF drivers. 
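As a side note on nfp_netvf_get_mac_addr() above: the station address is reassembled from two 32-bit config-BAR reads, stored big-endian, with only the top half of the second word used. A small self-contained sketch of that packing (the register values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver's put_unaligned_be32()/be16() usage on the two words. */
static void nfp_mac_from_words(uint32_t lo_word, uint32_t hi_word, uint8_t mac[6])
{
        mac[0] = lo_word >> 24;
        mac[1] = lo_word >> 16;
        mac[2] = lo_word >> 8;
        mac[3] = lo_word;
        mac[4] = hi_word >> 24;        /* (hi_word >> 16) written big-endian */
        mac[5] = hi_word >> 16;
}

int main(void)
{
        uint8_t mac[6];

        /* NFP_NET_CFG_MACADDR + 0 reads 0xaabbccdd, + 4 reads 0xeeff0000 */
        nfp_mac_from_words(0xaabbccdd, 0xeeff0000, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",        /* aa:bb:cc:dd:ee:ff */
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}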
+ */ + ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR), + NFP_NET_CFG_BAR_SZ); + if (!ctrl_bar) { + dev_err(&pdev->dev, + "Failed to map resource %d\n", NFP_NET_CRTL_BAR); + err = -EIO; + goto err_pci_regions; + } + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || + nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) || + nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) { + stride = 2; + tx_bar_no = NFP_NET_Q0_BAR; + rx_bar_no = NFP_NET_Q1_BAR; + dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 3: + if (is_nfp3200) { + stride = 2; + tx_bar_no = NFP_NET_Q0_BAR; + rx_bar_no = NFP_NET_Q1_BAR; + } else { + stride = 4; + tx_bar_no = NFP_NET_Q0_BAR; + rx_bar_no = tx_bar_no; + } + break; + default: + dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + } + + /* Find out how many rings are supported */ + max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS); + max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); + + tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride; + rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride; + + /* Sanity checks */ + if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) { + dev_err(&pdev->dev, + "TX BAR too small for number of TX rings. Adjusting\n"); + tx_bar_sz = pci_resource_len(pdev, tx_bar_no); + max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2; + } + if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) { + dev_err(&pdev->dev, + "RX BAR too small for number of RX rings. Adjusting\n"); + rx_bar_sz = pci_resource_len(pdev, rx_bar_no); + max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2; + } + + /* XXX Implement a workaround for THB-350 here. Ideally, we + * have a different PCI ID for A rev VFs. 
+ */ + switch (pdev->device) { + case PCI_DEVICE_NFP6000VF: + startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + tx_bar_off = NFP_PCIE_QUEUE(startq); + startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + rx_bar_off = NFP_PCIE_QUEUE(startq); + break; + default: + err = -ENODEV; + goto err_ctrl_unmap; + } + + /* Allocate and initialise the netdev */ + nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings); + if (IS_ERR(nn)) { + err = PTR_ERR(nn); + goto err_ctrl_unmap; + } + + nn->fw_ver = fw_ver; + nn->ctrl_bar = ctrl_bar; + nn->is_vf = 1; + nn->is_nfp3200 = is_nfp3200; + nn->stride_tx = stride; + nn->stride_rx = stride; + + if (rx_bar_no == tx_bar_no) { + u32 bar_off, bar_sz; + resource_size_t map_addr; + + /* Make a single overlapping BAR mapping */ + if (tx_bar_off < rx_bar_off) + bar_off = tx_bar_off; + else + bar_off = rx_bar_off; + + if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz)) + bar_sz = (tx_bar_off + tx_bar_sz) - bar_off; + else + bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; + + map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; + nn->q_bar = ioremap_nocache(map_addr, bar_sz); + if (!nn->q_bar) { + nn_err(nn, "Failed to map resource %d\n", tx_bar_no); + err = -EIO; + goto err_netdev_free; + } + + /* TX queues */ + nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off); + /* RX queues */ + nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off); + } else { + resource_size_t map_addr; + + /* TX queues */ + map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off; + nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz); + if (!nn->tx_bar) { + nn_err(nn, "Failed to map resource %d\n", tx_bar_no); + err = -EIO; + goto err_netdev_free; + } + + /* RX queues */ + map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off; + nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz); + if (!nn->rx_bar) { + nn_err(nn, "Failed to map resource %d\n", rx_bar_no); + err = -EIO; + goto err_unmap_tx; + } + } + + nfp_netvf_get_mac_addr(nn); + + err = nfp_net_irqs_alloc(nn); + if (!err) { + nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); + err = -EIO; + goto err_unmap_rx; + } + + /* Get ME clock frequency from ctrl BAR + * XXX for now frequency is hardcoded until we figure out how + * to get the value from nfp-hwinfo into ctrl bar + */ + nn->me_freq_mhz = 1200; + + err = nfp_net_netdev_init(nn->netdev); + if (err) + goto err_irqs_disable; + + pci_set_drvdata(pdev, nn); + + nfp_net_info(nn); + nfp_net_debugfs_adapter_add(nn); + + return 0; + +err_irqs_disable: + nfp_net_irqs_disable(nn); +err_unmap_rx: + if (!nn->q_bar) + iounmap(nn->rx_bar); +err_unmap_tx: + if (!nn->q_bar) + iounmap(nn->tx_bar); + else + iounmap(nn->q_bar); +err_netdev_free: + pci_set_drvdata(pdev, NULL); + nfp_net_netdev_free(nn); +err_ctrl_unmap: + iounmap(ctrl_bar); +err_pci_regions: + pci_release_regions(pdev); +err_pci_disable: + pci_disable_device(pdev); + return err; +} + +static void nfp_netvf_pci_remove(struct pci_dev *pdev) +{ + struct nfp_net *nn = pci_get_drvdata(pdev); + + /* Note, the order is slightly different from above as we need + * to keep the nn pointer around till we have freed everything. 
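Returning to the probe path above: when the TX and RX queue ranges live in the same PCI BAR, nfp_netvf_pci_probe() maps one window that spans both. The window is just a min/max over the two (offset, size) pairs; a standalone sketch with invented offsets:

#include <stdint.h>
#include <stdio.h>

struct bar_window {
        uint32_t off;        /* start of the combined mapping within the BAR */
        uint32_t sz;         /* length of the combined mapping */
};

/* Same min/max logic as the shared-BAR branch of the probe routine. */
static struct bar_window combine(uint32_t tx_off, uint32_t tx_sz,
                                 uint32_t rx_off, uint32_t rx_sz)
{
        uint32_t tx_end = tx_off + tx_sz, rx_end = rx_off + rx_sz;
        struct bar_window w;

        w.off = tx_off < rx_off ? tx_off : rx_off;
        w.sz = (tx_end > rx_end ? tx_end : rx_end) - w.off;
        return w;
}

int main(void)
{
        /* e.g. TX queues at 0x10000 (+0x8000), RX queues at 0x20000 (+0x8000) */
        struct bar_window w = combine(0x10000, 0x8000, 0x20000, 0x8000);

        printf("map 0x%x bytes at offset 0x%x\n", w.sz, w.off);  /* 0x18000 at 0x10000 */
        return 0;
}

nn->tx_bar and nn->rx_bar then point into that single mapping at each range's offset from the window start.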
+ */ + nfp_net_debugfs_adapter_del(nn); + + nfp_net_netdev_clean(nn->netdev); + + nfp_net_irqs_disable(nn); + + if (!nn->q_bar) { + iounmap(nn->rx_bar); + iounmap(nn->tx_bar); + } else { + iounmap(nn->q_bar); + } + iounmap(nn->ctrl_bar); + + pci_set_drvdata(pdev, NULL); + + nfp_net_netdev_free(nn); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver nfp_netvf_pci_driver = { + .name = nfp_net_driver_name, + .id_table = nfp_netvf_pci_device_ids, + .probe = nfp_netvf_pci_probe, + .remove = nfp_netvf_pci_remove, +}; + +static int __init nfp_netvf_init(void) +{ + int err; + + pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n", + nfp_net_driver_name); + + nfp_net_debugfs_create(); + err = pci_register_driver(&nfp_netvf_pci_driver); + if (err) { + nfp_net_debugfs_destroy(); + return err; + } + + return 0; +} + +static void __exit nfp_netvf_exit(void) +{ + pci_unregister_driver(&nfp_netvf_pci_driver); + nfp_net_debugfs_destroy(); +} + +module_init(nfp_netvf_init); +module_exit(nfp_netvf_exit); + +MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("NFP VF network device driver"); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 057665180f13..b1ce7aaa8f8b 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -797,7 +797,7 @@ static int lpc_mii_probe(struct net_device *ndev) netdev_info(ndev, "using MII interface\n"); else netdev_info(ndev, "using RMII interface\n"); - phydev = phy_connect(ndev, dev_name(&phydev->dev), + phydev = phy_connect(ndev, phydev_name(phydev), &lpc_handle_link_change, lpc_phy_interface_mode(&pldat->pdev->dev)); @@ -816,15 +816,14 @@ static int lpc_mii_probe(struct net_device *ndev) pldat->duplex = -1; pldat->phy_dev = phydev; - netdev_info(ndev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); + return 0; } static int lpc_mii_init(struct netdata_local *pldat) { - int err = -ENXIO, i; + int err = -ENXIO; pldat->mii_bus = mdiobus_alloc(); if (!pldat->mii_bus) { @@ -851,19 +850,10 @@ static int lpc_mii_init(struct netdata_local *pldat) pldat->mii_bus->priv = pldat; pldat->mii_bus->parent = &pldat->pdev->dev; - pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!pldat->mii_bus->irq) { - err = -ENOMEM; - goto err_out_1; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - pldat->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(pldat->pdev, pldat->mii_bus); if (mdiobus_register(pldat->mii_bus)) - goto err_out_free_mdio_irq; + goto err_out_unregister_bus; if (lpc_mii_probe(pldat->ndev) != 0) goto err_out_unregister_bus; @@ -872,9 +862,6 @@ static int lpc_mii_init(struct netdata_local *pldat) err_out_unregister_bus: mdiobus_unregister(pldat->mii_bus); -err_out_free_mdio_irq: - kfree(pldat->mii_bus->irq); -err_out_1: mdiobus_free(pldat->mii_bus); err_out: return err; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 08d4be616064..e097e6baaac4 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -500,7 +500,7 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) val = XsumTX; pch_gbe_validate_option(&val, &opt, adapter); if (!val) - dev->features &= ~NETIF_F_ALL_CSUM; + dev->features &= 
~NETIF_F_CSUM_MASK; } { /* Flow Control */ static const struct pch_gbe_option opt = { diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index ac17d8669b1a..1292c360390c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -299,6 +299,7 @@ struct qed_hwfn { /* Flag indicating whether interrupts are enabled or not*/ bool b_int_enabled; + bool b_int_requested; struct qed_mcp_info *mcp_info; @@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); +int qed_slowpath_irq_req(struct qed_hwfn *hwfn); + #define QED_ETH_INTERFACE_VERSION 300 #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 803b190ccada..817bbd5476ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1385,52 +1385,63 @@ err0: return rc; } -static u32 qed_hw_bar_size(struct qed_dev *cdev, - u8 bar_id) +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, + u8 bar_id) { - u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); + u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE + : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); - return size / cdev->num_hwfns; + /* Get the BAR size(in KB) from hardware given val */ + return 1 << (val + 15); } int qed_hw_prepare(struct qed_dev *cdev, int personality) { - int rc, i; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + int rc; /* Store the precompiled init data ptrs */ qed_init_iro_array(cdev); /* Initialize the first hwfn - will learn number of hwfns */ - rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, + rc = qed_hw_prepare_single(p_hwfn, + cdev->regview, cdev->doorbells, personality); if (rc) return rc; - personality = cdev->hwfns[0].hw_info.personality; + personality = p_hwfn->hw_info.personality; /* Initialize the rest of the hwfns */ - for (i = 1; i < cdev->num_hwfns; i++) { + if (cdev->num_hwfns > 1) { void __iomem *p_regview, *p_doorbell; + u8 __iomem *addr; + + /* adjust bar offset for second engine */ + addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; + p_regview = addr; - p_regview = cdev->regview + - i * qed_hw_bar_size(cdev, 0); - p_doorbell = cdev->doorbells + - i * qed_hw_bar_size(cdev, 1); - rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, + /* adjust doorbell bar offset for second engine */ + addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; + p_doorbell = addr; + + /* prepare second hw function */ + rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, p_doorbell, personality); + + /* in case of error, need to free the previously + * initiliazed hwfn 0. 
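A quick worked example for the new qed_hw_bar_size() above, taken only from the code as shown (how the PGLUE size registers encode their value beyond that is an assumption): each increment of the register value doubles the reported size, and the second engine's register and doorbell windows are placed half that size into the existing mapping.

#include <stdint.h>
#include <stdio.h>

/* Same formula as qed_hw_bar_size(): size = 1 << (val + 15). */
static uint32_t bar_size_from_val(uint32_t val)
{
        return 1u << (val + 15);
}

int main(void)
{
        uint32_t val = 2;                        /* example PGLUE_B_REG_PF_BAR0_SIZE reading */
        uint32_t size = bar_size_from_val(val);  /* 1 << 17 = 0x20000 */

        /* Second hwfn's view begins half-way into the per-PF window. */
        printf("size 0x%x, second-engine offset 0x%x\n", size, size / 2);
        return 0;
}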
+ */ if (rc) { - /* Cleanup previously initialized hwfns */ - while (--i >= 0) { - qed_init_free(&cdev->hwfns[i]); - qed_mcp_free(&cdev->hwfns[i]); - qed_hw_hwfn_free(&cdev->hwfns[i]); - } - return rc; + qed_init_free(p_hwfn); + qed_mcp_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); } } - return 0; + return rc; } void qed_hw_remove(struct qed_dev *cdev) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b2f8e854dfd1..264e954675d1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3993,6 +3993,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; @@ -4044,6 +4046,10 @@ struct public_drv_mb { #define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 #define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index de50e84902af..9cc9d62c1fec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); } -void qed_int_igu_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_int_mode int_mode) +int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_int_mode int_mode) { - int i; - - p_hwfn->b_int_enabled = 1; + int rc, i; /* Mask non-link attentions */ for (i = 0; i < 9; i++) qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); - /* Enable interrupt Generation */ - qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); - /* Configure AEU signal change to produce attentions for link */ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); @@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn, /* Unmask AEU signals toward IGU */ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); + if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { + rc = qed_slowpath_irq_req(p_hwfn); + if (rc != 0) { + DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); + return -EINVAL; + } + p_hwfn->b_int_requested = true; + } + /* Enable interrupt Generation */ + qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + p_hwfn->b_int_enabled = 1; + + return rc; } void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, @@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, return info->igu_sb_cnt; } + +void qed_int_disable_post_isr_release(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) + cdev->hwfns[i].b_int_requested = false; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 16b57518e706..51e0b09a7f47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, int *p_iov_blks); /** - * @file + * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * release. 
The API need to be called after releasing all slowpath IRQs + * of the device. + * + * @param cdev * - * @brief Interrupt handler */ +void qed_int_disable_post_isr_release(struct qed_dev *cdev); #define QED_CAU_DEF_RX_TIMER_RES 0 #define QED_CAU_DEF_TX_TIMER_RES 0 @@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param int_mode + * + * @return int */ -void qed_int_igu_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_int_mode int_mode); +int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_int_mode int_mode); /** * @brief - Initialize CAU status block entry diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 947c7af72b25..9d76ce249277 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) return rc; } -static int qed_slowpath_irq_req(struct qed_dev *cdev) +int qed_slowpath_irq_req(struct qed_hwfn *hwfn) { - int i = 0, rc = 0; + struct qed_dev *cdev = hwfn->cdev; + int rc = 0; + u8 id; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - /* Request all the slowpath MSI-X vectors */ - for (i = 0; i < cdev->num_hwfns; i++) { - snprintf(cdev->hwfns[i].name, NAME_SIZE, - "sp-%d-%02x:%02x.%02x", - i, cdev->pdev->bus->number, - PCI_SLOT(cdev->pdev->devfn), - cdev->hwfns[i].abs_pf_id); - - rc = request_irq(cdev->int_params.msix_table[i].vector, - qed_msix_sp_int, 0, - cdev->hwfns[i].name, - cdev->hwfns[i].sp_dpc); - if (rc) - break; - - DP_VERBOSE(&cdev->hwfns[i], - (NETIF_MSG_INTR | QED_MSG_SP), + id = hwfn->my_id; + snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", + id, cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + rc = request_irq(cdev->int_params.msix_table[id].vector, + qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); + if (!rc) + DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), "Requested slowpath MSI-X\n"); - } - - if (i != cdev->num_hwfns) { - /* Free already request MSI-X vectors */ - for (i--; i >= 0; i--) { - unsigned int vec = - cdev->int_params.msix_table[i].vector; - synchronize_irq(vec); - free_irq(cdev->int_params.msix_table[i].vector, - cdev->hwfns[i].sp_dpc); - } - } } else { unsigned long flags = 0; @@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev) if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].b_int_requested) + break; synchronize_irq(cdev->int_params.msix_table[i].vector); free_irq(cdev->int_params.msix_table[i].vector, cdev->hwfns[i].sp_dpc); } } else { - free_irq(cdev->pdev->irq, cdev); + if (QED_LEADING_HWFN(cdev)->b_int_requested) + free_irq(cdev->pdev->irq, cdev); } + qed_int_disable_post_isr_release(cdev); } static int qed_nic_stop(struct qed_dev *cdev) @@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (rc) goto err1; - /* Request the slowpath IRQ */ - rc = qed_slowpath_irq_req(cdev); - if (rc) - goto err2; - /* Allocate stream for unzipping */ rc = qed_alloc_stream_mem(cdev); if (rc) { DP_NOTICE(cdev, "Failed to allocate stream memory\n"); - goto err3; + goto err2; } /* Start the slowpath */ @@ -1135,6 +1115,23 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + ptt 
= qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_set_led(hwfn, ptt, mode); + + qed_ptt_release(hwfn, ptt); + + return status; +} + const struct qed_common_ops qed_common_ops_pass = { .probe = &qed_probe, .remove = &qed_remove, @@ -1155,6 +1152,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .set_led = &qed_set_led, }; u32 qed_get_protocol_version(enum qed_protocol protocol) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 20d048cdcb88..ba1b1f1ef789 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -858,3 +858,30 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, return 0; } + +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_led_mode mode) +{ + u32 resp = 0, param = 0, drv_mb_param; + int rc; + + switch (mode) { + case QED_LED_MODE_ON: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; + break; + case QED_LED_MODE_OFF: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; + break; + case QED_LED_MODE_RESTORE: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; + break; + default: + DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, + drv_mb_param, &resp, ¶m); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index dbaae586b4a7..506197d5c3dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -224,6 +224,19 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver); +/** + * @brief Set LED status + * + * @param p_hwfn + * @param p_ptt + * @param mode - LED mode + * + * @return int - 0 - operation was successful. 
+ */ +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_led_mode mode); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 7a5ce5914ace..e8df12335a97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -363,4 +363,8 @@ 0x7 << 0) #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 0 +#define PGLUE_B_REG_PF_BAR0_SIZE \ + 0x2aae60UL +#define PGLUE_B_REG_PF_BAR1_SIZE \ + 0x2aae64UL #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 31a1f1eb4f56..287fadfab52d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -124,8 +124,12 @@ struct qed_spq { dma_addr_t p_phys; struct qed_spq_entry *p_virt; - /* Used as index for completions (returns on EQ by FW) */ - u16 echo_idx; +#define SPQ_RING_SIZE \ + (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) + + /* Bitmap for handling out-of-order completions */ + DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); + u8 comp_bitmap_idx; /* Statistics */ u32 unlimited_pending_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 7c0b8459666e..3dd548ab8df1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -112,8 +112,6 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { - p_ent->elem.hdr.echo = 0; - p_hwfn->p_spq->echo_idx++; p_ent->flags = 0; switch (p_ent->comp_mode) { @@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { - struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; struct core_db_data db; + p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); if (!elem) { DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); @@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_spq->comp_count = 0; p_spq->comp_sent_count = 0; p_spq->unlimited_pending_count = 0; - p_spq->echo_idx = 0; + + bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); + p_spq->comp_bitmap_idx = 0; /* SPQ cid, cannot fail */ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); @@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { - struct qed_spq_entry *p_en2; if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; return 0; - } + } else { + struct qed_spq_entry *p_en2; - p_en2 = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); - list_del(&p_en2->list); + p_en2 = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, + list); + list_del(&p_en2->list); + + /* Copy the ring element physical pointer to the new + * entry, since we are about to override the entire ring + * entry and don't want to lose the pointer. 
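The out-of-order completion handling added to qed_spq_completion() a little further below is worth sketching on its own: each completed echo sets a bit at echo modulo the ring size, and the chain consumer is only advanced across the run of consecutively-set bits starting at the current index, exactly as the new comment there describes. A minimal userspace model of that pattern (the names and the ring size are invented for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

static bool comp_bitmap[RING_SIZE];
static unsigned int comp_idx;        /* next in-order slot to retire */
static unsigned int retired;         /* ring slots returned so far */

/* Mark one completion, then retire any in-order run behind it. */
static void spq_complete(uint16_t echo)
{
        comp_bitmap[echo % RING_SIZE] = true;

        while (comp_bitmap[comp_idx]) {
                comp_bitmap[comp_idx] = false;
                comp_idx = (comp_idx + 1) % RING_SIZE;
                retired++;        /* qed_chain_return_produced() in the driver */
        }
}

int main(void)
{
        spq_complete(1);        /* out of order: nothing can be retired yet */
        printf("after echo 1: retired %u\n", retired);        /* 0 */
        spq_complete(0);        /* fills the gap: slots 0 and 1 retire together */
        printf("after echo 0: retired %u\n", retired);        /* 2 */
        return 0;
}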
+ */ + p_ent->elem.data_ptr = p_en2->elem.data_ptr; - /* Strcut assignment */ - *p_en2 = *p_ent; + *p_en2 = *p_ent; - kfree(p_ent); + kfree(p_ent); - p_ent = p_en2; + p_ent = p_en2; + } } /* entry is to be placed in 'pending' queue */ @@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + list_del(&p_ent->list); - qed_chain_return_produced(&p_spq->chain); + /* Avoid overriding of SPQ entries when getting + * out-of-order completions, by marking the completions + * in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ + bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); + + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + bitmap_clear(p_spq->p_comp_bitmap, + p_spq->comp_bitmap_idx, + SPQ_RING_SIZE); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } + p_spq->comp_count++; found = p_ent; break; } + + /* This is relatively uncommon - depends on scenarios + * which have mutliple per-PF sent ramrods. + */ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", + le16_to_cpu(echo), + le16_to_cpu(p_ent->elem.hdr.echo)); } /* Release lock before callback, as callback may post diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index ea00d5f3bab4..7c6caf7f6612 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -116,6 +116,7 @@ struct qede_dev { (edev)->dev_info.num_tc) struct qede_fastpath *fp_array; + u16 req_rss; u16 num_rss; u8 num_tc; #define QEDE_RSS_CNT(edev) ((edev)->num_rss) @@ -269,13 +270,13 @@ int qede_change_mtu(struct net_device *dev, int new_mtu); void qede_fill_by_demand_stats(struct qede_dev *edev); #define RX_RING_SIZE_POW 13 -#define RX_RING_SIZE BIT(RX_RING_SIZE_POW) +#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 #define NUM_RX_BDS_DEF NUM_RX_BDS_MAX #define TX_RING_SIZE_POW 13 -#define TX_RING_SIZE BIT(TX_RING_SIZE_POW) +#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 3a362476a22c..e442b85c9a5e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -322,6 +322,30 @@ static void qede_set_msglevel(struct net_device *ndev, u32 level) dp_module, dp_level); } +static int qede_nway_reset(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + struct qed_link_params link_params; + + if (!netif_running(dev)) + return 0; + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + if (!current_link.link_up) + return 0; + + /* Toggle the link */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = false; + edev->ops->common->set_link(edev->cdev, &link_params); + link_params.link_up = true; + edev->ops->common->set_link(edev->cdev, &link_params); + + return 0; +} + static u32 qede_get_link(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); @@ -333,6 +357,106 @@ static u32 
qede_get_link(struct net_device *dev) return current_link.link_up; } +static void qede_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct qede_dev *edev = netdev_priv(dev); + + ering->rx_max_pending = NUM_RX_BDS_MAX; + ering->rx_pending = edev->q_num_rx_buffers; + ering->tx_max_pending = NUM_TX_BDS_MAX; + ering->tx_pending = edev->q_num_tx_buffers; +} + +static int qede_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct qede_dev *edev = netdev_priv(dev); + + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n", + ering->rx_pending, ering->tx_pending); + + /* Validate legality of configuration */ + if (ering->rx_pending > NUM_RX_BDS_MAX || + ering->rx_pending < NUM_RX_BDS_MIN || + ering->tx_pending > NUM_TX_BDS_MAX || + ering->tx_pending < NUM_TX_BDS_MIN) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n", + NUM_RX_BDS_MIN, NUM_RX_BDS_MAX, + NUM_TX_BDS_MIN, NUM_TX_BDS_MAX); + return -EINVAL; + } + + /* Change ring size and re-load */ + edev->q_num_rx_buffers = ering->rx_pending; + edev->q_num_tx_buffers = ering->tx_pending; + + if (netif_running(edev->ndev)) + qede_reload(edev, NULL, NULL); + + return 0; +} + +static void qede_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + epause->autoneg = true; + if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) + epause->rx_pause = true; + if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) + epause->tx_pause = true; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, + epause->tx_pause); +} + +static int qede_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_params params; + struct qed_link_output current_link; + + if (!edev->dev_info.common.is_mf) { + DP_INFO(edev, + "Pause parameters can not be updated in non-default mode\n"); + return -EOPNOTSUPP; + } + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + memset(¶ms, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; + if (epause->autoneg) { + if (!(current_link.supported_caps & SUPPORTED_Autoneg)) { + DP_INFO(edev, "autoneg not supported\n"); + return -EINVAL; + } + params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + } + if (epause->rx_pause) + params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; + if (epause->tx_pause) + params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; + + params.link_up = true; + edev->ops->common->set_link(edev->cdev, ¶ms); + + return 0; +} + static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args) { edev->ndev->mtu = args->mtu; @@ -366,17 +490,104 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu) return 0; } +static void qede_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qede_dev *edev = netdev_priv(dev); + + channels->max_combined = QEDE_MAX_RSS_CNT(edev); + channels->combined_count = QEDE_RSS_CNT(edev); +} 
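Before qede_set_channels() below: the one non-obvious rule it enforces is that the combined channel count must divide evenly across the device's hardware functions, since the queues are spread between hwfns. A tiny standalone check mirroring that validation (the maximum used here is only illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors qede_set_channels(): non-zero, within the device maximum,
 * and divisible by the number of hardware functions.
 */
static bool qede_channels_ok(unsigned int combined, unsigned int max_rss,
                             unsigned int num_hwfns)
{
        return combined && combined <= max_rss && !(combined % num_hwfns);
}

int main(void)
{
        /* A dual-hwfn device with up to 64 queues: */
        printf("%d\n", qede_channels_ok(6, 64, 2));        /* 1: accepted */
        printf("%d\n", qede_channels_ok(5, 64, 2));        /* 0: not divisible by 2 */
        return 0;
}

The accepted value lands in edev->req_rss and is picked up by qede_set_num_queues() on the next reload, as the qede_main.c hunk further down shows.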
+ +static int qede_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qede_dev *edev = netdev_priv(dev); + + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", + channels->rx_count, channels->tx_count, + channels->other_count, channels->combined_count); + + /* We don't support separate rx / tx, nor `other' channels. */ + if (channels->rx_count || channels->tx_count || + channels->other_count || (channels->combined_count == 0) || + (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "command parameters not supported\n"); + return -EINVAL; + } + + /* Check if there was a change in the active parameters */ + if (channels->combined_count == QEDE_RSS_CNT(edev)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "No change in active parameters\n"); + return 0; + } + + /* We need the number of queues to be divisible between the hwfns */ + if (channels->combined_count % edev->dev_info.common.num_hwfns) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Number of channels must be divisable by %04x\n", + edev->dev_info.common.num_hwfns); + return -EINVAL; + } + + /* Set number of queues and reload if necessary */ + edev->req_rss = channels->combined_count; + if (netif_running(dev)) + qede_reload(edev, NULL, NULL); + + return 0; +} + +static int qede_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct qede_dev *edev = netdev_priv(dev); + u8 led_state = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + led_state = QED_LED_MODE_ON; + break; + + case ETHTOOL_ID_OFF: + led_state = QED_LED_MODE_OFF; + break; + + case ETHTOOL_ID_INACTIVE: + led_state = QED_LED_MODE_RESTORE; + break; + } + + edev->ops->common->set_led(edev->cdev, led_state); + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_settings = qede_get_settings, .set_settings = qede_set_settings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, + .nway_reset = qede_nway_reset, .get_link = qede_get_link, + .get_ringparam = qede_get_ringparam, + .set_ringparam = qede_set_ringparam, + .get_pauseparam = qede_get_pauseparam, + .set_pauseparam = qede_set_pauseparam, .get_strings = qede_get_strings, + .set_phys_id = qede_set_phys_id, .get_ethtool_stats = qede_get_ethtool_stats, .get_sset_count = qede_get_sset_count, + .get_channels = qede_get_channels, + .set_channels = qede_set_channels, }; void qede_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f4657a2e730a..6237f10b5119 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1502,8 +1502,11 @@ static int qede_set_num_queues(struct qede_dev *edev) u16 rss_num; /* Setup queues according to possible resources*/ - rss_num = netif_get_num_default_rss_queues() * - edev->dev_info.common.num_hwfns; + if (edev->req_rss) + rss_num = edev->req_rss; + else + rss_num = netif_get_num_default_rss_queues() * + edev->dev_info.common.num_hwfns; rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index be7d7a62cc0d..34906750b7e7 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -246,12 +246,13 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) u32 state; state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); - while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { + while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) { + idc->vnic_wait_limit--; msleep(1000); state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); } - if (!idc->vnic_wait_limit) { + if (state != QLCNIC_DEV_NPAR_OPER) { dev_err(&adapter->pdev->dev, "vNIC mode not operational, state check timed out.\n"); return -EIO; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index a5f422f26cb4..daf05155b732 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -772,8 +772,10 @@ int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) int i, err = 0; for (i = 0; i < ahw->num_msix; i++) { - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_MQ_TX_CONFIG_INTR); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_MQ_TX_CONFIG_INTR); + if (err) + return err; type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; val = type | (ahw->intr_tbl[i].type << 4); if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index a72bcddf160a..4b76c69fe86d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -167,7 +167,7 @@ struct qlcnic_dcb_cfg { u32 version; }; -static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { +static const struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, @@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .aen_handler = qlcnic_83xx_dcb_aen_handler, }; -static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { +static const struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 3cf4a10fbe1e..9777e5713525 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -37,7 +37,7 @@ struct qlcnic_dcb { struct qlcnic_adapter *adapter; struct delayed_work aen_work; struct workqueue_struct *wq; - struct qlcnic_dcb_ops *ops; + const struct qlcnic_dcb_ops *ops; struct qlcnic_dcb_cfg *cfg; unsigned long state; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index d4b5085a21fa..7bd6f25b4625 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1604,7 +1604,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; - netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, + netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, NAPI_POLL_WEIGHT); } } @@ -2135,7 +2135,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) 
{ tx_ring = &adapter->tx_ring[ring]; - netif_napi_add(netdev, &tx_ring->napi, + netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_83xx_msix_tx_poll, NAPI_POLL_WEIGHT); } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 02b7115b6aaa..997976426799 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) /* Wait for an outstanding reset to complete. */ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { - int i = 3; - while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { + int i = 4; + + while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { netif_err(qdev, ifup, qdev->ndev, "Waiting for adapter UP...\n"); ssleep(1); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index ddb2c6c6ec94..689a4a5c8dcf 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", jiffies, jiffies - dev->trans_start); qca->net_dev->stats.tx_errors++; - /* wake the queue if there is room */ - if (qcaspi_tx_ring_has_space(&qca->txr)) - netif_wake_queue(dev); + /* Trigger tx queue flush and QCA7000 reset */ + qca->sync = QCASPI_SYNC_UNKNOWN; } static int diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 9a37247cf4b8..6b541e57c96a 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1039,7 +1039,7 @@ static int r6040_mii_probe(struct net_device *dev) return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, + phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1061,9 +1061,7 @@ static int r6040_mii_probe(struct net_device *dev) lp->old_link = 0; lp->old_duplex = -1; - dev_info(&lp->pdev->dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -1077,7 +1075,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx = -1; int bar = 0; u16 *adrp; - int i; pr_info("%s\n", version); @@ -1189,19 +1186,11 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) lp->mii_bus->name = "r6040_eth_mii"; snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dev_name(&pdev->dev), card_idx); - lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!lp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_mdio; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(lp->mii_bus); if (err) { dev_err(&pdev->dev, "failed to register MII bus\n"); - goto err_out_mdio_irq; + goto err_out_mdio; } err = r6040_mii_probe(dev); @@ -1220,8 +1209,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); -err_out_mdio_irq: - kfree(lp->mii_bus->irq); err_out_mdio: mdiobus_free(lp->mii_bus); err_out_unmap: @@ -1244,7 +1231,6 @@ static void r6040_remove_one(struct pci_dev *pdev) unregister_netdev(dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); pci_iounmap(pdev, lp->base); diff --git 
a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 79ef799f88ab..17d5571d0432 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3894,7 +3894,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) /* disable phy pfm mode */ rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); rtl_writephy(tp, 0x1f, 0x0000); /* Check ALDPS bit, disable it if enabled */ @@ -3947,7 +3947,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || - (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) { + (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) { rtl_writephy(tp, 0x1f, 0x0bcf); rtl_writephy(tp, 0x16, data); rtl_writephy(tp, 0x1f, 0x0000); @@ -3967,7 +3967,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) /* disable phy pfm mode */ rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); rtl_writephy(tp, 0x1f, 0x0000); /* Check ALDPS bit, disable it if enabled */ @@ -5818,11 +5818,10 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168d_4[] = { - { 0x0b, ~0, 0x48 }, - { 0x19, 0x20, 0x50 }, - { 0x0c, ~0, 0x20 } + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 } }; - int i; rtl_csi_access_enable_1(tp); @@ -5830,13 +5829,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) RTL_W8(MaxTxPacketSize, TxPacketMax); - for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { - const struct ephy_info *e = e_info_8168d_4 + i; - u16 w; - - w = rtl_ephy_read(tp, e->offset); - rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits); - } + rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4)); rtl_enable_clock_request(pdev); } @@ -6127,7 +6120,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); @@ -6136,7 +6129,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_pcie_state_l2l3_enable(tp, false); rtl_writephy(tp, 0x1f, 0x0c42); - rg_saw_cnt = rtl_readphy(tp, 0x13); + rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff); rtl_writephy(tp, 0x1f, 0x0000); if (rg_saw_cnt > 0) { u16 sw_cnt_1ms_ini; @@ -6252,7 +6245,7 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) rtl_hw_start_8168ep(tp); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); } static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@ -6274,7 +6267,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) rtl_hw_start_8168ep(tp); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); data = r8168_mac_ocp_read(tp, 0xd3e2); data &= 0xf000; diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 0623fff932e4..9fbe92ac225b 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -206,6 +206,7 @@ enum CCC_BIT { 
CCC_OPC_RESET = 0x00000000, CCC_OPC_CONFIG = 0x00000001, CCC_OPC_OPERATION = 0x00000002, + CCC_GAC = 0x00000080, CCC_DTSR = 0x00000100, CCC_CSEL = 0x00030000, CCC_CSEL_HPB = 0x00010000, @@ -576,6 +577,9 @@ enum GTI_BIT { GTI_TIV = 0x0FFFFFFF, }; +#define GTI_TIV_MAX GTI_TIV +#define GTI_TIV_MIN 0x20 + /* GIC */ enum GIC_BIT { GIC_PTCE = 0x00000001, /* Undocumented? */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ed5da4d47668..ac43ed914fcf 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -32,6 +32,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> +#include <asm/div64.h> + #include "ravb.h" #define RAVB_DEF_MSG_ENABLE \ @@ -113,12 +115,15 @@ static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) if (mac) { ether_addr_copy(ndev->dev_addr, mac); } else { - ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24); - ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF; - ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF; - ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF; - ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF; - ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF; + u32 mahr = ravb_read(ndev, MAHR); + u32 malr = ravb_read(ndev, MALR); + + ndev->dev_addr[0] = (mahr >> 24) & 0xFF; + ndev->dev_addr[1] = (mahr >> 16) & 0xFF; + ndev->dev_addr[2] = (mahr >> 8) & 0xFF; + ndev->dev_addr[3] = (mahr >> 0) & 0xFF; + ndev->dev_addr[4] = (malr >> 8) & 0xFF; + ndev->dev_addr[5] = (malr >> 0) & 0xFF; } } @@ -338,16 +343,13 @@ error: static void ravb_emac_init(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - u32 ecmr; /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); /* PAUSE prohibition */ - ecmr = ravb_read(ndev, ECMR); - ecmr &= ECMR_DM; - ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - ravb_write(ndev, ecmr, ECMR); + ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); ravb_set_rate(ndev); @@ -405,9 +407,11 @@ static int ravb_dmac_init(struct net_device *ndev) /* Timestamp enable */ ravb_write(ndev, TCCR_TFEN, TCCR); - /* Interrupt enable: */ + /* Interrupt init: */ /* Frame receive */ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0, RIC1); /* Receive FIFO full error, descriptor empty */ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ @@ -875,6 +879,7 @@ static int ravb_phy_init(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); struct phy_device *phydev; struct device_node *pn; + int err; priv->link = 0; priv->speed = 0; @@ -882,6 +887,17 @@ static int ravb_phy_init(struct net_device *ndev) /* Try connecting to PHY */ pn = of_parse_phandle(np, "phy-handle", 0); + if (!pn) { + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
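Jumping ahead to ravb_set_gti() further down in the ravb_main.c changes: the gPTP timer increment it programs is simply the clock period expressed with 20 fractional bits, and the GTI_TIV_MIN/GTI_TIV_MAX check bounds it to the 28-bit register field defined in the ravb.h hunk above. A standalone rendition of the same arithmetic, using a 130 MHz clock as the example rate:

#include <stdint.h>
#include <stdio.h>

#define GTI_TIV_MIN 0x20
#define GTI_TIV_MAX 0x0FFFFFFF

/* Same computation as ravb_set_gti(): clock period in ns, with 20 fraction bits. */
static uint64_t ravb_gti_inc(unsigned long rate_hz)
{
        return (1000000000ULL << 20) / rate_hz;
}

int main(void)
{
        uint64_t inc = ravb_gti_inc(130000000UL);        /* 130 MHz gPTP clock */

        /* 8065969, i.e. ~7.692 ns per tick, well inside the allowed range. */
        printf("inc = %llu, in range: %d\n", (unsigned long long)inc,
               inc >= GTI_TIV_MIN && inc <= GTI_TIV_MAX);
        return 0;
}

For 130 MHz this is exactly the constant the removed hardcoded write, ((1000 << 20) / 130), used to program.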
+ */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + return err; + } + pn = of_node_get(np); + } phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, priv->phy_interface); if (!phydev) { @@ -905,8 +921,10 @@ static int ravb_phy_init(struct net_device *ndev) netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } - netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + /* 10BASE is not supported */ + phydev->supported &= ~PHY_10BT_FEATURES; + + phy_attached_info(phydev); priv->phydev = phydev; @@ -1037,7 +1055,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_queue_1_mcast_packets", "rx_queue_1_errors", "rx_queue_1_crc_errors", - "rx_queue_1_frame_errors_", + "rx_queue_1_frame_errors", "rx_queue_1_length_errors", "rx_queue_1_missed_errors", "rx_queue_1_over_errors", @@ -1229,7 +1247,8 @@ static int ravb_open(struct net_device *ndev) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - ravb_ptp_init(ndev, priv->pdev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1242,7 +1261,8 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); out_free_irq2: if (priv->chip_id == RCAR_GEN3) free_irq(priv->emac_irq, ndev); @@ -1471,12 +1491,12 @@ static int ravb_close(struct net_device *ndev) /* Disable interrupts by clearing the interrupt masks. */ ravb_write(ndev, 0, RIC0); - ravb_write(ndev, 0, RIC1); ravb_write(ndev, 0, RIC2); ravb_write(ndev, 0, TIC); /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ if (ravb_stop_dma(ndev) < 0) @@ -1656,11 +1676,45 @@ static int ravb_mdio_release(struct ravb_private *priv) static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, + { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 }, + { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 }, { } }; MODULE_DEVICE_TABLE(of, ravb_match_table); +static int ravb_set_gti(struct net_device *ndev) +{ + + struct device *dev = ndev->dev.parent; + struct device_node *np = dev->of_node; + unsigned long rate; + struct clk *clk; + uint64_t inc; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + dev_err(dev, "could not get clock\n"); + return PTR_ERR(clk); + } + + rate = clk_get_rate(clk); + clk_put(clk); + + inc = 1000000000ULL << 20; + do_div(inc, rate); + + if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { + dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", + inc, GTI_TIV_MIN, GTI_TIV_MAX); + return -EINVAL; + } + + ravb_write(ndev, inc, GTI); + + return 0; +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1749,15 +1803,25 @@ static int ravb_probe(struct platform_device *pdev) ndev->ethtool_ops = &ravb_ethtool_ops; /* Set AVB config mode */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, - CCC); + if (chip_id == RCAR_GEN2) { + ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | + CCC_OPC_CONFIG, CCC); + /* Set CSEL value */ + ravb_write(ndev, (ravb_read(ndev, CCC) & 
~CCC_CSEL) | + CCC_CSEL_HPB, CCC); + } else { + ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | + CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); + } /* Set CSEL value */ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB, CCC); /* Set GTI value */ - ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI); + error = ravb_set_gti(ndev); + if (error) + goto out_release; /* Request GTI loading */ ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); @@ -1780,6 +1844,10 @@ static int ravb_probe(struct platform_device *pdev) /* Initialise HW timestamp list */ INIT_LIST_HEAD(&priv->ts_skb_list); + /* Initialise PTP Clock driver */ + if (chip_id != RCAR_GEN2) + ravb_ptp_init(ndev, pdev); + /* Debug message level */ priv->msg_enable = RAVB_DEF_MSG_ENABLE; @@ -1821,6 +1889,10 @@ out_napi_del: out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); + + /* Stop PTP Clock driver */ + if (chip_id != RCAR_GEN2) + ravb_ptp_stop(ndev); out_release: if (ndev) free_netdev(ndev); @@ -1835,6 +1907,10 @@ static int ravb_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); + /* Stop PTP Clock driver */ + if (priv->chip_id != RCAR_GEN2) + ravb_ptp_stop(ndev); + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); /* Set reset mode */ diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e7bab7909ed9..dfa9e59c9442 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -52,6 +52,8 @@ NETIF_MSG_RX_ERR| \ NETIF_MSG_TX_ERR) +#define SH_ETH_OFFSET_INVALID ((u16)~0) + #define SH_ETH_OFFSET_DEFAULTS \ [0 ... 
SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID @@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { static void sh_eth_rcv_snd_disable(struct net_device *ndev); static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); +static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); +} + +static u32 sh_eth_read(struct net_device *ndev, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->addr + offset); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@ -449,6 +473,109 @@ static void sh_eth_set_duplex(struct net_device *ndev) sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); +} + +static void sh_eth_set_rate_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, GECMR); + break; + default: + break; + } +} + +#ifdef CONFIG_OF +/* R7S72100 */ +static struct sh_eth_cpu_data r7s72100_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + + .register_type = SH_ETH_REG_FAST_RZ, + + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = 0xff7f009f, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .no_psr = 1, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .hw_crc = 1, + .tsu = 1, + .shift_rd0 = 1, +}; + +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); + + sh_eth_select_mii(ndev); +} + +/* R8A7740 */ +static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .select_mii = 1, + .shift_rd0 = 1, +}; + /* There is CPU dependent code */ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) { @@ -514,6 +641,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .hw_swap 
= 1, .rmiimode = 1, }; +#endif /* CONFIG_OF */ static void sh_eth_set_rate_sh7724(struct net_device *ndev) { @@ -671,34 +799,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .tsu = 1, }; -static void sh_eth_chip_reset(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); - mdelay(1); -} - -static void sh_eth_set_rate_gether(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - switch (mdp->speed) { - case 10: /* 10BASE */ - sh_eth_write(ndev, GECMR_10, GECMR); - break; - case 100:/* 100BASE */ - sh_eth_write(ndev, GECMR_100, GECMR); - break; - case 1000: /* 1000BASE */ - sh_eth_write(ndev, GECMR_1000, GECMR); - break; - default: - break; - } -} - /* SH7734 */ static struct sh_eth_cpu_data sh7734_data = { .chip_reset = sh_eth_chip_reset, @@ -756,80 +856,6 @@ static struct sh_eth_cpu_data sh7763_data = { .irq_flags = IRQF_SHARED, }; -static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); - mdelay(1); - - sh_eth_select_mii(ndev); -} - -/* R8A7740 */ -static struct sh_eth_cpu_data r8a7740_data = { - .chip_reset = sh_eth_chip_reset_r8a7740, - .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate_gether, - - .register_type = SH_ETH_REG_GIGABIT, - - .ecsr_value = ECSR_ICD | ECSR_MPD, - .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, - - .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, - .fdr_value = 0x0000070f, - - .apr = 1, - .mpr = 1, - .tpauser = 1, - .bculr = 1, - .hw_swap = 1, - .rpadir = 1, - .rpadir_value = 2 << 16, - .no_trimd = 1, - .no_ade = 1, - .tsu = 1, - .select_mii = 1, - .shift_rd0 = 1, -}; - -/* R7S72100 */ -static struct sh_eth_cpu_data r7s72100_data = { - .chip_reset = sh_eth_chip_reset, - .set_duplex = sh_eth_set_duplex, - - .register_type = SH_ETH_REG_FAST_RZ, - - .ecsr_value = ECSR_ICD, - .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xff7f009f, - - .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, - .fdr_value = 0x0000070f, - - .no_psr = 1, - .apr = 1, - .mpr = 1, - .tpauser = 1, - .hw_swap = 1, - .rpadir = 1, - .rpadir_value = 2 << 16, - .no_trimd = 1, - .no_ade = 1, - .hw_crc = 1, - .tsu = 1, - .shift_rd0 = 1, -}; - static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, @@ -941,30 +967,6 @@ static void sh_eth_set_receive_align(struct sk_buff *skb) skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); } - -/* CPU <-> EDMAC endian convert */ -static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) -{ - switch (mdp->edmac_endian) { - case EDMAC_LITTLE_ENDIAN: - return cpu_to_le32(x); - case EDMAC_BIG_ENDIAN: - return cpu_to_be32(x); - } - return x; -} - -static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) -{ - switch (mdp->edmac_endian) { - case EDMAC_LITTLE_ENDIAN: - return le32_to_cpu(x); - case EDMAC_BIG_ENDIAN: - return be32_to_cpu(x); - } - return x; -} - /* Program the hardware MAC address from dev->dev_addr. 
*/ static void update_mac_address(struct net_device *ndev) { @@ -987,12 +989,15 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac) if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { memcpy(ndev->dev_addr, mac, ETH_ALEN); } else { - ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); - ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; - ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; - ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); - ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; - ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); + u32 mahr = sh_eth_read(ndev, MAHR); + u32 malr = sh_eth_read(ndev, MALR); + + ndev->dev_addr[0] = (mahr >> 24) & 0xFF; + ndev->dev_addr[1] = (mahr >> 16) & 0xFF; + ndev->dev_addr[2] = (mahr >> 8) & 0xFF; + ndev->dev_addr[3] = (mahr >> 0) & 0xFF; + ndev->dev_addr[4] = (malr >> 8) & 0xFF; + ndev->dev_addr[5] = (malr >> 0) & 0xFF; } } @@ -1008,56 +1013,34 @@ struct bb_info { void (*set_gate)(void *addr); struct mdiobb_ctrl ctrl; void *addr; - u32 mmd_msk;/* MMD */ - u32 mdo_msk; - u32 mdi_msk; - u32 mdc_msk; }; -/* PHY bit set */ -static void bb_set(void *addr, u32 msk) +static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) { - iowrite32(ioread32(addr) | msk, addr); -} + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + u32 pir; -/* PHY bit clear */ -static void bb_clr(void *addr, u32 msk) -{ - iowrite32((ioread32(addr) & ~msk), addr); -} + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); -/* PHY bit read */ -static int bb_read(void *addr, u32 msk) -{ - return (ioread32(addr) & msk) != 0; + pir = ioread32(bitbang->addr); + if (set) + pir |= mask; + else + pir &= ~mask; + iowrite32(pir, bitbang->addr); } /* Data I/O pin control */ static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) { - struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); - - if (bitbang->set_gate) - bitbang->set_gate(bitbang->addr); - - if (bit) - bb_set(bitbang->addr, bitbang->mmd_msk); - else - bb_clr(bitbang->addr, bitbang->mmd_msk); + sh_mdio_ctrl(ctrl, PIR_MMD, bit); } /* Set bit data*/ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) { - struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); - - if (bitbang->set_gate) - bitbang->set_gate(bitbang->addr); - - if (bit) - bb_set(bitbang->addr, bitbang->mdo_msk); - else - bb_clr(bitbang->addr, bitbang->mdo_msk); + sh_mdio_ctrl(ctrl, PIR_MDO, bit); } /* Get bit data*/ @@ -1068,21 +1051,13 @@ static int sh_get_mdio(struct mdiobb_ctrl *ctrl) if (bitbang->set_gate) bitbang->set_gate(bitbang->addr); - return bb_read(bitbang->addr, bitbang->mdi_msk); + return (ioread32(bitbang->addr) & PIR_MDI) != 0; } /* MDC pin control */ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) { - struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); - - if (bitbang->set_gate) - bitbang->set_gate(bitbang->addr); - - if (bit) - bb_set(bitbang->addr, bitbang->mdc_msk); - else - bb_clr(bitbang->addr, bitbang->mdc_msk); + sh_mdio_ctrl(ctrl, PIR_MDC, bit); } /* mdio bus control struct */ @@ -1143,6 +1118,7 @@ static void sh_eth_ring_format(struct net_device *ndev) int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; dma_addr_t dma_addr; + u32 buf_len; mdp->cur_rx = 0; mdp->cur_tx = 0; @@ -1163,17 +1139,17 @@ static void sh_eth_ring_format(struct net_device *ndev) /* RX descriptor */ rxdesc = &mdp->rx_ring[i]; /* The size of 
the buffer is a multiple of 32 bytes. */ - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); - dma_addr = dma_map_single(&ndev->dev, skb->data, - rxdesc->buffer_length, + buf_len = ALIGN(mdp->rx_buf_sz, 32); + rxdesc->len = cpu_to_le32(buf_len << 16); + dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) { kfree_skb(skb); break; } mdp->rx_skbuff[i] = skb; - rxdesc->addr = dma_addr; - rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); + rxdesc->addr = cpu_to_le32(dma_addr); + rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); /* Rx descriptor address set */ if (i == 0) { @@ -1187,7 +1163,7 @@ static void sh_eth_ring_format(struct net_device *ndev) mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); /* Mark the last entry as wrapping the ring. */ - rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE); + rxdesc->status |= cpu_to_le32(RD_RDLE); memset(mdp->tx_ring, 0, tx_ringsize); @@ -1195,8 +1171,8 @@ static void sh_eth_ring_format(struct net_device *ndev) for (i = 0; i < mdp->num_tx_ring; i++) { mdp->tx_skbuff[i] = NULL; txdesc = &mdp->tx_ring[i]; - txdesc->status = cpu_to_edmac(mdp, TD_TFP); - txdesc->buffer_length = 0; + txdesc->status = cpu_to_le32(TD_TFP); + txdesc->len = cpu_to_le32(0); if (i == 0) { /* Tx descriptor address set */ sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); @@ -1206,7 +1182,7 @@ static void sh_eth_ring_format(struct net_device *ndev) } } - txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); + txdesc->status |= cpu_to_le32(TD_TDLE); } /* Get skb and descriptor buffer */ @@ -1264,7 +1240,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) { int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); - u32 val; /* Soft Reset */ ret = sh_eth_reset(ndev); @@ -1317,10 +1292,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) } /* PAUSE Prohibition */ - val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | - ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - - sh_eth_write(ndev, val, ECMR); + sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); if (mdp->cd->set_rate) mdp->cd->set_rate(ndev); @@ -1362,7 +1335,7 @@ static void sh_eth_dev_exit(struct net_device *ndev) * packet boundary if it's currently running */ for (i = 0; i < mdp->num_tx_ring; i++) - mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); + mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); /* Disable TX FIFO egress to MAC */ sh_eth_rcv_snd_disable(ndev); @@ -1394,27 +1367,28 @@ static int sh_eth_txfree(struct net_device *ndev) for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { entry = mdp->dirty_tx % mdp->num_tx_ring; txdesc = &mdp->tx_ring[entry]; - if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) + if (txdesc->status & cpu_to_le32(TD_TACT)) break; /* TACT bit must be checked before all the following reads */ dma_rmb(); netif_info(mdp, tx_done, ndev, "tx entry %d status 0x%08x\n", - entry, edmac_to_cpu(mdp, txdesc->status)); + entry, le32_to_cpu(txdesc->status)); /* Free the original skb. 
*/ if (mdp->tx_skbuff[entry]) { - dma_unmap_single(&ndev->dev, txdesc->addr, - txdesc->buffer_length, DMA_TO_DEVICE); + dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), + le32_to_cpu(txdesc->len) >> 16, + DMA_TO_DEVICE); dev_kfree_skb_irq(mdp->tx_skbuff[entry]); mdp->tx_skbuff[entry] = NULL; free_num++; } - txdesc->status = cpu_to_edmac(mdp, TD_TFP); + txdesc->status = cpu_to_le32(TD_TFP); if (entry >= mdp->num_tx_ring - 1) - txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); + txdesc->status |= cpu_to_le32(TD_TDLE); ndev->stats.tx_packets++; - ndev->stats.tx_bytes += txdesc->buffer_length; + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; } return free_num; } @@ -1433,15 +1407,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) u32 desc_status; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; dma_addr_t dma_addr; + u32 buf_len; boguscnt = min(boguscnt, *quota); limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; - while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { /* RACT bit must be checked before all the following reads */ dma_rmb(); - desc_status = edmac_to_cpu(mdp, rxdesc->status); - pkt_len = rxdesc->frame_length; + desc_status = le32_to_cpu(rxdesc->status); + pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; if (--boguscnt < 0) break; @@ -1462,6 +1437,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (mdp->cd->shift_rd0) desc_status >>= 16; + skb = mdp->rx_skbuff[entry]; if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { ndev->stats.rx_errors++; @@ -1477,16 +1453,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) ndev->stats.rx_missed_errors++; if (desc_status & RD_RFS10) ndev->stats.rx_over_errors++; - } else { + } else if (skb) { + dma_addr = le32_to_cpu(rxdesc->addr); if (!mdp->cd->hw_swap) sh_eth_soft_swap( - phys_to_virt(ALIGN(rxdesc->addr, 4)), + phys_to_virt(ALIGN(dma_addr, 4)), pkt_len + 2); - skb = mdp->rx_skbuff[entry]; mdp->rx_skbuff[entry] = NULL; if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); - dma_unmap_single(&ndev->dev, rxdesc->addr, + dma_unmap_single(&ndev->dev, dma_addr, ALIGN(mdp->rx_buf_sz, 32), DMA_FROM_DEVICE); skb_put(skb, pkt_len); @@ -1506,7 +1482,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) entry = mdp->dirty_rx % mdp->num_rx_ring; rxdesc = &mdp->rx_ring[entry]; /* The size of the buffer is 32 byte boundary. */ - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); + buf_len = ALIGN(mdp->rx_buf_sz, 32); + rxdesc->len = cpu_to_le32(buf_len << 16); if (mdp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(ndev, skbuff_size); @@ -1514,8 +1491,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) break; /* Better luck next round. 
*/ sh_eth_set_receive_align(skb); dma_addr = dma_map_single(&ndev->dev, skb->data, - rxdesc->buffer_length, - DMA_FROM_DEVICE); + buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) { kfree_skb(skb); break; @@ -1523,15 +1499,14 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) mdp->rx_skbuff[entry] = skb; skb_checksum_none_assert(skb); - rxdesc->addr = dma_addr; + rxdesc->addr = cpu_to_le32(dma_addr); } dma_wmb(); /* RACT bit must be set after all the above writes */ if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= - cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE); + cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE); else - rxdesc->status |= - cpu_to_edmac(mdp, RD_RACT | RD_RFP); + rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); } /* Restart Rx engine if stopped. */ @@ -1848,8 +1823,7 @@ static int sh_eth_phy_init(struct net_device *ndev) return PTR_ERR(phydev); } - netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + phy_attached_info(phydev); mdp->phydev = phydev; @@ -2331,8 +2305,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < mdp->num_rx_ring; i++) { rxdesc = &mdp->rx_ring[i]; - rxdesc->status = 0; - rxdesc->addr = 0xBADF00D0; + rxdesc->status = cpu_to_le32(0); + rxdesc->addr = cpu_to_le32(0xBADF00D0); dev_kfree_skb(mdp->rx_skbuff[i]); mdp->rx_skbuff[i] = NULL; } @@ -2350,6 +2324,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_txdesc *txdesc; + dma_addr_t dma_addr; u32 entry; unsigned long flags; @@ -2372,21 +2347,21 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) txdesc = &mdp->tx_ring[entry]; /* soft swap. 
*/ if (!mdp->cd->hw_swap) - sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), - skb->len + 2); - txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&ndev->dev, txdesc->addr)) { + sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); + dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { kfree_skb(skb); return NETDEV_TX_OK; } - txdesc->buffer_length = skb->len; + txdesc->addr = cpu_to_le32(dma_addr); + txdesc->len = cpu_to_le32(skb->len << 16); dma_wmb(); /* TACT bit must be set after all the above writes */ if (entry >= mdp->num_tx_ring - 1) - txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); + txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); else - txdesc->status |= cpu_to_edmac(mdp, TD_TACT); + txdesc->status |= cpu_to_le32(TD_TACT); mdp->cur_tx++; @@ -2881,7 +2856,7 @@ static int sh_mdio_release(struct sh_eth_private *mdp) static int sh_mdio_init(struct sh_eth_private *mdp, struct sh_eth_plat_data *pd) { - int ret, i; + int ret; struct bb_info *bitbang; struct platform_device *pdev = mdp->pdev; struct device *dev = &mdp->pdev->dev; @@ -2894,10 +2869,6 @@ static int sh_mdio_init(struct sh_eth_private *mdp, /* bitbang init */ bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; bitbang->set_gate = pd->set_mdio_gate; - bitbang->mdi_msk = PIR_MDI; - bitbang->mdo_msk = PIR_MDO; - bitbang->mmd_msk = PIR_MMD; - bitbang->mdc_msk = PIR_MDC; bitbang->ctrl.ops = &bb_ops; /* MII controller setting */ @@ -2911,20 +2882,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - /* PHY IRQ */ - mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!mdp->mii_bus->irq) { - ret = -ENOMEM; - goto out_free_bus; - } - /* register MDIO bus */ if (dev->of_node) { ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - mdp->mii_bus->irq[i] = PHY_POLL; if (pd->phy_irq > 0) mdp->mii_bus->irq[pd->phy] = pd->phy_irq; @@ -3096,8 +3057,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* get PHY ID */ mdp->phy_id = pd->phy; mdp->phy_interface = pd->phy_interface; - /* EDMAC endian */ - mdp->edmac_endian = pd->edmac_endian; mdp->no_ether_link = pd->no_ether_link; mdp->ether_link_active_low = pd->ether_link_active_low; @@ -3277,13 +3236,6 @@ static struct platform_device_id sh_eth_id_table[] = { { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, - { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data }, - { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, - { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, - { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data }, - { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data }, - { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data }, - { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data }, { } }; MODULE_DEVICE_TABLE(platform, sh_eth_id_table); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 50382b1c9ddc..8fa4ef3a7fdd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -283,7 +283,7 @@ enum DMAC_IM_BIT { DMAC_M_RINT1 = 0x00000001, }; -/* Receive descriptor bit */ +/* Receive descriptor 0 bits */ enum RD_STS_BIT { RD_RACT = 0x80000000, RD_RDLE = 0x40000000, RD_RFP1 = 0x20000000, 
RD_RFP0 = 0x10000000, @@ -298,6 +298,12 @@ enum RD_STS_BIT { #define RDFEND RD_RFP0 #define RD_RFP (RD_RFP1|RD_RFP0) +/* Receive descriptor 1 bits */ +enum RD_LEN_BIT { + RD_RFL = 0x0000ffff, /* receive frame length */ + RD_RBL = 0xffff0000, /* receive buffer length */ +}; + /* FCFTR */ enum FCFTR_BIT { FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, @@ -307,7 +313,7 @@ enum FCFTR_BIT { #define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) #define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) -/* Transmit descriptor bit */ +/* Transmit descriptor 0 bits */ enum TD_STS_BIT { TD_TACT = 0x80000000, TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, @@ -317,6 +323,11 @@ enum TD_STS_BIT { #define TDFEND TD_TFP0 #define TD_TFP (TD_TFP1|TD_TFP0) +/* Transmit descriptor 1 bits */ +enum TD_LEN_BIT { + TD_TBL = 0xffff0000, /* transmit buffer length */ +}; + /* RMCR */ enum RMCR_BIT { RMCR_RNC = 0x00000001, @@ -425,15 +436,9 @@ enum TSU_FWSLC_BIT { */ struct sh_eth_txdesc { u32 status; /* TD0 */ -#if defined(__LITTLE_ENDIAN) - u16 pad0; /* TD1 */ - u16 buffer_length; /* TD1 */ -#else - u16 buffer_length; /* TD1 */ - u16 pad0; /* TD1 */ -#endif + u32 len; /* TD1 */ u32 addr; /* TD2 */ - u32 pad1; /* padding data */ + u32 pad0; /* padding data */ } __aligned(2) __packed; /* The sh ether Rx buffer descriptors. @@ -441,13 +446,7 @@ struct sh_eth_txdesc { */ struct sh_eth_rxdesc { u32 status; /* RD0 */ -#if defined(__LITTLE_ENDIAN) - u16 frame_length; /* RD1 */ - u16 buffer_length; /* RD1 */ -#else - u16 buffer_length; /* RD1 */ - u16 frame_length; /* RD1 */ -#endif + u32 len; /* RD1 */ u32 addr; /* RD2 */ u32 pad0; /* padding data */ } __aligned(2) __packed; @@ -514,7 +513,6 @@ struct sh_eth_private { u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ u32 cur_tx, dirty_tx; u32 rx_buf_sz; /* Based on MTU+slack. */ - int edmac_endian; struct napi_struct napi; bool irq_enabled; /* MII transceiver section. 
*/ @@ -546,31 +544,6 @@ static inline void sh_eth_soft_swap(char *src, int len) #endif } -#define SH_ETH_OFFSET_INVALID ((u16) ~0) - -static inline void sh_eth_write(struct net_device *ndev, u32 data, - int enum_index) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - u16 offset = mdp->reg_offset[enum_index]; - - if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) - return; - - iowrite32(data, mdp->addr + offset); -} - -static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - u16 offset = mdp->reg_offset[enum_index]; - - if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) - return ~0U; - - return ioread32(mdp->addr + offset); -} - static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) { diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index e9f2349e98bc..a4ab71d43e4e 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4998,7 +4998,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; dev->switchdev_ops = &rocker_port_switchdev_ops; - netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, NAPI_POLL_WEIGHT); netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, NAPI_POLL_WEIGHT); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c index 43ccb4a6de15..467ff7033606 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c @@ -180,7 +180,7 @@ int sxgbe_mdio_register(struct net_device *ndev) } for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - struct phy_device *phy = mdio_bus->phy_map[phy_addr]; + struct phy_device *phy = mdiobus_get_phy(mdio_bus, phy_addr); if (phy) { char irq_num[4]; @@ -216,7 +216,7 @@ int sxgbe_mdio_register(struct net_device *ndev) } netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", phy->phy_id, phy_addr, irq_str, - dev_name(&phy->dev), act ? " active" : ""); + phydev_name(phy), act ? " active" : ""); phy_found = true; } } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index bc6d21b471be..98d33d462c6c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -181,13 +181,6 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); if (!(nic_data->datapath_caps & - (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { - netif_err(efx, drv, efx->net_dev, - "current firmware does not support TSO\n"); - return -ENODEV; - } - - if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { netif_err(efx, probe, efx->net_dev, "current firmware does not support an RX prefix\n"); @@ -493,10 +486,17 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); for (i = 0; i < n; i++) { - rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) { + /* Don't display the MC error if we didn't have space + * for a VF. 
+ */ + if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) + efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, + 0, outbuf, outlen, rc); break; + } if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { rc = -EIO; break; @@ -1797,6 +1797,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); tx_queue->write_count = 1; + + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { + tx_queue->tso_version = 1; + } + wmb(); efx_ef10_push_tx_desc(tx_queue, txd); @@ -2375,8 +2381,19 @@ static int efx_ef10_ev_init(struct efx_channel *channel) 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { netif_info(efx, drv, efx->net_dev, "other functions on NIC have been reset\n"); - /* MC's boot count has incremented */ - ++nic_data->warm_boot_count; + + /* With MCFW v4.6.x and earlier, the + * boot count will have incremented, + * so re-read the warm_boot_count + * value now to ensure this function + * doesn't think it has changed next + * time it checks. + */ + rc = efx_ef10_get_warm_boot_count(efx); + if (rc >= 0) { + nic_data->warm_boot_count = rc; + rc = 0; + } } nic_data->workaround_26807 = true; } else if (rc == -EPERM) { @@ -3299,7 +3316,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - EFX_FILTER_FLAG_RX_RSS); + (efx_rss_enabled(efx) ? + EFX_FILTER_FLAG_RX_RSS : 0)); new_spec.dmaq_id = 0; new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; rc = efx_ef10_filter_push(efx, &new_spec, @@ -3822,13 +3840,12 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, table->entry[filter_idx].handle); - rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, + sizeof(inbuf), NULL, 0, NULL); if (rc) - netdev_WARN(efx->net_dev, - "filter_idx=%#x handle=%#llx\n", - filter_idx, - table->entry[filter_idx].handle); + netif_info(efx, drv, efx->net_dev, + "%s: filter %04x remove failed\n", + __func__, filter_idx); kfree(spec); } @@ -3837,11 +3854,14 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) } #define EFX_EF10_FILTER_DO_MARK_OLD(id) \ - if (id != EFX_EF10_FILTER_ID_INVALID) { \ - filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ - WARN_ON(!table->entry[filter_idx].spec); \ - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \ - } + if (id != EFX_EF10_FILTER_ID_INVALID) { \ + filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ + if (!table->entry[filter_idx].spec) \ + netif_dbg(efx, drv, efx->net_dev, \ + "%s: marked null spec old %04x:%04x\n", \ + __func__, id, filter_idx); \ + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\ + } static void efx_ef10_filter_mark_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -3921,6 +3941,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_dev_addr *addr_list; + enum efx_filter_flags filter_flags; struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; unsigned int i, j; @@ -3935,11 +3956,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, addr_count = table->dev_uc_count; } + filter_flags = efx_rss_enabled(efx) ? 
EFX_FILTER_FLAG_RX_RSS : 0; + /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); @@ -3968,9 +3989,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); @@ -4000,13 +4019,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; + enum efx_filter_flags filter_flags; struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; int rc; - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); if (multicast) efx_filter_set_mc_def(&spec); @@ -4015,16 +4035,16 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - netif_warn(efx, drv, efx->net_dev, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, + efx->net_dev, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? 
"Multi" : "Uni", rc); } else if (multicast) { table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); if (!nic_data->workaround_26807) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); @@ -4060,19 +4080,31 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, static void efx_ef10_filter_remove_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - bool remove_failed = false; + int remove_failed = 0; + int remove_noent = 0; + int rc; int i; for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { - if (efx_ef10_filter_remove_internal( - efx, 1U << EFX_FILTER_PRI_AUTO, - i, true) < 0) - remove_failed = true; + rc = efx_ef10_filter_remove_internal(efx, + 1U << EFX_FILTER_PRI_AUTO, i, true); + if (rc == -ENOENT) + remove_noent++; + else if (rc) + remove_failed++; } } - WARN_ON(remove_failed); + + if (remove_failed) + netif_info(efx, drv, efx->net_dev, + "%s: failed to remove %d filters\n", + __func__, remove_failed); + if (remove_noent) + netif_info(efx, drv, efx->net_dev, + "%s: failed to remove %d non-existent filters\n", + __func__, remove_noent); } static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a3c42a376741..0705ec869487 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2059,7 +2059,6 @@ static void efx_init_napi_channel(struct efx_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); - napi_hash_add(&channel->napi_str); efx_channel_busy_poll_init(channel); } @@ -2785,6 +2784,12 @@ static const struct pci_device_id efx_pci_table[] = { .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, {0} /* end of list */ }; @@ -3123,10 +3128,10 @@ static int efx_pci_probe(struct pci_dev *pci_dev, net_dev->features |= (efx->type->offload_features | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_RXCSUM); - if (efx->type->offload_features & NETIF_F_V6_CSUM) + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ - net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); /* All offloads can be toggled */ @@ -3169,14 +3174,15 @@ static int efx_pci_probe(struct pci_dev *pci_dev, rtnl_lock(); rc = efx_mtd_probe(efx); rtnl_unlock(); - if (rc) + if (rc && rc != -EPERM) netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); rc = pci_enable_pcie_error_reporting(pci_dev); if (rc && rc != -EINVAL) - netif_warn(efx, probe, efx->net_dev, - "pci_enable_pcie_error_reporting failed 
(%d)\n", rc); + netif_notice(efx, probe, efx->net_dev, + "PCIE error reporting unavailable (%d).\n", + rc); return 0; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 1aaf76c1ace8..10827476bc0b 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) +static inline bool efx_rss_enabled(struct efx_nic *efx) +{ + return efx->rss_spread > 1; +} + /* Filters */ void efx_mac_reconfigure(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 5a1c5a8f278a..133e9e35be9e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx, */ spec->priority = EFX_FILTER_PRI_AUTO; spec->flags = (EFX_FILTER_FLAG_RX | - (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); spec->dmaq_id = 0; } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 41fb6b60a3f0..d28e7dd8fa3c 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -82,6 +82,7 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi->logging_enabled = mcdi_logging_default; #endif init_waitqueue_head(&mcdi->wq); + init_waitqueue_head(&mcdi->proxy_rx_wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; mcdi->mode = MCDI_MODE_POLL; @@ -315,6 +316,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) } #endif + mcdi->resprc_raw = 0; if (error && mcdi->resp_data_len == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); mcdi->resprc = -EIO; @@ -325,8 +327,8 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) mcdi->resprc = -EIO; } else if (error) { efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); - mcdi->resprc = - efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); + mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0); + mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw); } else { mcdi->resprc = 0; } @@ -621,9 +623,30 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) return 0; } -static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, +static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx, + size_t hdr_len, size_t data_len, + u32 *proxy_handle) +{ + MCDI_DECLARE_BUF_ERR(testbuf); + const size_t buflen = sizeof(testbuf); + + if (!proxy_handle || data_len < buflen) + return false; + + efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen); + if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) { + *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE); + return true; + } + + return false; +} + +static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual, bool quiet) + size_t *outlen_actual, bool quiet, + u32 *proxy_handle, int *raw_rc) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); MCDI_DECLARE_BUF_ERR(errbuf); @@ -657,6 +680,9 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, spin_unlock_bh(&mcdi->iface_lock); } + if (proxy_handle) + *proxy_handle = 0; + if (rc != 0) { if (outlen_actual) *outlen_actual = 0; @@ -669,6 +695,8 @@ static int 
_efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, * acquiring the iface_lock. */ spin_lock_bh(&mcdi->iface_lock); rc = mcdi->resprc; + if (raw_rc) + *raw_rc = mcdi->resprc_raw; hdr_len = mcdi->resp_hdr_len; data_len = mcdi->resp_data_len; err_len = min(sizeof(errbuf), data_len); @@ -689,6 +717,12 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", -rc); efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } else if (proxy_handle && (rc == -EPROTO) && + efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, + proxy_handle)) { + mcdi->proxy_rx_status = 0; + mcdi->proxy_rx_handle = 0; + mcdi->state = MCDI_STATE_PROXY_WAIT; } else if (rc && !quiet) { efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len, rc); @@ -701,34 +735,195 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, } } - efx_mcdi_release(mcdi); + if (!proxy_handle || !*proxy_handle) + efx_mcdi_release(mcdi); return rc; } -static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, +static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->state == MCDI_STATE_PROXY_WAIT) { + /* Interrupt the proxy wait. */ + mcdi->proxy_rx_status = -EINTR; + wake_up(&mcdi->proxy_rx_wq); + } +} + +static void efx_mcdi_ev_proxy_response(struct efx_nic *efx, + u32 handle, int status) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT); + + mcdi->proxy_rx_status = efx_mcdi_errno(status); + /* Ensure the status is written before we update the handle, since the + * latter is used to check if we've finished. + */ + wmb(); + mcdi->proxy_rx_handle = handle; + wake_up(&mcdi->proxy_rx_wq); +} + +static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + /* Wait for a proxy event, or timeout. */ + rc = wait_event_timeout(mcdi->proxy_rx_wq, + mcdi->proxy_rx_handle != 0 || + mcdi->proxy_rx_status == -EINTR, + MCDI_RPC_TIMEOUT); + + if (rc <= 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy timeout %d\n", handle); + return -ETIMEDOUT; + } else if (mcdi->proxy_rx_handle != handle) { + netif_warn(efx, hw, efx->net_dev, + "MCDI proxy unexpected handle %d (expected %d)\n", + mcdi->proxy_rx_handle, handle); + return -EINVAL; + } + + return mcdi->proxy_rx_status; +} + +static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual, bool quiet) + size_t *outlen_actual, bool quiet, int *raw_rc) { + u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */ int rc; + if (inbuf && inlen && (inbuf == outbuf)) { + /* The input buffer can't be aliased with the output. */ + WARN_ON(1); + return -EINVAL; + } + rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); - if (rc) { - if (outlen_actual) - *outlen_actual = 0; + if (rc) return rc; + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, quiet, &proxy_handle, raw_rc); + + if (proxy_handle) { + /* Handle proxy authorisation. This allows approval of MCDI + * operations to be delegated to the admin function, allowing + * fine control over (eg) multicast subscriptions. 
+ */ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + netif_dbg(efx, hw, efx->net_dev, + "MCDI waiting for proxy auth %d\n", + proxy_handle); + rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet); + + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy retry %d\n", proxy_handle); + + /* We now retry the original request. */ + mcdi->state = MCDI_STATE_RUNNING_SYNC; + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, + outbuf, outlen, outlen_actual, + quiet, NULL, raw_rc); + } else { + netif_printk(efx, hw, + rc == -EPERM ? KERN_DEBUG : KERN_ERR, + efx->net_dev, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); + + if (rc == -EINTR || rc == -EIO) + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_mcdi_release(mcdi); + } } - return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, quiet); + + return rc; } +static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int raw_rc = 0; + int rc; + + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, true, &raw_rc); + + if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + efx->type->is_vf) { + /* If the EVB port isn't available within a VF this may + * mean the PF is still bringing the switch up. We should + * retry our request shortly. + */ + unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT; + unsigned int delay_us = 10000; + + netif_dbg(efx, hw, efx->net_dev, + "%s: NO_EVB_PORT; will retry request\n", + __func__); + + do { + usleep_range(delay_us, delay_us + 10000); + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, + true, &raw_rc); + if (delay_us < 100000) + delay_us <<= 1; + } while ((rc == -EPROTO) && + (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + time_before(jiffies, abort_time)); + } + + if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO)) + efx_mcdi_display_error(efx, cmd, inlen, + outbuf, outlen, rc); + + return rc; +} + +/** + * efx_mcdi_rpc - Issue an MCDI command and wait for completion + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes. Must be a multiple + * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1. + * @outbuf: Response buffer. May be %NULL if @outlen is 0. + * @outlen: Length of response buffer, in bytes. If the actual + * response is longer than @outlen & ~3, it will be truncated + * to that length. + * @outlen_actual: Pointer through which to return the actual response + * length. May be %NULL if this is not needed. + * + * This function may sleep and therefore must be called in an appropriate + * context. + * + * Return: A negative error code, or zero if successful. The error + * code may come from the MCDI response or may indicate a failure + * to communicate with the MC. In the former case, the response + * will still be copied to @outbuf and *@outlen_actual will be + * set accordingly. In the latter case, *@outlen_actual will be + * set to zero. 
+ */ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, - outlen_actual, false); + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, false); } /* Normally, on receiving an error code in the MCDI response, @@ -744,8 +939,8 @@ int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, - outlen_actual, true); + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, true); } int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, @@ -866,7 +1061,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, size_t *outlen_actual) { return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, false); + outlen_actual, false, NULL, NULL); } int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, @@ -874,7 +1069,7 @@ int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, size_t *outlen_actual) { return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, true); + outlen_actual, true, NULL, NULL); } void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, @@ -887,9 +1082,10 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, code = MCDI_DWORD(outbuf, ERR_CODE); if (outlen >= MC_CMD_ERR_ARG_OFST + 4) err_arg = MCDI_DWORD(outbuf, ERR_ARG); - netif_err(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n", - cmd, (int)inlen, rc, code, err_arg); + netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR, + efx->net_dev, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -1014,8 +1210,13 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) * receiving a REBOOT event after posting the MCDI * request. Did the mc reboot before or after the copyout? The * best we can do always is just return failure. + * + * If there is an outstanding proxy response expected it is not going + * to arrive. We should thus abort it. */ spin_lock(&mcdi->iface_lock); + efx_mcdi_proxy_abort(mcdi); + if (efx_mcdi_complete_sync(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = rc; @@ -1063,6 +1264,8 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx) spin_lock(&mcdi->iface_lock); efx->mc_bist_for_other_fn = true; + efx_mcdi_proxy_abort(mcdi); + if (efx_mcdi_complete_sync(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = -EIO; @@ -1171,6 +1374,11 @@ void efx_mcdi_process_event(struct efx_channel *channel, EFX_QWORD_VAL(*event)); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); break; + case MCDI_EVENT_CODE_PROXY_RESPONSE: + efx_mcdi_ev_proxy_response(efx, + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); + break; default: netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", code); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 025d504c472b..c9aeb0701c9a 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -17,6 +17,8 @@ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending. 
* Only the thread that moved into this state is allowed to move out of it. * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending. + * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that + * indicates we must wait for a proxy try again message. * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread * has not yet consumed the result. For all other threads, equivalent to * %MCDI_STATE_RUNNING. @@ -25,6 +27,7 @@ enum efx_mcdi_state { MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC, MCDI_STATE_RUNNING_ASYNC, + MCDI_STATE_PROXY_WAIT, MCDI_STATE_COMPLETED, }; @@ -60,6 +63,9 @@ enum efx_mcdi_mode { * @async_timer: Timer for asynchronous request timeout * @logging_buffer: buffer that may be used to build MCDI tracing messages * @logging_enabled: whether to trace MCDI + * @proxy_rx_handle: Most recently received proxy authorisation handle + * @proxy_rx_status: Status of most recent proxy authorisation + * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle */ struct efx_mcdi_iface { struct efx_nic *efx; @@ -71,6 +77,7 @@ struct efx_mcdi_iface { unsigned int credits; unsigned int seqno; int resprc; + int resprc_raw; size_t resp_hdr_len; size_t resp_data_len; spinlock_t async_lock; @@ -80,6 +87,9 @@ struct efx_mcdi_iface { char *logging_buffer; bool logging_enabled; #endif + unsigned int proxy_rx_handle; + int proxy_rx_status; + wait_queue_head_t proxy_rx_wq; }; struct efx_mcdi_mon { diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index a8ddd122f685..38c422321cda 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -182,6 +182,7 @@ struct efx_tx_buffer { * * @efx: The associated Efx NIC * @queue: DMA queue number + * @tso_version: Version of TSO in use for this queue. * @channel: The associated channel * @core_txq: The networking core TX queue structure * @buffer: The software buffer ring @@ -228,6 +229,7 @@ struct efx_tx_queue { /* Members which don't change on the fast path */ struct efx_nic *efx ____cacheline_aligned_in_smp; unsigned queue; + unsigned int tso_version; struct efx_channel *channel; struct netdev_queue *core_txq; struct efx_tx_buffer *buffer; @@ -1502,8 +1504,9 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, * same cycle, the XMAC can miss the IPG altogether. We work around * this by adding a further 16 bytes. */ +#define EFX_FRAME_PAD 16 #define EFX_MAX_FRAME_LEN(mtu) \ - ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) + (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8)) static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb) { diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 809ea4610a77..8956995b2fe7 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -463,7 +463,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, skb_record_rx_queue(skb, channel->rx_queue.core_index); - skb_mark_napi_id(skb, &channel->napi_str); gro_result = napi_gro_frags(napi); if (gro_result != GRO_DROP) channel->irq_mod_score += 2; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 67f6afaa022f..f7a0ec1bca97 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -1010,13 +1010,17 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, /* Parse the SKB header and initialise state. 
*/ static int tso_start(struct tso_state *st, struct efx_nic *efx, + struct efx_tx_queue *tx_queue, const struct sk_buff *skb) { - bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; struct device *dma_dev = &efx->pci_dev->dev; unsigned int header_len, in_len; + bool use_opt_desc = false; dma_addr_t dma_addr; + if (tx_queue->tso_version == 1) + use_opt_desc = true; + st->ip_off = skb_network_header(skb) - skb->data; st->tcp_off = skb_transport_header(skb) - skb->data; header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); @@ -1271,7 +1275,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); - rc = tso_start(&state, efx, skb); + rc = tso_start(&state, efx, tx_queue, skb); if (rc) goto mem_err; diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c index 3d5ee3259885..194f67d9f3bf 100644 --- a/drivers/net/ethernet/sfc/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/txc43128_phy.c @@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) val |= (1 << TXC_GLCMD_LMTSWRST_LBN); efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); - while (tries--) { + while (--tries) { val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) break; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 219a99b7a631..8af25563f627 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -864,8 +864,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) for (i = 0; i < 10; i++) { /* Set PHY to 10/FD, no ANEG, and loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, - BMCR_LOOPBACK | BMCR_FULLDPLX); + smsc911x_mii_write(phy_dev->mdio.bus, phy_dev->mdio.addr, + MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX); /* Enable MAC tx/rx, FD */ spin_lock_irqsave(&pdata->mac_lock, flags); @@ -893,7 +893,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) spin_unlock_irqrestore(&pdata->mac_lock, flags); /* Cancel PHY loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + smsc911x_mii_write(phy_dev->mdio.bus, phy_dev->mdio.addr, MII_BMCR, 0); smsc911x_reg_write(pdata, TX_CFG, 0); smsc911x_reg_write(pdata, RX_CFG, 0); @@ -1021,7 +1021,7 @@ static int smsc911x_mii_probe(struct net_device *dev) } SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", - phydev->addr, phydev->phy_id); + phydev->mdio.addr, phydev->phy_id); ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link, pdata->config.phy_interface); @@ -1031,9 +1031,7 @@ static int smsc911x_mii_probe(struct net_device *dev) return ret; } - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | @@ -1061,7 +1059,7 @@ static int smsc911x_mii_init(struct platform_device *pdev, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - int err = -ENXIO, i; + int err = -ENXIO; pdata->mii_bus = mdiobus_alloc(); if (!pdata->mii_bus) { @@ -1075,9 +1073,7 @@ static int smsc911x_mii_init(struct platform_device *pdev, pdata->mii_bus->priv = pdata; pdata->mii_bus->read = smsc911x_mii_read; pdata->mii_bus->write = smsc911x_mii_write; - pdata->mii_bus->irq = pdata->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; 
++i) - pdata->mii_bus->irq[i] = PHY_POLL; + memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus)); pdata->mii_bus->parent = &pdev->dev; @@ -1992,7 +1988,8 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, } for (i = 0; i <= 31; i++) - data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); + data[j++] = smsc911x_mii_read(phy_dev->mdio.bus, + phy_dev->mdio.addr, i); } static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 4a90cdae5444..8594b9e8b28b 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -78,7 +78,6 @@ struct smsc9420_pdata { struct phy_device *phy_dev; struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; int last_duplex; int last_carrier; }; @@ -316,7 +315,8 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, return; for (i = 0; i <= 31; i++) - data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); + data[j++] = smsc9420_mii_read(phy_dev->mdio.bus, + phy_dev->mdio.addr, i); } static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) @@ -1158,16 +1158,13 @@ static int smsc9420_mii_probe(struct net_device *dev) BUG_ON(pd->phy_dev); /* Device only supports internal PHY at address 1 */ - if (!pd->mii_bus->phy_map[1]) { + phydev = mdiobus_get_phy(pd->mii_bus, 1); + if (!phydev) { netdev_err(dev, "no PHY found at address 1\n"); return -ENODEV; } - phydev = pd->mii_bus->phy_map[1]; - netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n", - phydev->addr, phydev->phy_id); - - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1175,14 +1172,13 @@ static int smsc9420_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); - /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause); phydev->advertising = phydev->supported; + phy_attached_info(phydev); + pd->phy_dev = phydev; pd->last_duplex = -1; pd->last_carrier = -1; @@ -1193,7 +1189,7 @@ static int smsc9420_mii_probe(struct net_device *dev) static int smsc9420_mii_init(struct net_device *dev) { struct smsc9420_pdata *pd = netdev_priv(dev); - int err = -ENXIO, i; + int err = -ENXIO; pd->mii_bus = mdiobus_alloc(); if (!pd->mii_bus) { @@ -1206,9 +1202,6 @@ static int smsc9420_mii_init(struct net_device *dev) pd->mii_bus->priv = pd; pd->mii_bus->read = smsc9420_mii_read; pd->mii_bus->write = smsc9420_mii_write; - pd->mii_bus->irq = pd->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pd->mii_bus->irq[i] = PHY_POLL; /* Mask all PHYs except ID 1 (internal) */ pd->mii_bus->phy_mask = ~(1 << 1); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 623c6ed8764a..1e19c8fd8b82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -137,6 +137,31 @@ struct stmmac_extra_stats { unsigned long pcs_link; unsigned long pcs_duplex; unsigned long pcs_speed; + /* debug register */ + unsigned long mtl_tx_status_fifo_full; + unsigned long mtl_tx_fifo_not_empty; + unsigned long mmtl_fifo_ctrl; + unsigned long mtl_tx_fifo_read_ctrl_write; + unsigned long 
mtl_tx_fifo_read_ctrl_wait; + unsigned long mtl_tx_fifo_read_ctrl_read; + unsigned long mtl_tx_fifo_read_ctrl_idle; + unsigned long mac_tx_in_pause; + unsigned long mac_tx_frame_ctrl_xfer; + unsigned long mac_tx_frame_ctrl_idle; + unsigned long mac_tx_frame_ctrl_wait; + unsigned long mac_tx_frame_ctrl_pause; + unsigned long mac_gmii_tx_proto_engine; + unsigned long mtl_rx_fifo_fill_level_full; + unsigned long mtl_rx_fifo_fill_above_thresh; + unsigned long mtl_rx_fifo_fill_below_thresh; + unsigned long mtl_rx_fifo_fill_level_empty; + unsigned long mtl_rx_fifo_read_ctrl_flush; + unsigned long mtl_rx_fifo_read_ctrl_read_data; + unsigned long mtl_rx_fifo_read_ctrl_status; + unsigned long mtl_rx_fifo_read_ctrl_idle; + unsigned long mtl_rx_fifo_ctrl_active; + unsigned long mac_rx_frame_ctrl_fifo; + unsigned long mac_gmii_rx_proto_engine; }; /* CSR Frequency Access Defines*/ @@ -408,12 +433,13 @@ struct stmmac_ops { void (*set_eee_pls)(struct mac_device_info *hw, int link); void (*ctrl_ane)(struct mac_device_info *hw, bool restart); void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); }; /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); - void (*config_sub_second_increment) (void __iomem *ioaddr); + u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); int (*config_addend) (void __iomem *ioaddr, u32 addend); int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 82de68b1a452..36d3355f2fb0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -198,19 +198,19 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) return 0; } -static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) +static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) { struct device *dev = &gmac->pdev->dev; gmac->phy_mode = of_get_phy_mode(dev->of_node); if (gmac->phy_mode < 0) { dev_err(dev, "missing phy mode property\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) { dev_err(dev, "missing qcom id property\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } /* The GMACs are called 1 to 4 in the documentation, but to simplify the @@ -219,13 +219,13 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) */ if (gmac->id < 0 || gmac->id > 3) { dev_err(dev, "invalid gmac id\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } gmac->core_clk = devm_clk_get(dev, "stmmaceth"); if (IS_ERR(gmac->core_clk)) { dev_err(dev, "missing stmmaceth clk property\n"); - return gmac->core_clk; + return PTR_ERR(gmac->core_clk); } clk_set_rate(gmac->core_clk, 266000000); @@ -234,18 +234,16 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) "qcom,nss-common"); if (IS_ERR(gmac->nss_common)) { dev_err(dev, "missing nss-common node\n"); - return gmac->nss_common; + return PTR_ERR(gmac->nss_common); } /* Setup the register map for the qsgmii csr registers */ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node, "qcom,qsgmii-csr"); - if (IS_ERR(gmac->qsgmii_csr)) { + if (IS_ERR(gmac->qsgmii_csr)) dev_err(dev, "missing qsgmii-csr node\n"); - return gmac->qsgmii_csr; - } - return NULL; 
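/*
 * Illustrative sketch only, not part of the patch: the ipq806x_gmac_of_parse()
 * conversion around this point turns a helper that returned ERR_PTR() values
 * into one that returns a plain int, so callers can simply test "if (err)".
 * PTR_ERR_OR_ZERO() from <linux/err.h> folds the trailing IS_ERR() check into
 * one line. Function and clock names below are placeholders.
 */
#include <linux/clk.h>
#include <linux/err.h>

static int example_parse_core_clk(struct device *dev, struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, "stmmaceth");

        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* propagate -ENOENT, -EPROBE_DEFER, ... */

        *out = clk;
        return 0;                       /* callers check "if (err)", not IS_ERR() */
}

/* Equivalent short form when the pointer is the last thing to validate. */
static int example_check_last(struct clk *clk)
{
        return PTR_ERR_OR_ZERO(clk);    /* 0 if valid, -errno if IS_ERR(clk) */
}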
+ return PTR_ERR_OR_ZERO(gmac->qsgmii_csr); } static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) @@ -262,7 +260,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct ipq806x_gmac *gmac; int val; - void *err; + int err; val = stmmac_get_platform_resources(pdev, &stmmac_res); if (val) @@ -279,9 +277,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); - if (IS_ERR(err)) { + if (err) { dev_err(dev, "device tree parsing error\n"); - return PTR_ERR(err); + return err; } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 401383b252a8..f0d797ab74d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -32,6 +32,7 @@ #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 #define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 +#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 #define EMAC_SPLITTER_CTRL_REG 0x0 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 @@ -47,6 +48,7 @@ struct socfpga_dwmac { struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; void __iomem *splitter_base; + bool f2h_ptp_ref_clk; }; static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) @@ -116,6 +118,8 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * return -EINVAL; } + dwmac->f2h_ptp_ref_clk = of_property_read_bool(np, "altr,f2h_ptp_ref_clk"); + np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); if (np_splitter) { if (of_address_to_resource(np_splitter, 0, &res_splitter)) { @@ -171,6 +175,11 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; + if (dwmac->f2h_ptp_ref_clk) + ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); + else + ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); + regmap_write(sys_mgr_base_addr, reg_offset, ctrl); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 52b8ed9bd87c..adff46375a32 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + sun7i_gmac_exit(pdev, plat_dat->bsp_priv); + + return ret; } static const struct of_device_id sun7i_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index b3fe0575ff6b..8831a053ac13 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -34,6 +34,7 @@ #define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ #define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC_DEBUG 0x00000024 /* GMAC debug register */ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ #define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ @@ -177,6 +178,47 @@ enum inter_frame_gap { #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow 
Control Enable */ #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ +/* DEBUG Register defines */ +/* MTL TxStatus FIFO */ +#define GMAC_DEBUG_TXSTSFSTS BIT(25) /* MTL TxStatus FIFO Full Status */ +#define GMAC_DEBUG_TXFSTS BIT(24) /* MTL Tx FIFO Not Empty Status */ +#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */ +/* MTL Tx FIFO Read Controller Status */ +#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20) +#define GMAC_DEBUG_TRCSTS_SHIFT 20 +#define GMAC_DEBUG_TRCSTS_IDLE 0 +#define GMAC_DEBUG_TRCSTS_READ 1 +#define GMAC_DEBUG_TRCSTS_TXW 2 +#define GMAC_DEBUG_TRCSTS_WRITE 3 +#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */ +/* MAC Transmit Frame Controller Status */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +/* MAC GMII or MII Transmit Protocol Engine Status */ +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */ +#define GMAC_DEBUG_RXFSTS_SHIFT 8 +#define GMAC_DEBUG_RXFSTS_EMPTY 0 +#define GMAC_DEBUG_RXFSTS_BT 1 +#define GMAC_DEBUG_RXFSTS_AT 2 +#define GMAC_DEBUG_RXFSTS_FULL 3 +#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */ +#define GMAC_DEBUG_RRCSTS_SHIFT 5 +#define GMAC_DEBUG_RRCSTS_IDLE 0 +#define GMAC_DEBUG_RRCSTS_RDATA 1 +#define GMAC_DEBUG_RRCSTS_RSTAT 2 +#define GMAC_DEBUG_RRCSTS_FLUSH 3 +#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */ +/* MAC Receive Frame Controller FIFO Status */ +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +/* MAC GMII or MII Receive Protocol Engine Status */ +#define GMAC_DEBUG_RPESTS BIT(0) + /*--- DMA BLOCK defines ---*/ /* DMA Bus Mode register defines */ #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 371a669d69fd..c2941172f6d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -397,6 +397,80 @@ static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; } +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & GMAC_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & GMAC_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & GMAC_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK) + >> GMAC_DEBUG_TRCSTS_SHIFT; + if (trcsts == GMAC_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == GMAC_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == GMAC_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + 
x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK) + >> GMAC_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == GMAC_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + x->mtl_rx_fifo_fill_level_empty++; + } + if (value & GMAC_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >> + GMAC_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .rx_ipc = dwmac1000_rx_ipc_enable, @@ -413,6 +487,7 @@ static const struct stmmac_ops dwmac1000_ops = { .set_eee_pls = dwmac1000_set_eee_pls, .ctrl_ane = dwmac1000_ctrl_ane, .get_adv = dwmac1000_get_adv, + .debug = dwmac1000_debug, }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2e51b816a7e8..4c6486cc80fb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -136,6 +136,31 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(irq_pcs_ane_n), STMMAC_STAT(irq_pcs_link_n), STMMAC_STAT(irq_rgmii_n), + /* DEBUG */ + STMMAC_STAT(mtl_tx_status_fifo_full), + STMMAC_STAT(mtl_tx_fifo_not_empty), + STMMAC_STAT(mmtl_fifo_ctrl), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_write), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_read), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle), + STMMAC_STAT(mac_tx_in_pause), + STMMAC_STAT(mac_tx_frame_ctrl_xfer), + STMMAC_STAT(mac_tx_frame_ctrl_idle), + STMMAC_STAT(mac_tx_frame_ctrl_wait), + STMMAC_STAT(mac_tx_frame_ctrl_pause), + STMMAC_STAT(mac_gmii_tx_proto_engine), + STMMAC_STAT(mtl_rx_fifo_fill_level_full), + STMMAC_STAT(mtl_rx_fifo_fill_above_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_below_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_level_empty), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_status), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle), + STMMAC_STAT(mtl_rx_fifo_ctrl_active), + STMMAC_STAT(mac_rx_frame_ctrl_fifo), + STMMAC_STAT(mac_gmii_rx_proto_engine), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -497,6 +522,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, if (val) priv->xstats.phy_eee_wakeup_error_n = val; } + + if ((priv->hw->mac->debug) && + (priv->synopsys_id >= DWMAC_CORE_3_50)) + priv->hw->mac->debug(priv->ioaddr, + (void *)&priv->xstats); } for (i = 0; i < STMMAC_STATS_LEN; i++) { char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 76ad214b4036..a77f68918010 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -33,22 +33,25 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) writel(data, ioaddr + PTP_TCR); } -static void stmmac_config_sub_second_increment(void __iomem *ioaddr) +static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, + u32 ptp_clock) { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; /* Convert the ptp_clock to nano second - * formula = (1/ptp_clock) * 1000000000 + * formula = (2/ptp_clock) * 1000000000 * where, ptp_clock = 50MHz. */ - data = (1000000000ULL / 50000000); + data = (2000000000ULL / ptp_clock); /* 0.465ns accuracy */ if (!(value & PTP_TCR_TSCTRLSSR)) data = (data * 1000) / 465; writel(data, ioaddr + PTP_SSIR); + + return data; } static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3c6549aee11d..c21015b68097 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -53,6 +53,7 @@ #include "stmmac.h" #include <linux/reset.h> #include <linux/of_mdio.h> +#include "dwmac1000.h" #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) @@ -185,7 +186,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } } @@ -435,6 +436,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) u32 ts_master_en = 0; u32 ts_event_en = 0; u32 value = 0; + u32 sec_inc; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -598,24 +600,19 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | ts_master_en | snap_type_sel); - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); /* program Sub Second Increment reg */ - priv->hw->ptp->config_sub_second_increment(priv->ioaddr); + sec_inc = priv->hw->ptp->config_sub_second_increment( + priv->ioaddr, priv->clk_ptp_rate); + temp = div_u64(1000000000ULL, sec_inc); /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = clk_ptp_ref_i/50MHz - * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; - * NOTE: clk_ptp_ref_i should be >= 50MHz to - * achieve 20ns accuracy. 
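/*
 * Worked example (illustrative, not the driver code) of the arithmetic the
 * stmmac timestamping hunks switch to: the sub-second increment is derived
 * from the real clk_ptp_ref rate instead of an assumed 50MHz, and the addend
 * becomes 2^32 scaled by the ratio of the effective counter frequency to
 * clk_ptp_rate, as in the replacement code below. Names are placeholders.
 */
#include <linux/math64.h>
#include <linux/types.h>

static u32 example_ptp_addend(u32 clk_ptp_rate, u32 sub_second_inc_ns)
{
        /* counter frequency implied by the programmed increment, in Hz */
        u64 counter_hz = div_u64(1000000000ULL, sub_second_inc_ns);

        /* addend = (2^32 * counter_hz) / clk_ptp_rate */
        return div_u64(counter_hz << 32, clk_ptp_rate);
}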
- * - * 2^x * y == (y << x), hence - * 2^32 * 50000000 ==> (50000000 << 32) + * where, freq_div_ratio = 1e9ns/sec_inc */ - temp = (u64) (50000000ULL << 32); + temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ioaddr, priv->default_addend); @@ -2402,7 +2399,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, features &= ~NETIF_F_RXCSUM; if (!priv->plat->tx_coe) - features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_CSUM_MASK; /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames @@ -2410,7 +2407,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, * the TX csum insertionin the TDES and not use SF. */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) - features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_CSUM_MASK; return features; } @@ -3046,8 +3043,6 @@ int stmmac_suspend(struct net_device *ndev) priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); - stmmac_clear_descriptors(priv); - /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) { priv->hw->mac->pmt(priv->hw, priv->wolopts); @@ -3105,7 +3100,12 @@ int stmmac_resume(struct net_device *ndev) netif_device_attach(ndev); - init_dma_desc_rings(ndev, GFP_ATOMIC); + priv->cur_rx = 0; + priv->dirty_rx = 0; + priv->dirty_tx = 0; + priv->cur_tx = 0; + stmmac_clear_descriptors(priv); + stmmac_hw_setup(ndev, false); stmmac_init_tx_coalesce(priv); stmmac_set_rx_mode(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index bba670c42e37..0faf16336035 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -29,7 +29,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> - +#include <linux/of_mdio.h> #include <asm/io.h> #include "stmmac.h" @@ -196,25 +196,37 @@ int stmmac_mdio_register(struct net_device *ndev) { int err = 0; struct mii_bus *new_bus; - int *irqlist; struct stmmac_priv *priv = netdev_priv(ndev); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; int addr, found; + struct device_node *mdio_node = NULL; + struct device_node *child_node = NULL; if (!mdio_bus_data) return 0; + if (IS_ENABLED(CONFIG_OF)) { + for_each_child_of_node(priv->device->of_node, child_node) { + if (of_device_is_compatible(child_node, + "snps,dwmac-mdio")) { + mdio_node = child_node; + break; + } + } + + if (mdio_node) { + netdev_dbg(ndev, "FOUND MDIO subnode\n"); + } else { + netdev_warn(ndev, "No MDIO subnode found\n"); + } + } + new_bus = mdiobus_alloc(); if (new_bus == NULL) return -ENOMEM; - if (mdio_bus_data->irqs) { - irqlist = mdio_bus_data->irqs; - } else { - for (addr = 0; addr < PHY_MAX_ADDR; addr++) - priv->mii_irq[addr] = PHY_POLL; - irqlist = priv->mii_irq; - } + if (mdio_bus_data->irqs) + memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); #ifdef CONFIG_OF if (priv->device->of_node) @@ -228,10 +240,13 @@ int stmmac_mdio_register(struct net_device *ndev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; - new_bus->irq = irqlist; new_bus->phy_mask = mdio_bus_data->phy_mask; new_bus->parent = priv->device; - err = mdiobus_register(new_bus); + + if (mdio_node) + err = of_mdiobus_register(new_bus, mdio_node); + else + err = mdiobus_register(new_bus); if (err != 0) { 
pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); goto bus_register_fail; @@ -239,7 +254,7 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - struct phy_device *phydev = new_bus->phy_map[addr]; + struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); if (phydev) { int act = 0; char irq_num[4]; @@ -251,7 +266,8 @@ int stmmac_mdio_register(struct net_device *ndev) */ if ((mdio_bus_data->irqs == NULL) && (mdio_bus_data->probed_phy_irq > 0)) { - irqlist[addr] = mdio_bus_data->probed_phy_irq; + new_bus->irq[addr] = + mdio_bus_data->probed_phy_irq; phydev->irq = mdio_bus_data->probed_phy_irq; } @@ -278,13 +294,13 @@ int stmmac_mdio_register(struct net_device *ndev) } pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n", ndev->name, phydev->phy_id, addr, - irq_str, dev_name(&phydev->dev), + irq_str, phydev_name(phydev), act ? " active" : ""); found = 1; } } - if (!found) { + if (!found && !mdio_node) { pr_warn("%s: No PHY found\n", ndev->name); mdiobus_unregister(new_bus); mdiobus_free(new_bus); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index d02691ba3d7f..6a52fa18cbf2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -146,7 +146,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if (plat->phy_node || plat->phy_bus_name) + if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 9066d7a8483c..70814b7386b3 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -972,9 +972,7 @@ static int dwceqos_mii_probe(struct net_device *ndev) } if (netif_msg_probe(lp)) - netdev_dbg(lp->ndev, - "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n", - phydev, phydev->phy_id, phydev->addr); + phy_attached_info(phydev); phydev->supported &= PHY_GBIT_FEATURES; @@ -983,14 +981,6 @@ static int dwceqos_mii_probe(struct net_device *ndev) lp->duplex = DUPLEX_UNKNOWN; lp->phy_dev = phydev; - if (netif_msg_probe(lp)) { - netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n", - lp->phy_dev->addr, lp->phy_dev->phy_id); - - netdev_dbg(lp->ndev, "attach [%s] phy driver\n", - lp->phy_dev->drv->name); - } - return 0; } @@ -1230,7 +1220,7 @@ static void dwceqos_enable_mmc_interrupt(struct net_local *lp) static int dwceqos_mii_init(struct net_local *lp) { - int ret = -ENXIO, i; + int ret = -ENXIO; struct resource res; struct device_node *mdionode; @@ -1251,24 +1241,14 @@ static int dwceqos_mii_init(struct net_local *lp) lp->mii_bus->priv = lp; lp->mii_bus->parent = &lp->ndev->dev; - lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!lp->mii_bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; of_address_to_resource(lp->pdev->dev.of_node, 0, &res); snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; return 0; -err_out_free_mdio_irq: - kfree(lp->mii_bus->irq); 
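/*
 * Sketch (not from the patch) of the simplified MDIO bus setup that the
 * removals around here converge on: mdiobus_alloc() now allocates the
 * per-address interrupt table itself and presets every entry to PHY_POLL,
 * so drivers drop their private irq arrays, the kmalloc()/kfree() pairs and
 * the explicit PHY_POLL loops. Names and the bus id are placeholders.
 */
#include <linux/kernel.h>
#include <linux/phy.h>

static struct mii_bus *example_mdio_init(struct device *parent,
                                         int (*rd)(struct mii_bus *, int, int),
                                         int (*wr)(struct mii_bus *, int, int, u16))
{
        struct mii_bus *bus = mdiobus_alloc();

        if (!bus)
                return NULL;

        bus->name = "example-mdio";
        snprintf(bus->id, MII_BUS_ID_SIZE, "example-0");
        bus->read = rd;                 /* no bus->irq setup needed any more */
        bus->write = wr;
        bus->parent = parent;

        if (mdiobus_register(bus)) {
                mdiobus_free(bus);      /* and no kfree(bus->irq) on teardown */
                return NULL;
        }
        return bus;
}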
err_out_free_mdiobus: mdiobus_free(lp->mii_bus); err_out: @@ -2107,7 +2087,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, dd = &lp->tx_descs[lp->tx_next]; /* Set DMA Descriptor fields */ - dd->des0 = dma_handle; + dd->des0 = dma_handle + consumed_size; dd->des1 = 0; dd->des2 = dma_size; @@ -2987,7 +2967,6 @@ static int dwceqos_remove(struct platform_device *pdev) if (lp->phy_dev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 77d26fe286c0..7eef45e6d70a 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -316,8 +316,6 @@ static int cpmac_mdio_reset(struct mii_bus *bus) return 0; } -static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; - static struct mii_bus *cpmac_mii; static void cpmac_set_multicast_list(struct net_device *dev) @@ -1118,7 +1116,7 @@ static int cpmac_probe(struct platform_device *pdev) for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { if (!(pdata->phy_mask & (1 << phy_id))) continue; - if (!cpmac_mii->phy_map[phy_id]) + if (!mdiobus_get_phy(cpmac_mii, phy_id)) continue; strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); break; @@ -1226,7 +1224,6 @@ int cpmac_init(void) cpmac_mii->read = cpmac_mdio_read; cpmac_mii->write = cpmac_mdio_write; cpmac_mii->reset = cpmac_mdio_reset; - cpmac_mii->irq = mii_irqs; cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 48b92c9de12a..42fdfd4d9d4f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1159,8 +1159,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->data->phy_id, slave->slave_num); slave->phy = NULL; } else { - dev_info(priv->dev, "phy found : id is : 0x%x\n", - slave->phy->phy_id); + phy_attached_info(slave->phy); + phy_start(slave->phy); /* Configure GMII_SEL register */ @@ -2026,45 +2026,55 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, for_each_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; - u32 phyid; int lenp; const __be32 *parp; - struct device_node *mdio_node; - struct platform_device *mdio; /* This is no slave child node, continue */ if (strcmp(slave_node->name, "slave")) continue; priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); + parp = of_get_property(slave_node, "phy_id", &lenp); if (of_phy_is_fixed_link(slave_node)) { - struct phy_device *pd; + struct device_node *phy_node; + struct phy_device *phy_dev; + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ ret = of_phy_register_fixed_link(slave_node); if (ret) return ret; - pd = of_phy_find_device(slave_node); - if (!pd) + phy_node = of_node_get(slave_node); + phy_dev = of_phy_find_device(phy_node); + if (!phy_dev) return -ENODEV; snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, pd->bus->id, pd->phy_id); - goto no_phy_slave; - } - parp = of_get_property(slave_node, "phy_id", &lenp); - if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { - dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); + PHY_ID_FMT, phy_dev->mdio.bus->id, + phy_dev->mdio.addr); + } else if (parp) { + u32 phyid; + struct device_node *mdio_node; + struct platform_device *mdio; + + if (lenp != (sizeof(__be32) * 2)) { + dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i); + goto no_phy_slave; + } + mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); + phyid = be32_to_cpup(parp+1); + mdio = of_find_device_by_node(mdio_node); + of_node_put(mdio_node); + if (!mdio) { + dev_err(&pdev->dev, "Missing mdio platform device\n"); + return -EINVAL; + } + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), + PHY_ID_FMT, mdio->name, phyid); + } else { + dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); goto no_phy_slave; } - mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); - phyid = be32_to_cpup(parp+1); - mdio = of_find_device_by_node(mdio_node); - of_node_put(mdio_node); - if (!mdio) { - dev_err(&pdev->dev, "Missing mdio platform device\n"); - return -EINVAL; - } - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, mdio->name, phyid); slave_data->phy_if = of_get_phy_mode(slave_node); if (slave_data->phy_if < 0) { dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", @@ -2418,7 +2428,7 @@ static int cpsw_probe(struct platform_device *pdev) ndev->irq = platform_get_irq(pdev, 1); if (ndev->irq < 0) { dev_err(priv->dev, "error getting irq resource\n"); - ret = -ENOENT; + ret = ndev->irq; goto clean_ale_ret; } @@ -2439,8 +2449,10 @@ static int cpsw_probe(struct platform_device *pdev) /* RX IRQ */ irq = platform_get_irq(pdev, 1); - if (irq < 0) + if (irq < 0) { + ret = irq; goto clean_ale_ret; + } priv->irqs_table[0] = irq; ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, @@ -2452,8 +2464,10 @@ static int cpsw_probe(struct platform_device *pdev) /* TX IRQ */ irq = platform_get_irq(pdev, 2); - if (irq < 0) + if (irq < 0) { + ret = irq; goto clean_ale_ret; + } priv->irqs_table[1] = irq; ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt, @@ -2469,7 +2483,7 @@ static int cpsw_probe(struct platform_device *pdev) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 33bd3b902304..5d9abedd6b75 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1644,10 +1644,7 @@ static int emac_dev_open(struct net_device *ndev) priv->speed = 0; priv->duplex = ~0; - dev_info(emac_dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, id=%x)\n", - priv->phydev->drv->name, dev_name(&priv->phydev->dev), - priv->phydev->phy_id); + phy_attached_info(priv->phydev); } if 
(!priv->phydev) { diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index c00084d689f3..4e7c9b9b042a 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -393,10 +393,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) /* scan and dump the bus */ for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phy = data->bus->phy_map[addr]; + phy = mdiobus_get_phy(data->bus, addr); if (phy) { dev_info(dev, "phy[%d]: device %s, driver %s\n", - phy->addr, dev_name(&phy->dev), + phy->mdio.addr, phydev_name(phy), phy->drv ? phy->drv->name : "unknown"); } } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index bb1bb72121c0..17a26a429b71 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -113,7 +113,7 @@ struct netcp_intf { #define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS struct netcp_packet { struct sk_buff *skb; - u32 *epib; + __le32 *epib; u32 *psdata; unsigned int psdata_len; struct netcp_intf *netcp; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 37b9b39192ec..c61d66d38634 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -109,69 +109,80 @@ module_param(netcp_debug_level, int, 0); MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)"); /* Helper functions - Get/Set */ -static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc, +static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, struct knav_dma_desc *desc) { - *buff_len = desc->buff_len; - *buff = desc->buff; - *ndesc = desc->next_desc; + *buff_len = le32_to_cpu(desc->buff_len); + *buff = le32_to_cpu(desc->buff); + *ndesc = le32_to_cpu(desc->next_desc); } -static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc) +static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) { - *pad0 = desc->pad[0]; - *pad1 = desc->pad[1]; + *pad0 = le32_to_cpu(desc->pad[0]); + *pad1 = le32_to_cpu(desc->pad[1]); + *pad2 = le32_to_cpu(desc->pad[2]); } -static void get_org_pkt_info(u32 *buff, u32 *buff_len, +static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) +{ + u64 pad64; + + pad64 = le32_to_cpu(desc->pad[0]) + + ((u64)le32_to_cpu(desc->pad[1]) << 32); + *padptr = (void *)(uintptr_t)pad64; +} + +static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, struct knav_dma_desc *desc) { - *buff = desc->orig_buff; - *buff_len = desc->orig_len; + *buff = le32_to_cpu(desc->orig_buff); + *buff_len = le32_to_cpu(desc->orig_len); } -static void get_words(u32 *words, int num_words, u32 *desc) +static void get_words(dma_addr_t *words, int num_words, __le32 *desc) { int i; for (i = 0; i < num_words; i++) - words[i] = desc[i]; + words[i] = le32_to_cpu(desc[i]); } -static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc, +static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc, struct knav_dma_desc *desc) { - desc->buff_len = buff_len; - desc->buff = buff; - desc->next_desc = ndesc; + desc->buff_len = cpu_to_le32(buff_len); + desc->buff = cpu_to_le32(buff); + desc->next_desc = cpu_to_le32(ndesc); } static void set_desc_info(u32 desc_info, u32 pkt_info, struct knav_dma_desc *desc) { - desc->desc_info = desc_info; - desc->packet_info = pkt_info; + desc->desc_info = cpu_to_le32(desc_info); + desc->packet_info = cpu_to_le32(pkt_info); } -static void set_pad_info(u32 pad0, u32 pad1, struct 
knav_dma_desc *desc) +static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) { - desc->pad[0] = pad0; - desc->pad[1] = pad1; + desc->pad[0] = cpu_to_le32(pad0); + desc->pad[1] = cpu_to_le32(pad1); + desc->pad[2] = cpu_to_le32(pad1); } -static void set_org_pkt_info(u32 buff, u32 buff_len, +static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, struct knav_dma_desc *desc) { - desc->orig_buff = buff; - desc->orig_len = buff_len; + desc->orig_buff = cpu_to_le32(buff); + desc->orig_len = cpu_to_le32(buff_len); } -static void set_words(u32 *words, int num_words, u32 *desc) +static void set_words(u32 *words, int num_words, __le32 *desc) { int i; for (i = 0; i < num_words; i++) - desc[i] = words[i]; + desc[i] = cpu_to_le32(words[i]); } /* Read the e-fuse value as 32 bit values to be endian independent */ @@ -570,6 +581,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, dma_addr_t dma_desc, dma_buf; unsigned int buf_len, dma_sz = sizeof(*ndesc); void *buf_ptr; + u32 pad[2]; u32 tmp; get_words(&dma_desc, 1, &desc->next_desc); @@ -581,13 +593,15 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, break; } get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); - get_pad_info((u32 *)&buf_ptr, &tmp, ndesc); + get_pad_ptr(&buf_ptr, ndesc); dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); __free_page(buf_ptr); knav_pool_desc_put(netcp->rx_pool, desc); } - get_pad_info((u32 *)&buf_ptr, &buf_len, desc); + get_pad_info(&pad[0], &pad[1], &buf_len, desc); + buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); + if (buf_ptr) netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); knav_pool_desc_put(netcp->rx_pool, desc); @@ -625,8 +639,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) dma_addr_t dma_desc, dma_buff; struct netcp_packet p_info; struct sk_buff *skb; + u32 pad[2]; void *org_buf_ptr; - u32 tmp; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); if (!dma_desc) @@ -639,7 +653,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); - get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc); + get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); + org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); if (unlikely(!org_buf_ptr)) { dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); @@ -664,6 +679,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) /* Fill in the page fragment list */ while (dma_desc) { struct page *page; + void *ptr; ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!ndesc)) { @@ -672,14 +688,15 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); - get_pad_info((u32 *)&page, &tmp, ndesc); + get_pad_ptr(&ptr, ndesc); + page = ptr; if (likely(dma_buff && buf_len && page)) { dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, DMA_FROM_DEVICE); } else { - dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n", - (void *)dma_buff, buf_len, page); + dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n", + &dma_buff, buf_len, page); goto free_desc; } @@ -750,7 +767,6 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) unsigned int buf_len, dma_sz; dma_addr_t dma; void *buf_ptr; - u32 tmp; /* Allocate descriptor */ while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) { @@ -761,7 +777,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) } 
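/*
 * Illustration only (not driver code) of the descriptor bookkeeping the
 * netcp hunks around here move to: instead of stuffing a 32-bit virtual
 * pointer straight into one pad word, the pointer is split with
 * lower/upper_32_bits() across two little-endian words so the driver also
 * works on 64-bit and big-endian hosts. The struct is a stand-in for
 * struct knav_dma_desc.
 */
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_desc {
        __le32 pad[3];                  /* device-endian scratch words */
};

static void example_stash_ptr(struct example_desc *d, void *ptr)
{
        d->pad[0] = cpu_to_le32(lower_32_bits((uintptr_t)ptr));
        d->pad[1] = cpu_to_le32(upper_32_bits((uintptr_t)ptr));
}

static void *example_fetch_ptr(const struct example_desc *d)
{
        u64 v = le32_to_cpu(d->pad[0]) |
                ((u64)le32_to_cpu(d->pad[1]) << 32);

        return (void *)(uintptr_t)v;
}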
get_org_pkt_info(&dma, &buf_len, desc); - get_pad_info((u32 *)&buf_ptr, &tmp, desc); + get_pad_ptr(&buf_ptr, desc); if (unlikely(!dma)) { dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); @@ -813,7 +829,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) struct page *page; dma_addr_t dma; void *bufptr; - u32 pad[2]; + u32 pad[3]; /* Allocate descriptor */ hwdesc = knav_pool_desc_get(netcp->rx_pool); @@ -830,7 +846,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bufptr = netdev_alloc_frag(primary_buf_len); - pad[1] = primary_buf_len; + pad[2] = primary_buf_len; if (unlikely(!bufptr)) { dev_warn_ratelimited(netcp->ndev_dev, @@ -842,7 +858,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (unlikely(dma_mapping_error(netcp->dev, dma))) goto fail; - pad[0] = (u32)bufptr; + pad[0] = lower_32_bits((uintptr_t)bufptr); + pad[1] = upper_32_bits((uintptr_t)bufptr); } else { /* Allocate a secondary receive queue entry */ @@ -853,8 +870,9 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) } buf_len = PAGE_SIZE; dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); - pad[0] = (u32)page; - pad[1] = 0; + pad[0] = lower_32_bits(dma); + pad[1] = upper_32_bits(dma); + pad[2] = 0; } desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; @@ -864,7 +882,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << KNAV_DMA_DESC_RETQ_SHIFT; set_org_pkt_info(dma, buf_len, hwdesc); - set_pad_info(pad[0], pad[1], hwdesc); + set_pad_info(pad[0], pad[1], pad[2], hwdesc); set_desc_info(desc_info, pkt_info, hwdesc); /* Push to FDQs */ @@ -935,8 +953,8 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, dma_unmap_single(netcp->dev, dma_buf, buf_len, DMA_TO_DEVICE); else - dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n", - (void *)dma_buf, buf_len); + dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n", + &dma_buf, buf_len); knav_pool_desc_put(netcp->tx_pool, ndesc); ndesc = NULL; @@ -953,11 +971,11 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, unsigned int budget) { struct knav_dma_desc *desc; + void *ptr; struct sk_buff *skb; unsigned int dma_sz; dma_addr_t dma; int pkts = 0; - u32 tmp; while (budget--) { dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz); @@ -970,7 +988,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, continue; } - get_pad_info((u32 *)&skb, &tmp, desc); + get_pad_ptr(&ptr, desc); + skb = ptr; netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) { dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); @@ -1059,6 +1078,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) u32 page_offset = frag->page_offset; u32 buf_len = skb_frag_size(frag); dma_addr_t desc_dma; + u32 desc_dma_32; u32 pkt_info; dma_addr = dma_map_page(dev, page, page_offset, buf_len, @@ -1075,13 +1095,13 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) goto free_descs; } - desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, - (void *)ndesc); + desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc); pkt_info = (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << KNAV_DMA_DESC_RETQ_SHIFT; set_pkt_info(dma_addr, buf_len, 0, ndesc); - set_words(&desc_dma, 1, &pdesc->next_desc); + desc_dma_32 = (u32)desc_dma; + set_words(&desc_dma_32, 1, &pdesc->next_desc); pkt_len += buf_len; if (pdesc != desc) 
knav_pool_desc_map(netcp->tx_pool, pdesc, @@ -1129,8 +1149,8 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, p_info.ts_context = NULL; p_info.txtstamp_complete = NULL; p_info.epib = desc->epib; - p_info.psdata = desc->psdata; - memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32)); + p_info.psdata = (u32 __force *)desc->psdata; + memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32)); /* Find out where to inject the packet for transmission */ list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) { @@ -1154,11 +1174,12 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, /* update descriptor */ if (p_info.psdata_len) { - u32 *psdata = p_info.psdata; + /* psdata points to both native-endian and device-endian data */ + __le32 *psdata = (void __force *)p_info.psdata; memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, p_info.psdata_len); - set_words(psdata, p_info.psdata_len, psdata); + set_words(p_info.psdata, p_info.psdata_len, psdata); tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } @@ -1173,11 +1194,14 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, } set_words(&tmp, 1, &desc->packet_info); - set_words((u32 *)&skb, 1, &desc->pad[0]); + tmp = lower_32_bits((uintptr_t)&skb); + set_words(&tmp, 1, &desc->pad[0]); + tmp = upper_32_bits((uintptr_t)&skb); + set_words(&tmp, 1, &desc->pad[1]); if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { tmp = tx_pipe->switch_to_port; - set_words((u32 *)&tmp, 1, &desc->tag_info); + set_words(&tmp, 1, &desc->tag_info); } /* submit packet descriptor */ @@ -1990,7 +2014,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, /* NAPI register */ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); - netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); + netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); /* Register the network device */ ndev->dev_id = 0; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 4e70e7586a09..d543298d6750 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2178,7 +2178,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) return -ENODEV; } dev_dbg(priv->dev, "phy found: id is: 0x%s\n", - dev_name(&slave->phy->dev)); + phydev_name(slave->phy)); phy_start(slave->phy); phy_read_status(slave->phy); } @@ -2681,7 +2681,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, slave->phy = NULL; } else { dev_dbg(dev, "phy found: id is: 0x%s\n", - dev_name(&slave->phy->dev)); + phydev_name(slave->phy)); phy_start(slave->phy); phy_read_status(slave->phy); } diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 6f0a4495c7f3..298e059d0498 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1349,8 +1349,7 @@ static int tile_net_open_inner(struct net_device *dev) */ static void tile_net_open_retry(struct work_struct *w) { - struct delayed_work *dw = - container_of(w, struct delayed_work, work); + struct delayed_work *dw = to_delayed_work(w); struct tile_net_priv *priv = container_of(dw, struct tile_net_priv, retry_work); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 45ac38d29ed8..54874783476a 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -608,40 +608,25 @@ static void 
tc_handle_link_change(struct net_device *dev) static int tc_mii_probe(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; + struct phy_device *phydev; u32 dropmask; - /* find the first phy */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (lp->mii_bus->phy_map[phy_addr]) { - if (phydev) { - printk(KERN_ERR "%s: multiple PHYs found\n", - dev->name); - return -EINVAL; - } - phydev = lp->mii_bus->phy_map[phy_addr]; - break; - } - } - + phydev = phy_find_first(lp->mii_bus); if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } /* attach the mac to the phy */ - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &tc_handle_link_change, lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } - printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, id=%x)\n", - dev->name, phydev->drv->name, dev_name(&phydev->dev), - phydev->phy_id); + + phy_attached_info(phydev); /* mask with MAC supported features */ phydev->supported &= PHY_BASIC_FEATURES; @@ -669,7 +654,6 @@ static int tc_mii_init(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int err; - int i; lp->mii_bus = mdiobus_alloc(); if (lp->mii_bus == NULL) { @@ -684,18 +668,9 @@ static int tc_mii_init(struct net_device *dev) (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); lp->mii_bus->priv = dev; lp->mii_bus->parent = &lp->pci_dev->dev; - lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!lp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mii_bus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(lp->mii_bus); if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mii_bus; err = tc_mii_probe(dev); if (err) goto err_out_unregister_bus; @@ -703,8 +678,6 @@ static int tc_mii_init(struct net_device *dev) err_out_unregister_bus: mdiobus_unregister(lp->mii_bus); -err_out_free_mdio_irq: - kfree(lp->mii_bus->irq); err_out_free_mii_bus: mdiobus_free(lp->mii_bus); err_out: @@ -882,7 +855,6 @@ static void tc35815_remove_one(struct pci_dev *pdev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); free_netdev(dev); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 522abe2ff25a..902457e43628 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -337,7 +337,6 @@ struct temac_local { /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ - int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ /* IO registers, dma functions and IRQs */ void __iomem *regs; diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c index 415de1eaf641..7714aff78b7d 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c +++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c @@ -92,7 +92,6 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) bus->read = temac_mdio_read; bus->write = temac_mdio_write; bus->parent = lp->dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; @@ -114,7 +113,6 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) void 
temac_mdio_teardown(struct temac_local *lp) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 7cb9abac95c8..9ead4e269409 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -385,7 +385,6 @@ struct axidma_bd { * @phy_dev: Pointer to PHY device structure attached to the axienet_local * @phy_node: Pointer to device node structure * @mii_bus: Pointer to MII bus structure - * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space * @dma_err_tasklet: Tasklet structure to process Axi DMA errors @@ -426,7 +425,6 @@ struct axienet_local { /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ - int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ /* IO registers, dma functions and IRQs */ void __iomem *regs; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 507bbb0355c2..63307ea97846 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -212,7 +212,6 @@ issue: bus->read = axienet_mdio_read; bus->write = axienet_mdio_write; bus->parent = lp->dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; ret = of_mdiobus_register(bus, np1); @@ -232,7 +231,6 @@ issue: void axienet_mdio_teardown(struct axienet_local *lp) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index cf468c87ce57..e324b3092380 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -114,7 +114,6 @@ * @phy_dev: pointer to the PHY device * @phy_node: pointer to the PHY device node * @mii_bus: pointer to the MII bus - * @mdio_irqs: IRQs table for MDIO bus * @last_link: last link status * @has_mdio: indicates whether MDIO is included in the HW */ @@ -135,7 +134,6 @@ struct net_local { struct device_node *phy_node; struct mii_bus *mii_bus; - int mdio_irqs[PHY_MAX_ADDR]; int last_link; bool has_mdio; @@ -829,7 +827,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) dev_info(dev, "MDIO of the phy is not registered yet\n"); else - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); return 0; } @@ -852,7 +850,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus->read = xemaclite_mdio_read; bus->write = xemaclite_mdio_write; bus->parent = dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; @@ -1196,7 +1193,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev) /* Un-register the mii_bus, if configured */ if (lp->has_mdio) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de5c30c9f059..7456569f53c1 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -71,8 +71,14 @@ struct geneve_dev { __be16 dst_port; bool collect_md; struct gro_cells gro_cells; + u32 flags; }; +/* Geneve device flags */ +#define GENEVE_F_UDP_CSUM BIT(0) +#define 
GENEVE_F_UDP_ZERO_CSUM6_TX BIT(1) +#define GENEVE_F_UDP_ZERO_CSUM6_RX BIT(2) + struct geneve_sock { bool collect_md; struct list_head list; @@ -81,6 +87,7 @@ struct geneve_sock { int refcnt; struct udp_offload udp_offloads; struct hlist_head vni_list[VNI_HASH_SIZE]; + u32 flags; }; static inline __u32 geneve_net_vni_hash(u8 vni[3]) @@ -343,7 +350,7 @@ error: } static struct socket *geneve_create_sock(struct net *net, bool ipv6, - __be16 port) + __be16 port, u32 flags) { struct socket *sock; struct udp_port_cfg udp_conf; @@ -354,6 +361,8 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6, if (ipv6) { udp_conf.family = AF_INET6; udp_conf.ipv6_v6only = 1; + udp_conf.use_udp6_rx_checksums = + !(flags & GENEVE_F_UDP_ZERO_CSUM6_RX); } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = htonl(INADDR_ANY); @@ -371,16 +380,27 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6, static void geneve_notify_add_rx_port(struct geneve_sock *gs) { + struct net_device *dev; struct sock *sk = gs->sock->sk; + struct net *net = sock_net(sk); sa_family_t sa_family = sk->sk_family; + __be16 port = inet_sk(sk)->inet_sport; int err; if (sa_family == AF_INET) { - err = udp_add_offload(&gs->udp_offloads); + err = udp_add_offload(sock_net(sk), &gs->udp_offloads); if (err) pr_warn("geneve: udp_add_offload failed with status %d\n", err); } + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->netdev_ops->ndo_add_geneve_port) + dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, + port); + } + rcu_read_unlock(); } static int geneve_hlen(struct genevehdr *gh) @@ -480,7 +500,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff, /* Create new listen socket if needed */ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, - bool ipv6) + bool ipv6, u32 flags) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; @@ -492,7 +512,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, if (!gs) return ERR_PTR(-ENOMEM); - sock = geneve_create_sock(net, ipv6, port); + sock = geneve_create_sock(net, ipv6, port, flags); if (IS_ERR(sock)) { kfree(gs); return ERR_CAST(sock); @@ -521,8 +541,20 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, static void geneve_notify_del_rx_port(struct geneve_sock *gs) { + struct net_device *dev; struct sock *sk = gs->sock->sk; + struct net *net = sock_net(sk); sa_family_t sa_family = sk->sk_family; + __be16 port = inet_sk(sk)->inet_sport; + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->netdev_ops->ndo_del_geneve_port) + dev->netdev_ops->ndo_del_geneve_port(dev, sa_family, + port); + } + + rcu_read_unlock(); if (sa_family == AF_INET) udp_del_offload(&gs->udp_offloads); @@ -575,12 +607,13 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) goto out; } - gs = geneve_socket_create(net, geneve->dst_port, ipv6); + gs = geneve_socket_create(net, geneve->dst_port, ipv6, geneve->flags); if (IS_ERR(gs)) return PTR_ERR(gs); out: gs->collect_md = geneve->collect_md; + gs->flags = geneve->flags; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) geneve->sock6 = gs; @@ -642,11 +675,12 @@ static void geneve_build_header(struct genevehdr *geneveh, static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, - bool csum, bool xnet) + u32 flags, bool xnet) { struct genevehdr *gnvh; int min_headroom; int err; + bool udp_sum = !!(flags & GENEVE_F_UDP_CSUM); 
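/*
 * Minimal sketch of the checksum-policy plumbing the geneve changes here
 * introduce: three per-device flag bits describe the outer UDP checksum
 * behaviour, and per-packet TUNNEL_CSUM metadata can override the IPv4 bit
 * at transmit time. This mirrors only the bit handling; the macro and
 * function names are placeholders, not the in-tree symbols.
 */
#include <linux/bitops.h>
#include <linux/types.h>

#define EX_F_UDP_CSUM           BIT(0)  /* checksum the IPv4 outer UDP header */
#define EX_F_UDP_ZERO_CSUM6_TX  BIT(1)  /* send zero checksum over IPv6 */
#define EX_F_UDP_ZERO_CSUM6_RX  BIT(2)  /* accept zero checksum over IPv6 */

static u32 example_tx_flags(u32 dev_flags, bool per_packet_csum)
{
        if (per_packet_csum)
                return dev_flags | EX_F_UDP_CSUM;

        return dev_flags & ~EX_F_UDP_CSUM;
}

static bool example_udp6_sum(u32 flags)
{
        return !(flags & EX_F_UDP_ZERO_CSUM6_TX);       /* checksum unless zero-csum tx is set */
}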
skb_scrub_packet(skb, xnet); @@ -658,7 +692,7 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, goto free_rt; } - skb = udp_tunnel_handle_offloads(skb, csum); + skb = udp_tunnel_handle_offloads(skb, udp_sum); if (IS_ERR(skb)) { err = PTR_ERR(skb); goto free_rt; @@ -678,11 +712,12 @@ free_rt: #if IS_ENABLED(CONFIG_IPV6) static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb, __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, - bool csum, bool xnet) + u32 flags, bool xnet) { struct genevehdr *gnvh; int min_headroom; int err; + bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM6_TX); skb_scrub_packet(skb, xnet); @@ -694,7 +729,7 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb, goto free_dst; } - skb = udp_tunnel_handle_offloads(skb, csum); + skb = udp_tunnel_handle_offloads(skb, udp_sum); if (IS_ERR(skb)) { err = PTR_ERR(skb); goto free_dst; @@ -824,9 +859,9 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct flowi4 fl4; __u8 tos, ttl; __be16 sport; - bool udp_csum; __be16 df; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); + u32 flags = geneve->flags; if (geneve->collect_md) { if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { @@ -857,9 +892,13 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (key->tun_flags & TUNNEL_GENEVE_OPT) opts = ip_tunnel_info_opts(info); - udp_csum = !!(key->tun_flags & TUNNEL_CSUM); + if (key->tun_flags & TUNNEL_CSUM) + flags |= GENEVE_F_UDP_CSUM; + else + flags &= ~GENEVE_F_UDP_CSUM; + err = geneve_build_skb(rt, skb, key->tun_flags, vni, - info->options_len, opts, udp_csum, xnet); + info->options_len, opts, flags, xnet); if (unlikely(err)) goto err; @@ -867,9 +906,8 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { - udp_csum = false; err = geneve_build_skb(rt, skb, 0, geneve->vni, - 0, NULL, udp_csum, xnet); + 0, NULL, flags, xnet); if (unlikely(err)) goto err; @@ -880,12 +918,11 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); df = 0; } - err = udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, - tos, ttl, df, sport, geneve->dst_port, - !net_eq(geneve->net, dev_net(geneve->dev)), - !udp_csum); + udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, + tos, ttl, df, sport, geneve->dst_port, + !net_eq(geneve->net, dev_net(geneve->dev)), + !(flags & GENEVE_F_UDP_CSUM)); - iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; tx_error: @@ -912,8 +949,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct flowi6 fl6; __u8 prio, ttl; __be16 sport; - bool udp_csum; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); + u32 flags = geneve->flags; if (geneve->collect_md) { if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { @@ -942,19 +979,22 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (key->tun_flags & TUNNEL_GENEVE_OPT) opts = ip_tunnel_info_opts(info); - udp_csum = !!(key->tun_flags & TUNNEL_CSUM); + if (key->tun_flags & TUNNEL_CSUM) + flags |= GENEVE_F_UDP_CSUM; + else + flags &= ~GENEVE_F_UDP_CSUM; + err = geneve6_build_skb(dst, skb, key->tun_flags, vni, info->options_len, opts, - udp_csum, xnet); + flags, xnet); if (unlikely(err)) goto err; prio = ip_tunnel_ecn_encap(key->tos, iip, skb); ttl = key->ttl; } else { - udp_csum = false; err = geneve6_build_skb(dst, skb, 0, geneve->vni, - 0, NULL, udp_csum, xnet); + 0, NULL, flags, xnet); if (unlikely(err)) goto err; @@ -964,11 +1004,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = 1; ttl = ttl ? : ip6_dst_hoplimit(dst); } - err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, - &fl6.saddr, &fl6.daddr, prio, ttl, - sport, geneve->dst_port, !udp_csum); - - iptunnel_xmit_stats(err, &dev->stats, dev->tstats); + udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, + &fl6.saddr, &fl6.daddr, prio, ttl, + sport, geneve->dst_port, + !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX)); return NETDEV_TX_OK; tx_error: @@ -1067,6 +1106,30 @@ static struct device_type geneve_type = { .name = "geneve", }; +/* Calls the ndo_add_geneve_port of the caller in order to + * supply the listening GENEVE udp ports. Callers are expected + * to implement the ndo_add_geneve_port. + */ +void geneve_get_rx_port(struct net_device *dev) +{ + struct net *net = dev_net(dev); + struct geneve_net *gn = net_generic(net, geneve_net_id); + struct geneve_sock *gs; + sa_family_t sa_family; + struct sock *sk; + __be16 port; + + rcu_read_lock(); + list_for_each_entry_rcu(gs, &gn->sock_list, list) { + sk = gs->sock->sk; + sa_family = sk->sk_family; + port = inet_sk(sk)->inet_sport; + dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(geneve_get_rx_port); + /* Initialize the device structure. 
*/ static void geneve_setup(struct net_device *dev) { @@ -1099,6 +1162,9 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, [IFLA_GENEVE_PORT] = { .type = NLA_U16 }, [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG }, + [IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 }, + [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, + [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -1152,12 +1218,12 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, static int geneve_configure(struct net *net, struct net_device *dev, union geneve_addr *remote, __u32 vni, __u8 ttl, __u8 tos, __be16 dst_port, - bool metadata) + bool metadata, u32 flags) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); bool tun_collect_md, tun_on_same_port; - int err; + int err, encap_len; if (!remote) return -EINVAL; @@ -1183,12 +1249,21 @@ static int geneve_configure(struct net *net, struct net_device *dev, geneve->tos = tos; geneve->dst_port = dst_port; geneve->collect_md = metadata; + geneve->flags = flags; t = geneve_find_dev(gn, dst_port, remote, geneve->vni, &tun_on_same_port, &tun_collect_md); if (t) return -EBUSY; + /* make enough headroom for basic scenario */ + encap_len = GENEVE_BASE_HLEN + ETH_HLEN; + if (remote->sa.sa_family == AF_INET) + encap_len += sizeof(struct iphdr); + else + encap_len += sizeof(struct ipv6hdr); + dev->needed_headroom = encap_len + ETH_HLEN; + if (metadata) { if (tun_on_same_port) return -EPERM; @@ -1213,6 +1288,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, bool metadata = false; union geneve_addr remote = geneve_remote_unspec; __u32 vni = 0; + u32 flags = 0; if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) return -EINVAL; @@ -1253,8 +1329,20 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_COLLECT_METADATA]) metadata = true; + if (data[IFLA_GENEVE_UDP_CSUM] && + nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) + flags |= GENEVE_F_UDP_CSUM; + + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && + nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) + flags |= GENEVE_F_UDP_ZERO_CSUM6_TX; + + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] && + nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) + flags |= GENEVE_F_UDP_ZERO_CSUM6_RX; + return geneve_configure(net, dev, &remote, vni, ttl, tos, dst_port, - metadata); + metadata, flags); } static void geneve_dellink(struct net_device *dev, struct list_head *head) @@ -1273,6 +1361,9 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */ 0; } @@ -1309,6 +1400,14 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } + if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, + !!(geneve->flags & GENEVE_F_UDP_CSUM)) || + nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, + !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) || + nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_RX))) + goto nla_put_failure; + return 0; nla_put_failure: @@ -1342,7 +1441,7 @@ struct 
net_device *geneve_dev_create_fb(struct net *net, const char *name, return dev; err = geneve_configure(net, dev, &geneve_remote_unspec, - 0, 0, 0, htons(dst_port), true); + 0, 0, 0, htons(dst_port), true, 0); if (err) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 7c4a4151ef0f..5a1e98547031 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -683,14 +683,20 @@ static void sixpack_close(struct tty_struct *tty) if (!atomic_dec_and_test(&sp->refcnt)) down(&sp->dead_sem); - unregister_netdev(sp->dev); + /* We must stop the queue to avoid potentially scribbling + * on the free buffers. The sp->dead_sem is not sufficient + * to protect us from sp->xbuff access. + */ + netif_stop_queue(sp->dev); - del_timer(&sp->tx_t); - del_timer(&sp->resync_t); + del_timer_sync(&sp->tx_t); + del_timer_sync(&sp->resync_t); /* Free all 6pack frame buffers. */ kfree(sp->rbuff); kfree(sp->xbuff); + + unregister_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 216bfd350169..85828f153445 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -797,14 +797,19 @@ static void mkiss_close(struct tty_struct *tty) */ if (!atomic_dec_and_test(&ax->refcnt)) down(&ax->dead_sem); - - unregister_netdev(ax->dev); + /* + * Halt the transmit queue so that a new transmit cannot scribble + * on our buffers + */ + netif_stop_queue(ax->dev); /* Free all AX25 frame buffers. */ kfree(ax->rbuff); kfree(ax->xbuff); ax->tty = NULL; + + unregister_netdev(ax->dev); } /* Perform I/O control on an active ax25 channel. */ diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5fa98f599b3d..f4130af09244 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -124,37 +124,22 @@ struct ndis_tcp_ip_checksum_info; /* * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame * within the RNDIS + * + * The size of this structure is less than 48 bytes and we can now + * place this structure in the skb->cb field. 
*/ struct hv_netvsc_packet { /* Bookkeeping stuff */ - u32 status; - - bool is_data_pkt; - bool xmit_more; /* from skb */ - bool cp_partial; /* partial copy into send buffer */ + u8 cp_partial; /* partial copy into send buffer */ - u16 vlan_tci; + u8 rmsg_size; /* RNDIS header and PPI size */ + u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */ + u8 page_buf_cnt; u16 q_idx; - struct vmbus_channel *channel; - - u64 send_completion_tid; - void *send_completion_ctx; - void (*send_completion)(void *context); - u32 send_buf_index; - /* This points to the memory after page_buf */ - struct rndis_message *rndis_msg; - - u32 rmsg_size; /* RNDIS header and PPI size */ - u32 rmsg_pgcnt; /* page count of RNDIS header and PPI */ - u32 total_data_buflen; - /* Points to the send/receive buffer where the ethernet frame is */ - void *data; - u32 page_buf_cnt; - struct hv_page_buffer *page_buf; }; struct netvsc_device_info { @@ -177,7 +162,6 @@ struct rndis_device { enum rndis_device_state state; bool link_state; - bool link_change; atomic_t new_req_id; spinlock_t request_lock; @@ -188,16 +172,22 @@ struct rndis_device { /* Interface */ +struct rndis_message; int netvsc_device_add(struct hv_device *device, void *additional_info); int netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, - struct hv_netvsc_packet *packet); + struct hv_netvsc_packet *packet, + struct rndis_message *rndis_msg, + struct hv_page_buffer **page_buffer, + struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); -void netvsc_xmit_completion(void *context); int netvsc_recv_callback(struct hv_device *device_obj, struct hv_netvsc_packet *packet, - struct ndis_tcp_ip_checksum_info *csum_info); + void **data, + struct ndis_tcp_ip_checksum_info *csum_info, + struct vmbus_channel *channel, + u16 vlan_tci); void netvsc_channel_cb(void *context); int rndis_filter_open(struct hv_device *dev); int rndis_filter_close(struct hv_device *dev); @@ -205,12 +195,13 @@ int rndis_filter_device_add(struct hv_device *dev, void *additional_info); void rndis_filter_device_remove(struct hv_device *dev); int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt); + struct hv_netvsc_packet *pkt, + void **data, + struct vmbus_channel *channel); int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac); - #define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF) #define NVSP_PROTOCOL_VERSION_1 2 @@ -633,7 +624,6 @@ struct nvsp_message { #define RNDIS_PKT_ALIGN_DEFAULT 8 struct multi_send_data { - spinlock_t lock; /* protect struct multi_send_data */ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */ u32 count; /* counter of batched packets */ }; @@ -644,11 +634,24 @@ struct netvsc_stats { struct u64_stats_sync syncp; }; +struct netvsc_reconfig { + struct list_head list; + u32 event; +}; + /* The context of the netvsc device */ struct net_device_context { /* point back to our device context */ struct hv_device *device_ctx; + /* reconfigure work */ struct delayed_work dwork; + /* last reconfig time */ + unsigned long last_reconfig; + /* reconfig events */ + struct list_head reconfig_events; + /* list protection */ + spinlock_t lock; + struct work_struct work; u32 msg_enable; /* debug level */ @@ -1260,5 +1263,4 @@ struct rndis_message { #define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP) #define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP) 
- #endif /* _HYPERV_NET_H */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 51e4c0fd0a74..059fc5231601 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -38,7 +38,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) { struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); - int i; net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); if (!net_device) @@ -58,9 +57,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; - for (i = 0; i < num_online_cpus(); i++) - spin_lock_init(&net_device->msd[i].lock); - hv_set_drvdata(device, net_device); return net_device; } @@ -610,6 +606,7 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device, } static void netvsc_send_completion(struct netvsc_device *net_device, + struct vmbus_channel *incoming_channel, struct hv_device *device, struct vmpacket_descriptor *packet) { @@ -617,6 +614,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device, struct hv_netvsc_packet *nvsc_packet; struct net_device *ndev; u32 send_index; + struct sk_buff *skb; ndev = net_device->ndev; @@ -642,18 +640,17 @@ static void netvsc_send_completion(struct netvsc_device *net_device, int queue_sends; /* Get the send context */ - nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) - packet->trans_id; + skb = (struct sk_buff *)(unsigned long)packet->trans_id; /* Notify the layer above us */ - if (nvsc_packet) { + if (skb) { + nvsc_packet = (struct hv_netvsc_packet *) skb->cb; send_index = nvsc_packet->send_buf_index; if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); q_idx = nvsc_packet->q_idx; - channel = nvsc_packet->channel; - nvsc_packet->send_completion(nvsc_packet-> - send_completion_ctx); + channel = incoming_channel; + dev_kfree_skb_any(skb); } num_outstanding_sends = @@ -705,12 +702,17 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, unsigned int section_index, u32 pend_size, - struct hv_netvsc_packet *packet) + struct hv_netvsc_packet *packet, + struct rndis_message *rndis_msg, + struct hv_page_buffer **pb, + struct sk_buff *skb) { char *start = net_device->send_buf; char *dest = start + (section_index * net_device->send_section_size) + pend_size; int i; + bool is_data_pkt = (skb != NULL) ? true : false; + bool xmit_more = (skb != NULL) ? 
skb->xmit_more : false; u32 msg_size = 0; u32 padding = 0; u32 remain = packet->total_data_buflen % net_device->pkt_align; @@ -718,17 +720,17 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, packet->page_buf_cnt; /* Add padding */ - if (packet->is_data_pkt && packet->xmit_more && remain && + if (is_data_pkt && xmit_more && remain && !packet->cp_partial) { padding = net_device->pkt_align - remain; - packet->rndis_msg->msg_len += padding; + rndis_msg->msg_len += padding; packet->total_data_buflen += padding; } for (i = 0; i < page_count; i++) { - char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT); - u32 offset = packet->page_buf[i].offset; - u32 len = packet->page_buf[i].len; + char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT); + u32 offset = (*pb)[i].offset; + u32 len = (*pb)[i].len; memcpy(dest, (src + offset), len); msg_size += len; @@ -745,19 +747,22 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, static inline int netvsc_send_pkt( struct hv_netvsc_packet *packet, - struct netvsc_device *net_device) + struct netvsc_device *net_device, + struct hv_page_buffer **pb, + struct sk_buff *skb) { struct nvsp_message nvmsg; - struct vmbus_channel *out_channel = packet->channel; u16 q_idx = packet->q_idx; + struct vmbus_channel *out_channel = net_device->chn_table[q_idx]; struct net_device *ndev = net_device->ndev; u64 req_id; int ret; struct hv_page_buffer *pgbuf; u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); + bool xmit_more = (skb != NULL) ? skb->xmit_more : false; nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; - if (packet->is_data_pkt) { + if (skb != NULL) { /* 0 is RMC_DATA; */ nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0; } else { @@ -773,10 +778,7 @@ static inline int netvsc_send_pkt( nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = packet->total_data_buflen; - if (packet->send_completion) - req_id = (ulong)packet; - else - req_id = 0; + req_id = (ulong)skb; if (out_channel->rescind) return -ENODEV; @@ -789,11 +791,11 @@ static inline int netvsc_send_pkt( * unnecessarily. */ if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1)) - packet->xmit_more = false; + xmit_more = false; if (packet->page_buf_cnt) { - pgbuf = packet->cp_partial ? packet->page_buf + - packet->rmsg_pgcnt : packet->page_buf; + pgbuf = packet->cp_partial ? (*pb) + + packet->rmsg_pgcnt : (*pb); ret = vmbus_sendpacket_pagebuffer_ctl(out_channel, pgbuf, packet->page_buf_cnt, @@ -801,14 +803,14 @@ static inline int netvsc_send_pkt( sizeof(struct nvsp_message), req_id, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED, - !packet->xmit_more); + !xmit_more); } else { ret = vmbus_sendpacket_ctl(out_channel, &nvmsg, sizeof(struct nvsp_message), req_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED, - !packet->xmit_more); + !xmit_more); } if (ret == 0) { @@ -840,7 +842,10 @@ static inline int netvsc_send_pkt( } int netvsc_send(struct hv_device *device, - struct hv_netvsc_packet *packet) + struct hv_netvsc_packet *packet, + struct rndis_message *rndis_msg, + struct hv_page_buffer **pb, + struct sk_buff *skb) { struct netvsc_device *net_device; int ret = 0, m_ret = 0; @@ -848,33 +853,35 @@ int netvsc_send(struct hv_device *device, u16 q_idx = packet->q_idx; u32 pktlen = packet->total_data_buflen, msd_len = 0; unsigned int section_index = NETVSC_INVALID_INDEX; - unsigned long flag; struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; bool try_batch; + bool xmit_more = (skb != NULL) ? 
skb->xmit_more : false; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; out_channel = net_device->chn_table[q_idx]; - if (!out_channel) { - out_channel = device->channel; - q_idx = 0; - packet->q_idx = 0; - } - packet->channel = out_channel; + packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; + /* Send control message directly without accessing msd (Multi-Send + * Data) field which may be changed during data packet processing. + */ + if (!skb) { + cur_send = packet; + goto send_now; + } + msdp = &net_device->msd[q_idx]; /* batch packets in send buffer if possible */ - spin_lock_irqsave(&msdp->lock, flag); if (msdp->pkt) msd_len = msdp->pkt->total_data_buflen; - try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count < + try_batch = (skb != NULL) && msd_len > 0 && msdp->count < net_device->max_pkt; if (try_batch && msd_len + pktlen + net_device->pkt_align < @@ -886,7 +893,7 @@ int netvsc_send(struct hv_device *device, section_index = msdp->pkt->send_buf_index; packet->cp_partial = true; - } else if (packet->is_data_pkt && pktlen + net_device->pkt_align < + } else if ((skb != NULL) && pktlen + net_device->pkt_align < net_device->send_section_size) { section_index = netvsc_get_next_send_section(net_device); if (section_index != NETVSC_INVALID_INDEX) { @@ -900,7 +907,7 @@ int netvsc_send(struct hv_device *device, if (section_index != NETVSC_INVALID_INDEX) { netvsc_copy_to_send_buf(net_device, section_index, msd_len, - packet); + packet, rndis_msg, pb, skb); packet->send_buf_index = section_index; @@ -913,9 +920,9 @@ int netvsc_send(struct hv_device *device, } if (msdp->pkt) - netvsc_xmit_completion(msdp->pkt); + dev_kfree_skb_any(skb); - if (packet->xmit_more && !packet->cp_partial) { + if (xmit_more && !packet->cp_partial) { msdp->pkt = packet; msdp->count++; } else { @@ -930,20 +937,19 @@ int netvsc_send(struct hv_device *device, cur_send = packet; } - spin_unlock_irqrestore(&msdp->lock, flag); - if (msd_send) { - m_ret = netvsc_send_pkt(msd_send, net_device); + m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb); if (m_ret != 0) { netvsc_free_send_slot(net_device, msd_send->send_buf_index); - netvsc_xmit_completion(msd_send); + dev_kfree_skb_any(skb); } } +send_now: if (cur_send) - ret = netvsc_send_pkt(cur_send, net_device); + ret = netvsc_send_pkt(cur_send, net_device, pb, skb); if (ret != 0 && section_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, section_index); @@ -1009,6 +1015,7 @@ static void netvsc_receive(struct netvsc_device *net_device, int i; int count = 0; struct net_device *ndev; + void *data; ndev = net_device->ndev; @@ -1043,22 +1050,19 @@ static void netvsc_receive(struct netvsc_device *net_device, } count = vmxferpage_packet->range_cnt; - netvsc_packet->channel = channel; /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < count; i++) { /* Initialize the netvsc packet */ - netvsc_packet->status = NVSP_STAT_SUCCESS; - netvsc_packet->data = (void *)((unsigned long)net_device-> + data = (void *)((unsigned long)net_device-> recv_buf + vmxferpage_packet->ranges[i].byte_offset); netvsc_packet->total_data_buflen = vmxferpage_packet->ranges[i].byte_count; /* Pass it to the upper layer */ - rndis_filter_receive(device, netvsc_packet); + status = rndis_filter_receive(device, netvsc_packet, &data, + channel); - if (netvsc_packet->status != NVSP_STAT_SUCCESS) - status = NVSP_STAT_FAIL; } netvsc_send_recv_completion(device, channel, net_device, @@ -1150,6 +1154,7 @@ 
void netvsc_channel_cb(void *context) switch (desc->type) { case VM_PKT_COMP: netvsc_send_completion(net_device, + channel, device, desc); break; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 409b48e1e589..1c8db9afdcda 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,6 +42,7 @@ #define RING_SIZE_MIN 64 +#define LINKCHANGE_INT (2 * HZ) static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); @@ -272,17 +273,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); } - return q_idx; -} - -void netvsc_xmit_completion(void *context) -{ - struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context; - struct sk_buff *skb = (struct sk_buff *) - (unsigned long)packet->send_completion_tid; + if (!nvsc_dev->chn_table[q_idx]) + q_idx = 0; - if (skb) - dev_kfree_skb_any(skb); + return q_idx; } static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, @@ -320,9 +314,10 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, } static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, - struct hv_netvsc_packet *packet) + struct hv_netvsc_packet *packet, + struct hv_page_buffer **page_buf) { - struct hv_page_buffer *pb = packet->page_buf; + struct hv_page_buffer *pb = *page_buf; u32 slots_used = 0; char *data = skb->data; int frags = skb_shinfo(skb)->nr_frags; @@ -432,8 +427,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 net_trans_info; u32 hash; u32 skb_length; - u32 pkt_sz; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; + struct hv_page_buffer *pb = page_buf; struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); /* We will atmost need two pages to describe the rndis @@ -460,42 +455,34 @@ check_size: goto check_size; } - pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; - - ret = skb_cow_head(skb, pkt_sz); + /* + * Place the rndis header in the skb head room and + * the skb->cb will be used for hv_netvsc_packet + * structure. 
+ */ + ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); if (ret) { netdev_err(net, "unable to alloc hv_netvsc_packet\n"); ret = -ENOMEM; goto drop; } - /* Use the headroom for building up the packet */ - packet = (struct hv_netvsc_packet *)skb->head; + /* Use the skb control buffer for building up the packet */ + BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > + FIELD_SIZEOF(struct sk_buff, cb)); + packet = (struct hv_netvsc_packet *)skb->cb; - packet->status = 0; - packet->xmit_more = skb->xmit_more; - - packet->vlan_tci = skb->vlan_tci; - packet->page_buf = page_buf; packet->q_idx = skb_get_queue_mapping(skb); - packet->is_data_pkt = true; packet->total_data_buflen = skb->len; - packet->rndis_msg = (struct rndis_message *)((unsigned long)packet + - sizeof(struct hv_netvsc_packet)); - - memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE); + rndis_msg = (struct rndis_message *)skb->head; - /* Set the completion routine */ - packet->send_completion = netvsc_xmit_completion; - packet->send_completion_ctx = packet; - packet->send_completion_tid = (unsigned long)skb; + memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE); - isvlan = packet->vlan_tci & VLAN_TAG_PRESENT; + isvlan = skb->vlan_tci & VLAN_TAG_PRESENT; /* Add the rndis header */ - rndis_msg = packet->rndis_msg; rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; rndis_msg->msg_len = packet->total_data_buflen; rndis_pkt = &rndis_msg->msg.pkt; @@ -521,8 +508,8 @@ check_size: IEEE_8021Q_INFO); vlan = (struct ndis_pkt_8021q_info *)((void *)ppi + ppi->ppi_offset); - vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK; - vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >> + vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; + vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } @@ -617,9 +604,10 @@ do_send: rndis_msg->msg_len += rndis_msg_size; packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, - skb, packet); + skb, packet, &pb); - ret = netvsc_send(net_device_ctx->device_ctx, packet); + ret = netvsc_send(net_device_ctx->device_ctx, packet, + rndis_msg, &pb, skb); drop: if (ret == 0) { @@ -647,37 +635,33 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, struct net_device *net; struct net_device_context *ndev_ctx; struct netvsc_device *net_device; - struct rndis_device *rdev; - - net_device = hv_get_drvdata(device_obj); - rdev = net_device->extension; + struct netvsc_reconfig *event; + unsigned long flags; - switch (indicate->status) { - case RNDIS_STATUS_MEDIA_CONNECT: - rdev->link_state = false; - break; - case RNDIS_STATUS_MEDIA_DISCONNECT: - rdev->link_state = true; - break; - case RNDIS_STATUS_NETWORK_CHANGE: - rdev->link_change = true; - break; - default: + /* Handle link change statuses only */ + if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE && + indicate->status != RNDIS_STATUS_MEDIA_CONNECT && + indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) return; - } + net_device = hv_get_drvdata(device_obj); net = net_device->ndev; if (!net || net->reg_state != NETREG_REGISTERED) return; ndev_ctx = netdev_priv(net); - if (!rdev->link_state) { - schedule_delayed_work(&ndev_ctx->dwork, 0); - schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); - } else { - schedule_delayed_work(&ndev_ctx->dwork, 0); - } + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + event->event = indicate->status; + + spin_lock_irqsave(&ndev_ctx->lock, flags); + list_add_tail(&event->list, &ndev_ctx->reconfig_events); + spin_unlock_irqrestore(&ndev_ctx->lock, flags); + + 
schedule_delayed_work(&ndev_ctx->dwork, 0); } /* @@ -686,7 +670,10 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, */ int netvsc_recv_callback(struct hv_device *device_obj, struct hv_netvsc_packet *packet, - struct ndis_tcp_ip_checksum_info *csum_info) + void **data, + struct ndis_tcp_ip_checksum_info *csum_info, + struct vmbus_channel *channel, + u16 vlan_tci) { struct net_device *net; struct net_device_context *net_device_ctx; @@ -695,8 +682,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; if (!net || net->reg_state != NETREG_REGISTERED) { - packet->status = NVSP_STAT_FAIL; - return 0; + return NVSP_STAT_FAIL; } net_device_ctx = netdev_priv(net); rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); @@ -705,15 +691,14 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); if (unlikely(!skb)) { ++net->stats.rx_dropped; - packet->status = NVSP_STAT_FAIL; - return 0; + return NVSP_STAT_FAIL; } /* * Copy to skb. This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ - memcpy(skb_put(skb, packet->total_data_buflen), packet->data, + memcpy(skb_put(skb, packet->total_data_buflen), *data, packet->total_data_buflen); skb->protocol = eth_type_trans(skb, net); @@ -728,11 +713,11 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb->ip_summed = CHECKSUM_NONE; } - if (packet->vlan_tci & VLAN_TAG_PRESENT) + if (vlan_tci & VLAN_TAG_PRESENT) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - packet->vlan_tci); + vlan_tci); - skb_record_rx_queue(skb, packet->channel-> + skb_record_rx_queue(skb, channel-> offermsg.offer.sub_channel_index); u64_stats_update_begin(&rx_stats->syncp); @@ -1009,12 +994,9 @@ static const struct net_device_ops device_ops = { }; /* - * Send GARP packet to network peers after migrations. - * After Quick Migration, the network is not immediately operational in the - * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add - * another netif_notify_peers() into a delayed work, otherwise GARP packet - * will not be sent after quick migration, and cause network disconnection. - * Also, we update the carrier status here. + * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link + * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is + * present send GARP packet to network peers with netif_notify_peers(). 
*/ static void netvsc_link_change(struct work_struct *w) { @@ -1022,36 +1004,89 @@ static void netvsc_link_change(struct work_struct *w) struct net_device *net; struct netvsc_device *net_device; struct rndis_device *rdev; - bool notify, refresh = false; - char *argv[] = { "/etc/init.d/network", "restart", NULL }; - char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; - - rtnl_lock(); + struct netvsc_reconfig *event = NULL; + bool notify = false, reschedule = false; + unsigned long flags, next_reconfig, delay; ndev_ctx = container_of(w, struct net_device_context, dwork.work); net_device = hv_get_drvdata(ndev_ctx->device_ctx); rdev = net_device->extension; net = net_device->ndev; - if (rdev->link_state) { - netif_carrier_off(net); - notify = false; - } else { - netif_carrier_on(net); - notify = true; - if (rdev->link_change) { - rdev->link_change = false; - refresh = true; + next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; + if (time_is_after_jiffies(next_reconfig)) { + /* link_watch only sends one notification with current state + * per second, avoid doing reconfig more frequently. Handle + * wrap around. + */ + delay = next_reconfig - jiffies; + delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; + schedule_delayed_work(&ndev_ctx->dwork, delay); + return; + } + ndev_ctx->last_reconfig = jiffies; + + spin_lock_irqsave(&ndev_ctx->lock, flags); + if (!list_empty(&ndev_ctx->reconfig_events)) { + event = list_first_entry(&ndev_ctx->reconfig_events, + struct netvsc_reconfig, list); + list_del(&event->list); + reschedule = !list_empty(&ndev_ctx->reconfig_events); + } + spin_unlock_irqrestore(&ndev_ctx->lock, flags); + + if (!event) + return; + + rtnl_lock(); + + switch (event->event) { + /* Only the following events are possible due to the check in + * netvsc_linkstatus_callback() + */ + case RNDIS_STATUS_MEDIA_CONNECT: + if (rdev->link_state) { + rdev->link_state = false; + netif_carrier_on(net); + netif_tx_wake_all_queues(net); + } else { + notify = true; + } + kfree(event); + break; + case RNDIS_STATUS_MEDIA_DISCONNECT: + if (!rdev->link_state) { + rdev->link_state = true; + netif_carrier_off(net); + netif_tx_stop_all_queues(net); + } + kfree(event); + break; + case RNDIS_STATUS_NETWORK_CHANGE: + /* Only makes sense if carrier is present */ + if (!rdev->link_state) { + rdev->link_state = true; + netif_carrier_off(net); + netif_tx_stop_all_queues(net); + event->event = RNDIS_STATUS_MEDIA_CONNECT; + spin_lock_irqsave(&ndev_ctx->lock, flags); + list_add_tail(&event->list, &ndev_ctx->reconfig_events); + spin_unlock_irqrestore(&ndev_ctx->lock, flags); + reschedule = true; } + break; } rtnl_unlock(); - if (refresh) - call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); - if (notify) netdev_notify_peers(net); + + /* link_watch only sends one notification with current state per + * second, handle next reconfig event in 2 seconds. 
+ */ + if (reschedule) + schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); } static void netvsc_free_netdev(struct net_device *netdev) @@ -1071,16 +1106,12 @@ static int netvsc_probe(struct hv_device *dev, struct netvsc_device_info device_info; struct netvsc_device *nvdev; int ret; - u32 max_needed_headroom; net = alloc_etherdev_mq(sizeof(struct net_device_context), num_online_cpus()); if (!net) return -ENOMEM; - max_needed_headroom = sizeof(struct hv_netvsc_packet) + - RNDIS_AND_PPI_SIZE; - netif_carrier_off(net); net_device_ctx = netdev_priv(net); @@ -1106,6 +1137,9 @@ static int netvsc_probe(struct hv_device *dev, INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); + spin_lock_init(&net_device_ctx->lock); + INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + net->netdev_ops = &device_ops; net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | @@ -1116,13 +1150,6 @@ static int netvsc_probe(struct hv_device *dev, net->ethtool_ops = &ethtool_ops; SET_NETDEV_DEV(net, &dev->device); - /* - * Request additional head room in the skb. - * We will use this space to build the rndis - * heaser and other state we need to maintain. - */ - net->needed_headroom = max_needed_headroom; - /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; @@ -1145,8 +1172,6 @@ static int netvsc_probe(struct hv_device *dev, pr_err("Unable to register netdev.\n"); rndis_filter_device_remove(dev); netvsc_free_netdev(net); - } else { - schedule_delayed_work(&net_device_ctx->dwork, 0); } return ret; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 5931a799aa17..a37bbda37ffa 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -210,37 +210,33 @@ static int rndis_filter_send_request(struct rndis_device *dev, int ret; struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; + struct hv_page_buffer *pb = page_buf; /* Setup the packet to send it */ packet = &req->pkt; - packet->is_data_pkt = false; packet->total_data_buflen = req->request_msg.msg_len; packet->page_buf_cnt = 1; - packet->page_buf = page_buf; - packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >> + pb[0].pfn = virt_to_phys(&req->request_msg) >> PAGE_SHIFT; - packet->page_buf[0].len = req->request_msg.msg_len; - packet->page_buf[0].offset = + pb[0].len = req->request_msg.msg_len; + pb[0].offset = (unsigned long)&req->request_msg & (PAGE_SIZE - 1); /* Add one page_buf when request_msg crossing page boundary */ - if (packet->page_buf[0].offset + packet->page_buf[0].len > PAGE_SIZE) { packet->page_buf_cnt++; - packet->page_buf[0].len = PAGE_SIZE - - packet->page_buf[0].offset; - packet->page_buf[1].pfn = virt_to_phys((void *)&req->request_msg - + packet->page_buf[0].len) >> PAGE_SHIFT; - packet->page_buf[1].offset = 0; - packet->page_buf[1].len = req->request_msg.msg_len - - packet->page_buf[0].len; + pb[0].len = PAGE_SIZE - + pb[0].offset; + pb[1].pfn = virt_to_phys((void *)&req->request_msg + + pb[0].len) >> PAGE_SHIFT; + pb[1].offset = 0; + pb[1].len = req->request_msg.msg_len - + pb[0].len; } - packet->send_completion = NULL; - packet->xmit_more = false; - - ret = netvsc_send(dev->net_dev->dev, packet); + ret = netvsc_send(dev->net_dev->dev, packet, NULL, &pb, NULL); return ret; } @@ -348,14 +344,17 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
return NULL; } -static void rndis_filter_receive_data(struct rndis_device *dev, +static int rndis_filter_receive_data(struct rndis_device *dev, struct rndis_message *msg, - struct hv_netvsc_packet *pkt) + struct hv_netvsc_packet *pkt, + void **data, + struct vmbus_channel *channel) { struct rndis_packet *rndis_pkt; u32 data_offset; struct ndis_pkt_8021q_info *vlan; struct ndis_tcp_ip_checksum_info *csum_info; + u16 vlan_tci = 0; rndis_pkt = &msg->msg.pkt; @@ -373,7 +372,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev, "overflow detected (got %u, min %u)" "...dropping this message!\n", pkt->total_data_buflen, rndis_pkt->data_len); - return; + return NVSP_STAT_FAIL; } /* @@ -382,22 +381,23 @@ static void rndis_filter_receive_data(struct rndis_device *dev, * the data packet to the stack, without the rndis trailer padding */ pkt->total_data_buflen = rndis_pkt->data_len; - pkt->data = (void *)((unsigned long)pkt->data + data_offset); + *data = (void *)((unsigned long)(*data) + data_offset); vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); if (vlan) { - pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | + vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT); - } else { - pkt->vlan_tci = 0; } csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - netvsc_recv_callback(dev->net_dev->dev, pkt, csum_info); + return netvsc_recv_callback(dev->net_dev->dev, pkt, data, + csum_info, channel, vlan_tci); } int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt) + struct hv_netvsc_packet *pkt, + void **data, + struct vmbus_channel *channel) { struct netvsc_device *net_dev = hv_get_drvdata(dev); struct rndis_device *rndis_dev; @@ -406,7 +406,7 @@ int rndis_filter_receive(struct hv_device *dev, int ret = 0; if (!net_dev) { - ret = -EINVAL; + ret = NVSP_STAT_FAIL; goto exit; } @@ -416,7 +416,7 @@ int rndis_filter_receive(struct hv_device *dev, if (!net_dev->extension) { netdev_err(ndev, "got rndis message but no rndis device - " "dropping this message!\n"); - ret = -ENODEV; + ret = NVSP_STAT_FAIL; goto exit; } @@ -424,11 +424,11 @@ int rndis_filter_receive(struct hv_device *dev, if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { netdev_err(ndev, "got rndis message but rndis device " "uninitialized...dropping this message!\n"); - ret = -ENODEV; + ret = NVSP_STAT_FAIL; goto exit; } - rndis_msg = pkt->data; + rndis_msg = *data; if (netif_msg_rx_err(net_dev->nd_ctx)) dump_rndis_message(dev, rndis_msg); @@ -436,7 +436,8 @@ int rndis_filter_receive(struct hv_device *dev, switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: /* data msg */ - rndis_filter_receive_data(rndis_dev, rndis_msg, pkt); + ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt, + data, channel); break; case RNDIS_MSG_INIT_C: @@ -459,9 +460,6 @@ int rndis_filter_receive(struct hv_device *dev, } exit: - if (ret != 0) - pkt->status = NVSP_STAT_FAIL; - return ret; } diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index ce5f1a21e6d7..3057a8df4ce9 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -71,3 +71,14 @@ config IEEE802154_ATUSB This driver can also be built as a module. To do so say M here. The module will be called 'atusb'. + +config IEEE802154_ADF7242 + tristate "ADF7242 transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on SPI + ---help--- + Say Y here to enable the ADF7242 SPI 802.15.4 wireless + controller. + + This driver can also be built as a module. 
To do so, say M here. + the module will be called 'adf7242'. diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index cf1d2a6db023..3a923d339497 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o +obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c new file mode 100644 index 000000000000..89154c079788 --- /dev/null +++ b/drivers/net/ieee802154/adf7242.c @@ -0,0 +1,1285 @@ +/* + * Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver + * + * Copyright 2009-2015 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + * + * http://www.analog.com/ADF7242 + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/firmware.h> +#include <linux/spi/spi.h> +#include <linux/skbuff.h> +#include <linux/of.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/debugfs.h> +#include <linux/bitops.h> +#include <linux/ieee802154.h> +#include <net/mac802154.h> +#include <net/cfg802154.h> + +#define FIRMWARE "adf7242_firmware.bin" +#define MAX_POLL_LOOPS 200 + +/* All Registers */ + +#define REG_EXT_CTRL 0x100 /* RW External LNA/PA and internal PA control */ +#define REG_TX_FSK_TEST 0x101 /* RW TX FSK test mode configuration */ +#define REG_CCA1 0x105 /* RW RSSI threshold for CCA */ +#define REG_CCA2 0x106 /* RW CCA mode configuration */ +#define REG_BUFFERCFG 0x107 /* RW RX_BUFFER overwrite control */ +#define REG_PKT_CFG 0x108 /* RW FCS evaluation configuration */ +#define REG_DELAYCFG0 0x109 /* RW RC_RX command to SFD or sync word delay */ +#define REG_DELAYCFG1 0x10A /* RW RC_TX command to TX state */ +#define REG_DELAYCFG2 0x10B /* RW Mac delay extension */ +#define REG_SYNC_WORD0 0x10C /* RW sync word bits [7:0] of [23:0] */ +#define REG_SYNC_WORD1 0x10D /* RW sync word bits [15:8] of [23:0] */ +#define REG_SYNC_WORD2 0x10E /* RW sync word bits [23:16] of [23:0] */ +#define REG_SYNC_CONFIG 0x10F /* RW sync word configuration */ +#define REG_RC_CFG 0x13E /* RW RX / TX packet configuration */ +#define REG_RC_VAR44 0x13F /* RW RESERVED */ +#define REG_CH_FREQ0 0x300 /* RW Channel Frequency Settings - Low */ +#define REG_CH_FREQ1 0x301 /* RW Channel Frequency Settings - Middle */ +#define REG_CH_FREQ2 0x302 /* RW Channel Frequency Settings - High */ +#define REG_TX_FD 0x304 /* RW TX Frequency Deviation Register */ +#define REG_DM_CFG0 0x305 /* RW RX Discriminator BW Register */ +#define REG_TX_M 0x306 /* RW TX Mode Register */ +#define REG_RX_M 0x307 /* RW RX Mode Register */ +#define REG_RRB 0x30C /* R RSSI Readback Register */ +#define REG_LRB 0x30D /* R Link Quality Readback Register */ +#define REG_DR0 0x30E /* RW bits [15:8] of [15:0] data rate setting */ +#define REG_DR1 0x30F /* RW bits [7:0] of [15:0] data rate setting */ +#define REG_PRAMPG 0x313 /* RW RESERVED */ +#define REG_TXPB 0x314 /* RW TX Packet Storage Base Address */ +#define REG_RXPB 0x315 /* RW RX Packet Storage Base Address */ +#define REG_TMR_CFG0 0x316 /* RW Wake up Timer Conf Register - High */ +#define REG_TMR_CFG1 0x317 /* RW Wake up Timer Conf Register - Low */ +#define REG_TMR_RLD0 0x318 /* RW Wake up Timer Value 
Register - High */ +#define REG_TMR_RLD1 0x319 /* RW Wake up Timer Value Register - Low */ +#define REG_TMR_CTRL 0x31A /* RW Wake up Timer Timeout flag */ +#define REG_PD_AUX 0x31E /* RW Battmon enable */ +#define REG_GP_CFG 0x32C /* RW GPIO Configuration */ +#define REG_GP_OUT 0x32D /* RW GPIO Configuration */ +#define REG_GP_IN 0x32E /* R GPIO Configuration */ +#define REG_SYNT 0x335 /* RW bandwidth calibration timers */ +#define REG_CAL_CFG 0x33D /* RW Calibration Settings */ +#define REG_PA_BIAS 0x36E /* RW PA BIAS */ +#define REG_SYNT_CAL 0x371 /* RW Oscillator and Doubler Configuration */ +#define REG_IIRF_CFG 0x389 /* RW BB Filter Decimation Rate */ +#define REG_CDR_CFG 0x38A /* RW CDR kVCO */ +#define REG_DM_CFG1 0x38B /* RW Postdemodulator Filter */ +#define REG_AGCSTAT 0x38E /* R RXBB Ref Osc Calibration Engine Readback */ +#define REG_RXCAL0 0x395 /* RW RX BB filter tuning, LSB */ +#define REG_RXCAL1 0x396 /* RW RX BB filter tuning, MSB */ +#define REG_RXFE_CFG 0x39B /* RW RXBB Ref Osc & RXFE Calibration */ +#define REG_PA_RR 0x3A7 /* RW Set PA ramp rate */ +#define REG_PA_CFG 0x3A8 /* RW PA enable */ +#define REG_EXTPA_CFG 0x3A9 /* RW External PA BIAS DAC */ +#define REG_EXTPA_MSC 0x3AA /* RW PA Bias Mode */ +#define REG_ADC_RBK 0x3AE /* R Readback temp */ +#define REG_AGC_CFG1 0x3B2 /* RW GC Parameters */ +#define REG_AGC_MAX 0x3B4 /* RW Slew rate */ +#define REG_AGC_CFG2 0x3B6 /* RW RSSI Parameters */ +#define REG_AGC_CFG3 0x3B7 /* RW RSSI Parameters */ +#define REG_AGC_CFG4 0x3B8 /* RW RSSI Parameters */ +#define REG_AGC_CFG5 0x3B9 /* RW RSSI & NDEC Parameters */ +#define REG_AGC_CFG6 0x3BA /* RW NDEC Parameters */ +#define REG_OCL_CFG1 0x3C4 /* RW OCL System Parameters */ +#define REG_IRQ1_EN0 0x3C7 /* RW Interrupt Mask set bits for IRQ1 */ +#define REG_IRQ1_EN1 0x3C8 /* RW Interrupt Mask set bits for IRQ1 */ +#define REG_IRQ2_EN0 0x3C9 /* RW Interrupt Mask set bits for IRQ2 */ +#define REG_IRQ2_EN1 0x3CA /* RW Interrupt Mask set bits for IRQ2 */ +#define REG_IRQ1_SRC0 0x3CB /* RW Interrupt Source bits for IRQ */ +#define REG_IRQ1_SRC1 0x3CC /* RW Interrupt Source bits for IRQ */ +#define REG_OCL_BW0 0x3D2 /* RW OCL System Parameters */ +#define REG_OCL_BW1 0x3D3 /* RW OCL System Parameters */ +#define REG_OCL_BW2 0x3D4 /* RW OCL System Parameters */ +#define REG_OCL_BW3 0x3D5 /* RW OCL System Parameters */ +#define REG_OCL_BW4 0x3D6 /* RW OCL System Parameters */ +#define REG_OCL_BWS 0x3D7 /* RW OCL System Parameters */ +#define REG_OCL_CFG13 0x3E0 /* RW OCL System Parameters */ +#define REG_GP_DRV 0x3E3 /* RW I/O pads Configuration and bg trim */ +#define REG_BM_CFG 0x3E6 /* RW Batt. 
Monitor Threshold Voltage setting */ +#define REG_SFD_15_4 0x3F4 /* RW Option to set non standard SFD */ +#define REG_AFC_CFG 0x3F7 /* RW AFC mode and polarity */ +#define REG_AFC_KI_KP 0x3F8 /* RW AFC ki and kp */ +#define REG_AFC_RANGE 0x3F9 /* RW AFC range */ +#define REG_AFC_READ 0x3FA /* RW Readback frequency error */ + +/* REG_EXTPA_MSC */ +#define PA_PWR(x) (((x) & 0xF) << 4) +#define EXTPA_BIAS_SRC BIT(3) +#define EXTPA_BIAS_MODE(x) (((x) & 0x7) << 0) + +/* REG_PA_CFG */ +#define PA_BRIDGE_DBIAS(x) (((x) & 0x1F) << 0) +#define PA_DBIAS_HIGH_POWER 21 +#define PA_DBIAS_LOW_POWER 13 + +/* REG_PA_BIAS */ +#define PA_BIAS_CTRL(x) (((x) & 0x1F) << 1) +#define REG_PA_BIAS_DFL BIT(0) +#define PA_BIAS_HIGH_POWER 63 +#define PA_BIAS_LOW_POWER 55 + +#define REG_PAN_ID0 0x112 +#define REG_PAN_ID1 0x113 +#define REG_SHORT_ADDR_0 0x114 +#define REG_SHORT_ADDR_1 0x115 +#define REG_IEEE_ADDR_0 0x116 +#define REG_IEEE_ADDR_1 0x117 +#define REG_IEEE_ADDR_2 0x118 +#define REG_IEEE_ADDR_3 0x119 +#define REG_IEEE_ADDR_4 0x11A +#define REG_IEEE_ADDR_5 0x11B +#define REG_IEEE_ADDR_6 0x11C +#define REG_IEEE_ADDR_7 0x11D +#define REG_FFILT_CFG 0x11E +#define REG_AUTO_CFG 0x11F +#define REG_AUTO_TX1 0x120 +#define REG_AUTO_TX2 0x121 +#define REG_AUTO_STATUS 0x122 + +/* REG_FFILT_CFG */ +#define ACCEPT_BEACON_FRAMES BIT(0) +#define ACCEPT_DATA_FRAMES BIT(1) +#define ACCEPT_ACK_FRAMES BIT(2) +#define ACCEPT_MACCMD_FRAMES BIT(3) +#define ACCEPT_RESERVED_FRAMES BIT(4) +#define ACCEPT_ALL_ADDRESS BIT(5) + +/* REG_AUTO_CFG */ +#define AUTO_ACK_FRAMEPEND BIT(0) +#define IS_PANCOORD BIT(1) +#define RX_AUTO_ACK_EN BIT(3) +#define CSMA_CA_RX_TURNAROUND BIT(4) + +/* REG_AUTO_TX1 */ +#define MAX_FRAME_RETRIES(x) ((x) & 0xF) +#define MAX_CCA_RETRIES(x) (((x) & 0x7) << 4) + +/* REG_AUTO_TX2 */ +#define CSMA_MAX_BE(x) ((x) & 0xF) +#define CSMA_MIN_BE(x) (((x) & 0xF) << 4) + +#define CMD_SPI_NOP 0xFF /* No operation. 
Use for dummy writes */ +#define CMD_SPI_PKT_WR 0x10 /* Write telegram to the Packet RAM + * starting from the TX packet base address + * pointer tx_packet_base + */ +#define CMD_SPI_PKT_RD 0x30 /* Read telegram from the Packet RAM + * starting from RX packet base address + * pointer rxpb.rx_packet_base + */ +#define CMD_SPI_MEM_WR(x) (0x18 + (x >> 8)) /* Write data to MCR or + * Packet RAM sequentially + */ +#define CMD_SPI_MEM_RD(x) (0x38 + (x >> 8)) /* Read data from MCR or + * Packet RAM sequentially + */ +#define CMD_SPI_MEMR_WR(x) (0x08 + (x >> 8)) /* Write data to MCR or Packet + * RAM as random block + */ +#define CMD_SPI_MEMR_RD(x) (0x28 + (x >> 8)) /* Read data from MCR or + * Packet RAM random block + */ +#define CMD_SPI_PRAM_WR 0x1E /* Write data sequentially to current + * PRAM page selected + */ +#define CMD_SPI_PRAM_RD 0x3E /* Read data sequentially from current + * PRAM page selected + */ +#define CMD_RC_SLEEP 0xB1 /* Invoke transition of radio controller + * into SLEEP state + */ +#define CMD_RC_IDLE 0xB2 /* Invoke transition of radio controller + * into IDLE state + */ +#define CMD_RC_PHY_RDY 0xB3 /* Invoke transition of radio controller + * into PHY_RDY state + */ +#define CMD_RC_RX 0xB4 /* Invoke transition of radio controller + * into RX state + */ +#define CMD_RC_TX 0xB5 /* Invoke transition of radio controller + * into TX state + */ +#define CMD_RC_MEAS 0xB6 /* Invoke transition of radio controller + * into MEAS state + */ +#define CMD_RC_CCA 0xB7 /* Invoke Clear channel assessment */ +#define CMD_RC_CSMACA 0xC1 /* initiates CSMA-CA channel access + * sequence and frame transmission + */ +#define CMD_RC_PC_RESET 0xC7 /* Program counter reset */ +#define CMD_RC_RESET 0xC8 /* Resets the ADF7242 and puts it in + * the sleep state + */ +#define CMD_RC_PC_RESET_NO_WAIT (CMD_RC_PC_RESET | BIT(31)) + +/* STATUS */ + +#define STAT_SPI_READY BIT(7) +#define STAT_IRQ_STATUS BIT(6) +#define STAT_RC_READY BIT(5) +#define STAT_CCA_RESULT BIT(4) +#define RC_STATUS_IDLE 1 +#define RC_STATUS_MEAS 2 +#define RC_STATUS_PHY_RDY 3 +#define RC_STATUS_RX 4 +#define RC_STATUS_TX 5 +#define RC_STATUS_MASK 0xF + +/* AUTO_STATUS */ + +#define SUCCESS 0 +#define SUCCESS_DATPEND 1 +#define FAILURE_CSMACA 2 +#define FAILURE_NOACK 3 +#define AUTO_STATUS_MASK 0x3 + +#define PRAM_PAGESIZE 256 + +/* IRQ1 */ + +#define IRQ_CCA_COMPLETE BIT(0) +#define IRQ_SFD_RX BIT(1) +#define IRQ_SFD_TX BIT(2) +#define IRQ_RX_PKT_RCVD BIT(3) +#define IRQ_TX_PKT_SENT BIT(4) +#define IRQ_FRAME_VALID BIT(5) +#define IRQ_ADDRESS_VALID BIT(6) +#define IRQ_CSMA_CA BIT(7) + +#define AUTO_TX_TURNAROUND BIT(3) +#define ADDON_EN BIT(4) + +#define FLAG_XMIT 0 +#define FLAG_START 1 + +#define ADF7242_REPORT_CSMA_CA_STAT 0 /* framework doesn't handle yet */ + +struct adf7242_local { + struct spi_device *spi; + struct completion tx_complete; + struct ieee802154_hw *hw; + struct mutex bmux; /* protect SPI messages */ + struct spi_message stat_msg; + struct spi_transfer stat_xfer; + struct dentry *debugfs_root; + unsigned long flags; + int tx_stat; + bool promiscuous; + s8 rssi; + u8 max_frame_retries; + u8 max_cca_retries; + u8 max_be; + u8 min_be; + + /* DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. 
+ */ + + u8 buf[3] ____cacheline_aligned; + u8 buf_reg_tx[3]; + u8 buf_read_tx[4]; + u8 buf_read_rx[4]; + u8 buf_stat_rx; + u8 buf_stat_tx; + u8 buf_cmd; +}; + +static int adf7242_soft_reset(struct adf7242_local *lp, int line); + +static int adf7242_status(struct adf7242_local *lp, u8 *stat) +{ + int status; + + mutex_lock(&lp->bmux); + status = spi_sync(lp->spi, &lp->stat_msg); + *stat = lp->buf_stat_rx; + mutex_unlock(&lp->bmux); + + return status; +} + +static int adf7242_wait_status(struct adf7242_local *lp, unsigned status, + unsigned mask, int line) +{ + int cnt = 0, ret = 0; + u8 stat; + + do { + adf7242_status(lp, &stat); + cnt++; + } while (((stat & mask) != status) && (cnt < MAX_POLL_LOOPS)); + + if (cnt >= MAX_POLL_LOOPS) { + ret = -ETIMEDOUT; + + if (!(stat & STAT_RC_READY)) { + adf7242_soft_reset(lp, line); + adf7242_status(lp, &stat); + + if ((stat & mask) == status) + ret = 0; + } + + if (ret < 0) + dev_warn(&lp->spi->dev, + "%s:line %d Timeout status 0x%x (%d)\n", + __func__, line, stat, cnt); + } + + dev_vdbg(&lp->spi->dev, "%s : loops=%d line %d\n", __func__, cnt, line); + + return ret; +} + +static int adf7242_wait_ready(struct adf7242_local *lp, int line) +{ + return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY, + STAT_RC_READY | STAT_SPI_READY, line); +} + +static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer_head = { + .len = 2, + .tx_buf = buf, + + }; + struct spi_transfer xfer_buf = { + .len = len, + .tx_buf = data, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + adf7242_wait_ready(lp, __LINE__); + + mutex_lock(&lp->bmux); + buf[0] = CMD_SPI_PKT_WR; + buf[1] = len + 2; + + status = spi_sync(lp->spi, &msg); + mutex_unlock(&lp->bmux); + + return status; +} + +static int adf7242_read_fbuf(struct adf7242_local *lp, + u8 *data, size_t len, bool packet_read) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer_head = { + .len = 3, + .tx_buf = buf, + .rx_buf = buf, + }; + struct spi_transfer xfer_buf = { + .len = len, + .rx_buf = data, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + adf7242_wait_ready(lp, __LINE__); + + mutex_lock(&lp->bmux); + if (packet_read) { + buf[0] = CMD_SPI_PKT_RD; + buf[1] = CMD_SPI_NOP; + buf[2] = 0; /* PHR */ + } else { + buf[0] = CMD_SPI_PRAM_RD; + buf[1] = 0; + buf[2] = CMD_SPI_NOP; + } + + status = spi_sync(lp->spi, &msg); + + mutex_unlock(&lp->bmux); + + return status; +} + +static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data) +{ + int status; + struct spi_message msg; + + struct spi_transfer xfer = { + .len = 4, + .tx_buf = lp->buf_read_tx, + .rx_buf = lp->buf_read_rx, + }; + + adf7242_wait_ready(lp, __LINE__); + + mutex_lock(&lp->bmux); + lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr); + lp->buf_read_tx[1] = addr; + lp->buf_read_tx[2] = CMD_SPI_NOP; + lp->buf_read_tx[3] = CMD_SPI_NOP; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + status = spi_sync(lp->spi, &msg); + if (msg.status) + status = msg.status; + + if (!status) + *data = lp->buf_read_rx[3]; + + mutex_unlock(&lp->bmux); + + dev_vdbg(&lp->spi->dev, "%s : REG 0x%X, VAL 0x%X\n", __func__, + addr, *data); + + return status; +} + +static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data) +{ + int status; + + adf7242_wait_ready(lp, 
__LINE__); + + mutex_lock(&lp->bmux); + lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr); + lp->buf_reg_tx[1] = addr; + lp->buf_reg_tx[2] = data; + status = spi_write(lp->spi, lp->buf_reg_tx, 3); + mutex_unlock(&lp->bmux); + + dev_vdbg(&lp->spi->dev, "%s : REG 0x%X, VAL 0x%X\n", + __func__, addr, data); + + return status; +} + +static int adf7242_cmd(struct adf7242_local *lp, unsigned cmd) +{ + int status; + + dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd); + + if (cmd != CMD_RC_PC_RESET_NO_WAIT) + adf7242_wait_ready(lp, __LINE__); + + mutex_lock(&lp->bmux); + lp->buf_cmd = cmd; + status = spi_write(lp->spi, &lp->buf_cmd, 1); + mutex_unlock(&lp->bmux); + + return status; +} + +static int adf7242_upload_firmware(struct adf7242_local *lp, u8 *data, u16 len) +{ + struct spi_message msg; + struct spi_transfer xfer_buf = { }; + int status, i, page = 0; + u8 *buf = lp->buf; + + struct spi_transfer xfer_head = { + .len = 2, + .tx_buf = buf, + }; + + buf[0] = CMD_SPI_PRAM_WR; + buf[1] = 0; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + for (i = len; i >= 0; i -= PRAM_PAGESIZE) { + adf7242_write_reg(lp, REG_PRAMPG, page); + + xfer_buf.len = (i >= PRAM_PAGESIZE) ? PRAM_PAGESIZE : i; + xfer_buf.tx_buf = &data[page * PRAM_PAGESIZE]; + + mutex_lock(&lp->bmux); + status = spi_sync(lp->spi, &msg); + mutex_unlock(&lp->bmux); + page++; + } + + return status; +} + +static int adf7242_verify_firmware(struct adf7242_local *lp, + const u8 *data, size_t len) +{ +#ifdef DEBUG + int i, j; + unsigned int page; + u8 *buf = kmalloc(PRAM_PAGESIZE, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + for (page = 0, i = len; i >= 0; i -= PRAM_PAGESIZE, page++) { + size_t nb = (i >= PRAM_PAGESIZE) ? PRAM_PAGESIZE : i; + + adf7242_write_reg(lp, REG_PRAMPG, page); + adf7242_read_fbuf(lp, buf, nb, false); + + for (j = 0; j < nb; j++) { + if (buf[j] != data[page * PRAM_PAGESIZE + j]) { + kfree(buf); + return -EIO; + } + } + } + kfree(buf); +#endif + return 0; +} + +static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) +{ + struct adf7242_local *lp = hw->priv; + u8 pwr, bias_ctrl, dbias, tmp; + int db = mbm / 100; + + dev_vdbg(&lp->spi->dev, "%s : Power %d dB\n", __func__, db); + + if (db > 5 || db < -26) + return -EINVAL; + + db = DIV_ROUND_CLOSEST(db + 29, 2); + + if (db > 15) { + dbias = PA_DBIAS_HIGH_POWER; + bias_ctrl = PA_BIAS_HIGH_POWER; + } else { + dbias = PA_DBIAS_LOW_POWER; + bias_ctrl = PA_BIAS_LOW_POWER; + } + + pwr = clamp_t(u8, db, 3, 15); + + adf7242_read_reg(lp, REG_PA_CFG, &tmp); + tmp &= ~PA_BRIDGE_DBIAS(~0); + tmp |= PA_BRIDGE_DBIAS(dbias); + adf7242_write_reg(lp, REG_PA_CFG, tmp); + + adf7242_read_reg(lp, REG_PA_BIAS, &tmp); + tmp &= ~PA_BIAS_CTRL(~0); + tmp |= PA_BIAS_CTRL(bias_ctrl); + adf7242_write_reg(lp, REG_PA_BIAS, tmp); + + adf7242_read_reg(lp, REG_EXTPA_MSC, &tmp); + tmp &= ~PA_PWR(~0); + tmp |= PA_PWR(pwr); + + return adf7242_write_reg(lp, REG_EXTPA_MSC, tmp); +} + +static int adf7242_set_csma_params(struct ieee802154_hw *hw, u8 min_be, + u8 max_be, u8 retries) +{ + struct adf7242_local *lp = hw->priv; + int ret; + + dev_vdbg(&lp->spi->dev, "%s : min_be=%d max_be=%d retries=%d\n", + __func__, min_be, max_be, retries); + + if (min_be > max_be || max_be > 8 || retries > 5) + return -EINVAL; + + ret = adf7242_write_reg(lp, REG_AUTO_TX1, + MAX_FRAME_RETRIES(lp->max_frame_retries) | + MAX_CCA_RETRIES(retries)); + if (ret) + return ret; + + lp->max_cca_retries = retries; + lp->max_be = max_be; + lp->min_be = min_be; + + 
return adf7242_write_reg(lp, REG_AUTO_TX2, CSMA_MAX_BE(max_be) | + CSMA_MIN_BE(min_be)); +} + +static int adf7242_set_frame_retries(struct ieee802154_hw *hw, s8 retries) +{ + struct adf7242_local *lp = hw->priv; + int ret = 0; + + dev_vdbg(&lp->spi->dev, "%s : Retries = %d\n", __func__, retries); + + if (retries < -1 || retries > 15) + return -EINVAL; + + if (retries >= 0) + ret = adf7242_write_reg(lp, REG_AUTO_TX1, + MAX_FRAME_RETRIES(retries) | + MAX_CCA_RETRIES(lp->max_cca_retries)); + + lp->max_frame_retries = retries; + + return ret; +} + +static int adf7242_ed(struct ieee802154_hw *hw, u8 *level) +{ + struct adf7242_local *lp = hw->priv; + + *level = lp->rssi; + + dev_vdbg(&lp->spi->dev, "%s :Exit level=%d\n", + __func__, *level); + + return 0; +} + +static int adf7242_start(struct ieee802154_hw *hw) +{ + struct adf7242_local *lp = hw->priv; + + adf7242_cmd(lp, CMD_RC_PHY_RDY); + adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + enable_irq(lp->spi->irq); + set_bit(FLAG_START, &lp->flags); + + return adf7242_cmd(lp, CMD_RC_RX); +} + +static void adf7242_stop(struct ieee802154_hw *hw) +{ + struct adf7242_local *lp = hw->priv; + + adf7242_cmd(lp, CMD_RC_IDLE); + clear_bit(FLAG_START, &lp->flags); + disable_irq(lp->spi->irq); + adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); +} + +static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +{ + struct adf7242_local *lp = hw->priv; + unsigned long freq; + + dev_dbg(&lp->spi->dev, "%s :Channel=%d\n", __func__, channel); + + might_sleep(); + + WARN_ON(page != 0); + WARN_ON(channel < 11); + WARN_ON(channel > 26); + + freq = (2405 + 5 * (channel - 11)) * 100; + adf7242_cmd(lp, CMD_RC_PHY_RDY); + + adf7242_write_reg(lp, REG_CH_FREQ0, freq); + adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); + adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); + + return adf7242_cmd(lp, CMD_RC_RX); +} + +static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed) +{ + struct adf7242_local *lp = hw->priv; + u8 reg; + + dev_dbg(&lp->spi->dev, "%s :Changed=0x%lX\n", __func__, changed); + + might_sleep(); + + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { + u8 addr[8], i; + + memcpy(addr, &filt->ieee_addr, 8); + + for (i = 0; i < 8; i++) + adf7242_write_reg(lp, REG_IEEE_ADDR_0 + i, addr[i]); + } + + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { + u16 saddr = le16_to_cpu(filt->short_addr); + + adf7242_write_reg(lp, REG_SHORT_ADDR_0, saddr); + adf7242_write_reg(lp, REG_SHORT_ADDR_1, saddr >> 8); + } + + if (changed & IEEE802154_AFILT_PANID_CHANGED) { + u16 pan_id = le16_to_cpu(filt->pan_id); + + adf7242_write_reg(lp, REG_PAN_ID0, pan_id); + adf7242_write_reg(lp, REG_PAN_ID1, pan_id >> 8); + } + + if (changed & IEEE802154_AFILT_PANC_CHANGED) { + adf7242_read_reg(lp, REG_AUTO_CFG, &reg); + if (filt->pan_coord) + reg |= IS_PANCOORD; + else + reg &= ~IS_PANCOORD; + adf7242_write_reg(lp, REG_AUTO_CFG, reg); + } + + return 0; +} + +static int adf7242_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) +{ + struct adf7242_local *lp = hw->priv; + + dev_dbg(&lp->spi->dev, "%s : mode %d\n", __func__, on); + + lp->promiscuous = on; + + if (on) { + adf7242_write_reg(lp, REG_AUTO_CFG, 0); + return adf7242_write_reg(lp, REG_FFILT_CFG, + ACCEPT_BEACON_FRAMES | + ACCEPT_DATA_FRAMES | + ACCEPT_MACCMD_FRAMES | + ACCEPT_ALL_ADDRESS | + ACCEPT_ACK_FRAMES | + ACCEPT_RESERVED_FRAMES); + } else { + adf7242_write_reg(lp, REG_FFILT_CFG, + ACCEPT_BEACON_FRAMES | + ACCEPT_DATA_FRAMES | + ACCEPT_MACCMD_FRAMES | + 
ACCEPT_RESERVED_FRAMES); + + return adf7242_write_reg(lp, REG_AUTO_CFG, RX_AUTO_ACK_EN); + } +} + +static int adf7242_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) +{ + struct adf7242_local *lp = hw->priv; + s8 level = clamp_t(s8, mbm / 100, S8_MIN, S8_MAX); + + dev_dbg(&lp->spi->dev, "%s : level %d\n", __func__, level); + + return adf7242_write_reg(lp, REG_CCA1, level); +} + +static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct adf7242_local *lp = hw->priv; + int ret; + + set_bit(FLAG_XMIT, &lp->flags); + reinit_completion(&lp->tx_complete); + adf7242_cmd(lp, CMD_RC_PHY_RDY); + + ret = adf7242_write_fbuf(lp, skb->data, skb->len); + if (ret) + goto err; + + ret = adf7242_cmd(lp, CMD_RC_CSMACA); + if (ret) + goto err; + + ret = wait_for_completion_interruptible_timeout(&lp->tx_complete, + HZ / 10); + if (ret < 0) + goto err; + if (ret == 0) { + dev_dbg(&lp->spi->dev, "Timeout waiting for TX interrupt\n"); + ret = -ETIMEDOUT; + goto err; + } + + if (lp->tx_stat != SUCCESS) { + dev_dbg(&lp->spi->dev, + "Error xmit: Retry count exceeded Status=0x%x\n", + lp->tx_stat); + ret = -ECOMM; + } else { + ret = 0; + } + +err: + clear_bit(FLAG_XMIT, &lp->flags); + adf7242_cmd(lp, CMD_RC_RX); + + return ret; +} + +static int adf7242_rx(struct adf7242_local *lp) +{ + struct sk_buff *skb; + size_t len; + int ret; + u8 lqi, len_u8, *data; + + adf7242_read_reg(lp, 0, &len_u8); + + len = len_u8; + + if (!ieee802154_is_valid_psdu_len(len)) { + dev_dbg(&lp->spi->dev, + "corrupted frame received len %d\n", (int)len); + len = IEEE802154_MTU; + } + + skb = dev_alloc_skb(len); + if (!skb) { + adf7242_cmd(lp, CMD_RC_RX); + return -ENOMEM; + } + + data = skb_put(skb, len); + ret = adf7242_read_fbuf(lp, data, len, true); + if (ret < 0) { + kfree_skb(skb); + adf7242_cmd(lp, CMD_RC_RX); + return ret; + } + + lqi = data[len - 2]; + lp->rssi = data[len - 1]; + + adf7242_cmd(lp, CMD_RC_RX); + + skb_trim(skb, len - 2); /* Don't put RSSI/LQI or CRC into the frame */ + + ieee802154_rx_irqsafe(lp->hw, skb, lqi); + + dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n", + __func__, ret, (int)len, (int)lqi, lp->rssi); + + return 0; +} + +static struct ieee802154_ops adf7242_ops = { + .owner = THIS_MODULE, + .xmit_sync = adf7242_xmit, + .ed = adf7242_ed, + .set_channel = adf7242_channel, + .set_hw_addr_filt = adf7242_set_hw_addr_filt, + .start = adf7242_start, + .stop = adf7242_stop, + .set_csma_params = adf7242_set_csma_params, + .set_frame_retries = adf7242_set_frame_retries, + .set_txpower = adf7242_set_txpower, + .set_promiscuous_mode = adf7242_set_promiscuous_mode, + .set_cca_ed_level = adf7242_set_cca_ed_level, +}; + +static void adf7242_debug(u8 irq1) +{ +#ifdef DEBUG + u8 stat; + + adf7242_status(lp, &stat); + + dev_dbg(&lp->spi->dev, "%s IRQ1 = %X:\n%s%s%s%s%s%s%s%s\n", + __func__, irq1, + irq1 & IRQ_CCA_COMPLETE ? "IRQ_CCA_COMPLETE\n" : "", + irq1 & IRQ_SFD_RX ? "IRQ_SFD_RX\n" : "", + irq1 & IRQ_SFD_TX ? "IRQ_SFD_TX\n" : "", + irq1 & IRQ_RX_PKT_RCVD ? "IRQ_RX_PKT_RCVD\n" : "", + irq1 & IRQ_TX_PKT_SENT ? "IRQ_TX_PKT_SENT\n" : "", + irq1 & IRQ_CSMA_CA ? "IRQ_CSMA_CA\n" : "", + irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "", + irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : ""); + + dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n", + __func__, stat, + stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY", + (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "", + (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "", + (stat & 0xf) == RC_STATUS_PHY_RDY ? 
"RC_STATUS_PHY_RDY" : "", + (stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "", + (stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : ""); + } +#endif +} + +static irqreturn_t adf7242_isr(int irq, void *data) +{ + struct adf7242_local *lp = data; + unsigned xmit; + u8 irq1; + + adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); + + adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); + adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1); + + if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) + dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n", + __func__, irq1); + + adf7242_debug(irq1); + + xmit = test_bit(FLAG_XMIT, &lp->flags); + + if (xmit && (irq1 & IRQ_CSMA_CA)) { + if (ADF7242_REPORT_CSMA_CA_STAT) { + u8 astat; + + adf7242_read_reg(lp, REG_AUTO_STATUS, &astat); + astat &= AUTO_STATUS_MASK; + + dev_dbg(&lp->spi->dev, "AUTO_STATUS = %X:\n%s%s%s%s\n", + astat, + astat == SUCCESS ? "SUCCESS" : "", + astat == + SUCCESS_DATPEND ? "SUCCESS_DATPEND" : "", + astat == FAILURE_CSMACA ? "FAILURE_CSMACA" : "", + astat == FAILURE_NOACK ? "FAILURE_NOACK" : ""); + + /* save CSMA-CA completion status */ + lp->tx_stat = astat; + } else { + lp->tx_stat = SUCCESS; + } + complete(&lp->tx_complete); + } else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) && + (irq1 & IRQ_FRAME_VALID)) { + adf7242_rx(lp); + } else if (!xmit && test_bit(FLAG_START, &lp->flags)) { + /* Invalid packet received - drop it and restart */ + dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n", + __func__, __LINE__, irq1); + adf7242_cmd(lp, CMD_RC_PHY_RDY); + adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + adf7242_cmd(lp, CMD_RC_RX); + } else { + /* This can only be xmit without IRQ, likely a RX packet. + * we get an TX IRQ shortly - do nothing or let the xmit + * timeout handle this + */ + dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n", + __func__, __LINE__, irq1, xmit); + complete(&lp->tx_complete); + } + + return IRQ_HANDLED; +} + +static int adf7242_soft_reset(struct adf7242_local *lp, int line) +{ + dev_warn(&lp->spi->dev, "%s (line %d)\n", __func__, line); + + if (test_bit(FLAG_START, &lp->flags)) + disable_irq_nosync(lp->spi->irq); + + adf7242_cmd(lp, CMD_RC_PC_RESET_NO_WAIT); + usleep_range(200, 250); + adf7242_write_reg(lp, REG_PKT_CFG, ADDON_EN | BIT(2)); + adf7242_cmd(lp, CMD_RC_PHY_RDY); + adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous); + adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be, + lp->max_cca_retries); + adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + + if (test_bit(FLAG_START, &lp->flags)) { + enable_irq(lp->spi->irq); + return adf7242_cmd(lp, CMD_RC_RX); + } + + return 0; +} + +static int adf7242_hw_init(struct adf7242_local *lp) +{ + int ret; + const struct firmware *fw; + + adf7242_cmd(lp, CMD_RC_RESET); + adf7242_cmd(lp, CMD_RC_IDLE); + + /* get ADF7242 addon firmware + * build this driver as module + * and place under /lib/firmware/adf7242_firmware.bin + * or compile firmware into the kernel. 
+ */ + ret = request_firmware(&fw, FIRMWARE, &lp->spi->dev); + if (ret) { + dev_err(&lp->spi->dev, + "request_firmware() failed with %d\n", ret); + return ret; + } + + ret = adf7242_upload_firmware(lp, (u8 *)fw->data, fw->size); + if (ret) { + dev_err(&lp->spi->dev, + "upload firmware failed with %d\n", ret); + return ret; + } + + ret = adf7242_verify_firmware(lp, (u8 *)fw->data, fw->size); + if (ret) { + dev_err(&lp->spi->dev, + "verify firmware failed with %d\n", ret); + return ret; + } + + adf7242_cmd(lp, CMD_RC_PC_RESET); + + release_firmware(fw); + + adf7242_write_reg(lp, REG_FFILT_CFG, + ACCEPT_BEACON_FRAMES | + ACCEPT_DATA_FRAMES | + ACCEPT_MACCMD_FRAMES | + ACCEPT_RESERVED_FRAMES); + + adf7242_write_reg(lp, REG_AUTO_CFG, RX_AUTO_ACK_EN); + + adf7242_write_reg(lp, REG_PKT_CFG, ADDON_EN | BIT(2)); + + adf7242_write_reg(lp, REG_EXTPA_MSC, 0xF1); + adf7242_write_reg(lp, REG_RXFE_CFG, 0x1D); + + adf7242_write_reg(lp, REG_IRQ1_EN0, 0); + adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA); + + adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF); + + adf7242_cmd(lp, CMD_RC_IDLE); + + return 0; +} + +static int adf7242_stats_show(struct seq_file *file, void *offset) +{ + struct adf7242_local *lp = spi_get_drvdata(file->private); + u8 stat, irq1; + + adf7242_status(lp, &stat); + adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); + + seq_printf(file, "IRQ1 = %X:\n%s%s%s%s%s%s%s%s\n", irq1, + irq1 & IRQ_CCA_COMPLETE ? "IRQ_CCA_COMPLETE\n" : "", + irq1 & IRQ_SFD_RX ? "IRQ_SFD_RX\n" : "", + irq1 & IRQ_SFD_TX ? "IRQ_SFD_TX\n" : "", + irq1 & IRQ_RX_PKT_RCVD ? "IRQ_RX_PKT_RCVD\n" : "", + irq1 & IRQ_TX_PKT_SENT ? "IRQ_TX_PKT_SENT\n" : "", + irq1 & IRQ_CSMA_CA ? "IRQ_CSMA_CA\n" : "", + irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "", + irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : ""); + + seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat, + stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY", + (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "", + (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "", + (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "", + (stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "", + (stat & 0xf) == RC_STATUS_TX ? 
"RC_STATUS_TX" : ""); + + seq_printf(file, "RSSI = %d\n", lp->rssi); + + return 0; +} + +static int adf7242_debugfs_init(struct adf7242_local *lp) +{ + char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-"; + struct dentry *stats; + + strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); + + lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); + if (IS_ERR_OR_NULL(lp->debugfs_root)) + return PTR_ERR_OR_ZERO(lp->debugfs_root); + + stats = debugfs_create_devm_seqfile(&lp->spi->dev, "status", + lp->debugfs_root, + adf7242_stats_show); + return PTR_ERR_OR_ZERO(stats); + + return 0; +} + +static const s32 adf7242_powers[] = { + 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, + -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, + -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, +}; + +static const s32 adf7242_ed_levels[] = { + -9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100, + -8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100, + -7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100, + -6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100, + -5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100, + -4000, -3900, -3800, -3700, -3600, -3500, -3400, -3200, -3100, -3000 +}; + +static int adf7242_probe(struct spi_device *spi) +{ + struct ieee802154_hw *hw; + struct adf7242_local *lp; + int ret, irq_type; + + if (!spi->irq) { + dev_err(&spi->dev, "no IRQ specified\n"); + return -EINVAL; + } + + hw = ieee802154_alloc_hw(sizeof(*lp), &adf7242_ops); + if (!hw) + return -ENOMEM; + + lp = hw->priv; + lp->hw = hw; + lp->spi = spi; + + hw->priv = lp; + hw->parent = &spi->dev; + hw->extra_tx_headroom = 0; + + /* We support only 2.4 Ghz */ + hw->phy->supported.channels[0] = 0x7FFF800; + + hw->flags = IEEE802154_HW_OMIT_CKSUM | + IEEE802154_HW_CSMA_PARAMS | + IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS; + + hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | + WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; + + hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY); + + hw->phy->supported.cca_ed_levels = adf7242_ed_levels; + hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(adf7242_ed_levels); + + hw->phy->cca.mode = NL802154_CCA_ENERGY; + + hw->phy->supported.tx_powers = adf7242_powers; + hw->phy->supported.tx_powers_size = ARRAY_SIZE(adf7242_powers); + + hw->phy->supported.min_minbe = 0; + hw->phy->supported.max_minbe = 8; + + hw->phy->supported.min_maxbe = 3; + hw->phy->supported.max_maxbe = 8; + + hw->phy->supported.min_frame_retries = 0; + hw->phy->supported.max_frame_retries = 15; + + hw->phy->supported.min_csma_backoffs = 0; + hw->phy->supported.max_csma_backoffs = 5; + + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + + mutex_init(&lp->bmux); + init_completion(&lp->tx_complete); + + /* Setup Status Message */ + lp->stat_xfer.len = 1; + lp->stat_xfer.tx_buf = &lp->buf_stat_tx; + lp->stat_xfer.rx_buf = &lp->buf_stat_rx; + lp->buf_stat_tx = CMD_SPI_NOP; + + spi_message_init(&lp->stat_msg); + spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); + + spi_set_drvdata(spi, lp); + + ret = adf7242_hw_init(lp); + if (ret) + goto err_hw_init; + + irq_type = irq_get_trigger_type(spi->irq); + if (!irq_type) + irq_type = IRQF_TRIGGER_HIGH; + + ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL, adf7242_isr, + irq_type | IRQF_ONESHOT, + dev_name(&spi->dev), lp); + if (ret) + goto err_hw_init; + + 
disable_irq(spi->irq); + + ret = ieee802154_register_hw(lp->hw); + if (ret) + goto err_hw_init; + + dev_set_drvdata(&spi->dev, lp); + + adf7242_debugfs_init(lp); + + dev_info(&spi->dev, "mac802154 IRQ-%d registered\n", spi->irq); + + return ret; + +err_hw_init: + mutex_destroy(&lp->bmux); + ieee802154_free_hw(lp->hw); + + return ret; +} + +static int adf7242_remove(struct spi_device *spi) +{ + struct adf7242_local *lp = spi_get_drvdata(spi); + + if (!IS_ERR_OR_NULL(lp->debugfs_root)) + debugfs_remove_recursive(lp->debugfs_root); + + ieee802154_unregister_hw(lp->hw); + mutex_destroy(&lp->bmux); + ieee802154_free_hw(lp->hw); + + return 0; +} + +static const struct of_device_id adf7242_of_match[] = { + { .compatible = "adi,adf7242", }, + { }, +}; +MODULE_DEVICE_TABLE(of, adf7242_of_match); + +static const struct spi_device_id adf7242_device_id[] = { + { .name = "adf7242", }, + { }, +}; +MODULE_DEVICE_TABLE(spi, adf7242_device_id); + +static struct spi_driver adf7242_driver = { + .id_table = adf7242_device_id, + .driver = { + .of_match_table = of_match_ptr(adf7242_of_match), + .name = "adf7242", + .owner = THIS_MODULE, + }, + .probe = adf7242_probe, + .remove = adf7242_remove, +}; + +module_spi_driver(adf7242_driver); + +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("ADF7242 IEEE802.15.4 Transceiver Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 199a94a9c8bc..b1cd865ade2e 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -310,8 +310,7 @@ static void atusb_free_urbs(struct atusb *atusb) urb = usb_get_from_anchor(&atusb->idle_urbs); if (!urb) break; - if (urb->context) - kfree_skb(urb->context); + kfree_skb(urb->context); usb_free_urb(urb); } } diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index e65b60591317..d50add705a79 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -21,6 +21,8 @@ #include <linux/skbuff.h> #include <linux/of_gpio.h> #include <linux/ieee802154.h> +#include <linux/crc-ccitt.h> +#include <asm/unaligned.h> #include <net/mac802154.h> #include <net/cfg802154.h> @@ -189,6 +191,18 @@ #define CC2520_RXFIFOCNT 0x3E #define CC2520_TXFIFOCNT 0x3F +/* CC2520_FRMFILT0 */ +#define FRMFILT0_FRAME_FILTER_EN BIT(0) +#define FRMFILT0_PAN_COORDINATOR BIT(1) + +/* CC2520_FRMCTRL0 */ +#define FRMCTRL0_AUTOACK BIT(5) +#define FRMCTRL0_AUTOCRC BIT(6) + +/* CC2520_FRMCTRL1 */ +#define FRMCTRL1_SET_RXENMASK_ON_TX BIT(0) +#define FRMCTRL1_IGNORE_TX_UNDERF BIT(1) + /* Driver private information */ struct cc2520_private { struct spi_device *spi; /* SPI device structure */ @@ -201,6 +215,7 @@ struct cc2520_private { struct work_struct fifop_irqwork;/* Workqueue for FIFOP */ spinlock_t lock; /* Lock for is_tx*/ struct completion tx_complete; /* Work completion for Tx */ + bool promiscuous; /* Flag for promiscuous mode */ }; /* Generic Functions */ @@ -367,14 +382,14 @@ cc2520_read_register(struct cc2520_private *priv, u8 reg, u8 *data) } static int -cc2520_write_txfifo(struct cc2520_private *priv, u8 *data, u8 len) +cc2520_write_txfifo(struct cc2520_private *priv, u8 pkt_len, u8 *data, u8 len) { int status; /* length byte must include FCS even * if it is calculated in the hardware */ - int len_byte = len + 2; + int len_byte = pkt_len; struct spi_message msg; @@ -414,7 +429,7 @@ cc2520_write_txfifo(struct cc2520_private *priv, u8 *data, u8 len) } static int -cc2520_read_rxfifo(struct 
cc2520_private *priv, u8 *data, u8 len, u8 *lqi) +cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len) { int status; struct spi_message msg; @@ -470,12 +485,25 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) unsigned long flags; int rc; u8 status = 0; + u8 pkt_len; + + /* In promiscuous mode we disable AUTOCRC so we can get the raw CRC + * values on RX. This means we need to manually add the CRC on TX. + */ + if (priv->promiscuous) { + u16 crc = crc_ccitt(0, skb->data, skb->len); + + put_unaligned_le16(crc, skb_put(skb, 2)); + pkt_len = skb->len; + } else { + pkt_len = skb->len + 2; + } rc = cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX); if (rc) goto err_tx; - rc = cc2520_write_txfifo(priv, skb->data, skb->len); + rc = cc2520_write_txfifo(priv, pkt_len, skb->data, skb->len); if (rc) goto err_tx; @@ -518,22 +546,62 @@ static int cc2520_rx(struct cc2520_private *priv) u8 len = 0, lqi = 0, bytes = 1; struct sk_buff *skb; - cc2520_read_rxfifo(priv, &len, bytes, &lqi); + /* Read single length byte from the radio. */ + cc2520_read_rxfifo(priv, &len, bytes); - if (len < 2 || len > IEEE802154_MTU) - return -EINVAL; + if (!ieee802154_is_valid_psdu_len(len)) { + /* Corrupted frame received, clear frame buffer by + * reading entire buffer. + */ + dev_dbg(&priv->spi->dev, "corrupted frame received\n"); + len = IEEE802154_MTU; + } skb = dev_alloc_skb(len); if (!skb) return -ENOMEM; - if (cc2520_read_rxfifo(priv, skb_put(skb, len), len, &lqi)) { + if (cc2520_read_rxfifo(priv, skb_put(skb, len), len)) { dev_dbg(&priv->spi->dev, "frame reception failed\n"); kfree_skb(skb); return -EINVAL; } - skb_trim(skb, skb->len - 2); + /* In promiscuous mode, we configure the radio to include the + * CRC (AUTOCRC==0) and we pass on the packet unconditionally. If not + * in promiscuous mode, we check the CRC here, but leave the + * RSSI/LQI/CRC_OK bytes as they will get removed in the mac layer. + */ + if (!priv->promiscuous) { + bool crc_ok; + + /* Check if the CRC is valid. With AUTOCRC set, the most + * significant bit of the last byte returned from the CC2520 + * is CRC_OK flag. See section 20.3.4 of the datasheet. + */ + crc_ok = skb->data[len - 1] & BIT(7); + + /* If we failed CRC drop the packet in the driver layer. */ + if (!crc_ok) { + dev_dbg(&priv->spi->dev, "CRC check failed\n"); + kfree_skb(skb); + return -EINVAL; + } + + /* To calculate LQI, the lower 7 bits of the last byte (the + * correlation value provided by the radio) must be scaled to + * the range 0-255. According to section 20.6, the correlation + * value ranges from 50-110. Ideally this would be calibrated + * per hardware design, but we use roughly the datasheet values + * to get close enough while avoiding floating point. 
+ */ + lqi = skb->data[len - 1] & 0x7f; + if (lqi < 50) + lqi = 50; + else if (lqi > 113) + lqi = 113; + lqi = (lqi - 50) * 4; + } ieee802154_rx_irqsafe(priv->hw, skb, lqi); @@ -619,14 +687,19 @@ cc2520_filter(struct ieee802154_hw *hw, } if (changed & IEEE802154_AFILT_PANC_CHANGED) { + u8 frmfilt0; + dev_vdbg(&priv->spi->dev, "cc2520_filter called for panc change\n"); + + cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0); + if (filt->pan_coord) - ret = cc2520_write_register(priv, CC2520_FRMFILT0, - 0x02); + frmfilt0 |= FRMFILT0_PAN_COORDINATOR; else - ret = cc2520_write_register(priv, CC2520_FRMFILT0, - 0x00); + frmfilt0 &= ~FRMFILT0_PAN_COORDINATOR; + + ret = cc2520_write_register(priv, CC2520_FRMFILT0, frmfilt0); } return ret; @@ -723,6 +796,30 @@ cc2520_set_txpower(struct ieee802154_hw *hw, s32 mbm) return cc2520_cc2591_set_tx_power(priv, mbm); } +static int +cc2520_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) +{ + struct cc2520_private *priv = hw->priv; + u8 frmfilt0; + + dev_dbg(&priv->spi->dev, "%s : mode %d\n", __func__, on); + + priv->promiscuous = on; + + cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0); + + if (on) { + /* Disable automatic ACK, automatic CRC, and frame filtering. */ + cc2520_write_register(priv, CC2520_FRMCTRL0, 0); + frmfilt0 &= ~FRMFILT0_FRAME_FILTER_EN; + } else { + cc2520_write_register(priv, CC2520_FRMCTRL0, FRMCTRL0_AUTOACK | + FRMCTRL0_AUTOCRC); + frmfilt0 |= FRMFILT0_FRAME_FILTER_EN; + } + return cc2520_write_register(priv, CC2520_FRMFILT0, frmfilt0); +} + static const struct ieee802154_ops cc2520_ops = { .owner = THIS_MODULE, .start = cc2520_start, @@ -732,6 +829,7 @@ static const struct ieee802154_ops cc2520_ops = { .set_channel = cc2520_set_channel, .set_hw_addr_filt = cc2520_filter, .set_txpower = cc2520_set_txpower, + .set_promiscuous_mode = cc2520_set_promiscuous_mode, }; static int cc2520_register(struct cc2520_private *priv) @@ -749,7 +847,8 @@ static int cc2520_register(struct cc2520_private *priv) /* We do support only 2.4 Ghz */ priv->hw->phy->supported.channels[0] = 0x7FFF800; - priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT; + priv->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS; priv->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER; @@ -919,6 +1018,11 @@ static int cc2520_hw_init(struct cc2520_private *priv) } /* Registers default value: section 28.1 in Datasheet */ + + /* Set the CCA threshold to -50 dBm. This seems to have been copied + * from the TinyOS CC2520 driver and is much higher than the -84 dBm + * threshold suggested in the datasheet. + */ ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A); if (ret) goto err_ret; @@ -955,15 +1059,10 @@ static int cc2520_hw_init(struct cc2520_private *priv) if (ret) goto err_ret; - ret = cc2520_write_register(priv, CC2520_FRMCTRL0, 0x60); - if (ret) - goto err_ret; - - ret = cc2520_write_register(priv, CC2520_FRMCTRL1, 0x03); - if (ret) - goto err_ret; - - ret = cc2520_write_register(priv, CC2520_FRMFILT0, 0x00); + /* Configure registers correctly for this driver. 
*/ + ret = cc2520_write_register(priv, CC2520_FRMCTRL1, + FRMCTRL1_SET_RXENMASK_ON_TX | + FRMCTRL1_IGNORE_TX_UNDERF); if (ret) goto err_ret; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index a9268db4e349..f94392d07126 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -88,7 +88,7 @@ static struct lock_class_key ipvlan_netdev_xmit_lock_key; static struct lock_class_key ipvlan_netdev_addr_lock_key; #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c index 6d2f55959c49..b977d6d33e74 100644 --- a/drivers/net/irda/toim3232-sir.c +++ b/drivers/net/irda/toim3232-sir.c @@ -130,16 +130,6 @@ static int toim3232delay = 150; /* default is 150 ms */ module_param(toim3232delay, int, 0); MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); -#if 0 -static int toim3232flipdtr = 0; /* default is DTR high to reset */ -module_param(toim3232flipdtr, int, 0); -MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); - -static int toim3232fliprts = 0; /* default is RTS high for baud change */ -module_param(toim3232fliptrs, int, 0); -MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); -#endif - static int toim3232_open(struct sir_dev *); static int toim3232_close(struct sir_dev *); static int toim3232_change_speed(struct sir_dev *, unsigned); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index dc7d970bd1c0..a400288cb37b 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -175,7 +175,7 @@ static void loopback_setup(struct net_device *dev) | NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_RXCSUM - | NETIF_F_SCTP_CSUM + | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 06c8bfeaccd6..6a57a005e0ca 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -758,11 +758,11 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_FEATURES \ - (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ NETIF_F_GSO_ROBUST) #define MACVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0fc521941c71..d636d051fac8 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -388,7 +388,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) * check, we either support them all or none. 
*/ if (skb->ip_summed == CHECKSUM_PARTIAL && - !(features & NETIF_F_ALL_CSUM) && + !(features & NETIF_F_CSUM_MASK) && skb_checksum_help(skb)) goto drop; skb_queue_tail(&q->sk.sk_receive_queue, skb); diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index f31a4e25cf15..680e88f9915a 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,6 +1,6 @@ # Makefile for Linux PHY drivers -libphy-objs := phy.o phy_device.o mdio_bus.o +libphy-objs := phy.o phy_device.o mdio_bus.o mdio_device.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 65a488f82eb8..18141c022b13 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -72,7 +72,6 @@ static struct phy_driver am79c_driver[] = { { .read_status = genphy_read_status, .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(am79c_driver); diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index f1936b7a7af6..09b0b0aa8d68 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -128,7 +128,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_AQ2104, @@ -141,7 +140,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_AQR105, @@ -154,7 +152,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_AQR405, @@ -167,7 +164,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 2d020a3ec0b5..8a8f6fb2880d 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -190,7 +190,7 @@ static int at803x_resume(struct phy_device *phydev) static int at803x_probe(struct phy_device *phydev) { - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct at803x_priv *priv; struct gpio_desc *gpiod_reset; @@ -281,8 +281,8 @@ static void at803x_link_change_notify(struct phy_device *phydev) at803x_context_restore(phydev, &context); - dev_dbg(&phydev->dev, "%s(): phy was reset\n", - __func__); + phydev_dbg(phydev, "%s(): phy was reset\n", + __func__); priv->phy_reset = true; } } else { @@ -310,9 +310,6 @@ static struct phy_driver at803x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, }, { /* ATHEROS 8030 */ .phy_id = ATH8030_PHY_ID, @@ -331,9 +328,6 @@ static struct phy_driver at803x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, }, { /* ATHEROS 8031 */ .phy_id = ATH8031_PHY_ID, @@ -352,9 +346,6 @@ static struct phy_driver at803x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = &at803x_ack_interrupt, .config_intr = 
&at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, } }; module_phy_driver(at803x_driver); diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index ddb377e53633..df0416db0b88 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -184,25 +184,25 @@ int bcm_phy_enable_eee(struct phy_device *phydev) /* Enable EEE at PHY level */ val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (val < 0) return val; val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X; phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, - MDIO_MMD_AN, phydev->addr, (u32)val); + MDIO_MMD_AN, (u32)val); /* Advertise EEE */ val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (val < 0) return val; val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, - MDIO_MMD_AN, phydev->addr, (u32)val); + MDIO_MMD_AN, (u32)val); return 0; } diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 86b28052bf06..e741bf614c4e 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -56,7 +56,6 @@ static struct phy_driver bcm63xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, @@ -69,7 +68,6 @@ static struct phy_driver bcm63xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, } }; module_phy_driver(bcm63xx_driver); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 03d4809a9126..bf241a3ec5e5 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -170,7 +170,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) int ret = 0; pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", - dev_name(&phydev->dev), phydev->drv->name, rev, patch); + phydev_name(phydev), phydev->drv->name, rev, patch); /* Dummy read to a register to workaround an issue upon reset where the * internal inverter may not allow the first MDIO transaction to pass @@ -324,7 +324,6 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev) .config_aneg = genphy_config_aneg, \ .read_status = genphy_read_status, \ .resume = bcm7xxx_28nm_resume, \ - .driver = { .owner = THIS_MODULE }, \ } static struct phy_driver bcm7xxx_driver[] = { @@ -346,7 +345,6 @@ static struct phy_driver bcm7xxx_driver[] = { .read_status = genphy_read_status, .suspend = bcm7xxx_suspend, .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM7429, .phy_id_mask = 0xfffffff0, @@ -359,7 +357,18 @@ static struct phy_driver bcm7xxx_driver[] = { .read_status = genphy_read_status, .suspend = bcm7xxx_suspend, .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, +}, { + .phy_id = PHY_ID_BCM7435, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM7435", + .features = PHY_GBIT_FEATURES | + SUPPORTED_Pause | SUPPORTED_Asym_Pause, + .flags = PHY_IS_INTERNAL, + .config_init = bcm7xxx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .suspend = bcm7xxx_suspend, + .resume = bcm7xxx_config_init, }, { .phy_id = PHY_BCM_OUI_4, .phy_id_mask = 0xffff0000, @@ -372,7 +381,6 @@ static struct phy_driver bcm7xxx_driver[] = { 
.read_status = genphy_read_status, .suspend = bcm7xxx_suspend, .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_BCM_OUI_5, .phy_id_mask = 0xffffff00, @@ -385,7 +393,6 @@ static struct phy_driver bcm7xxx_driver[] = { .read_status = genphy_read_status, .suspend = bcm7xxx_suspend, .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, } }; static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { @@ -395,6 +402,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7425, 0xfffffff0, }, { PHY_ID_BCM7429, 0xfffffff0, }, { PHY_ID_BCM7439, 0xfffffff0, }, + { PHY_ID_BCM7435, 0xfffffff0, }, { PHY_ID_BCM7445, 0xfffffff0, }, { PHY_BCM_OUI_4, 0xffff0000 }, { PHY_BCM_OUI_5, 0xffffff00 }, diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 1eca20452f03..f7ebdcff53e4 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -40,10 +40,10 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) const __be32 *paddr_end; int len, ret; - if (!phydev->dev.of_node) + if (!phydev->mdio.dev.of_node) return 0; - paddr = of_get_property(phydev->dev.of_node, + paddr = of_get_property(phydev->mdio.dev.of_node, "broadcom,c45-reg-init", &len); if (!paddr) return 0; @@ -163,8 +163,9 @@ static int bcm87xx_did_interrupt(struct phy_device *phydev) reg = phy_read(phydev, BCM87XX_LASI_STATUS); if (reg < 0) { - dev_err(&phydev->dev, - "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg); + phydev_err(phydev, + "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", + reg); return 0; } return (reg & 1) != 0; @@ -200,7 +201,6 @@ static struct phy_driver bcm87xx_driver[] = { .config_intr = bcm87xx_config_intr, .did_interrupt = bcm87xx_did_interrupt, .match_phy_device = bcm8706_match_phy_device, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM8727, .phy_id_mask = 0xffffffff, @@ -213,7 +213,6 @@ static struct phy_driver bcm87xx_driver[] = { .config_intr = bcm87xx_config_intr, .did_interrupt = bcm87xx_did_interrupt, .match_phy_device = bcm8727_match_phy_device, - .driver = { .owner = THIS_MODULE }, } }; module_phy_driver(bcm87xx_driver); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 3ce5d9514623..870327efccf7 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -460,7 +460,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, @@ -473,7 +472,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, @@ -486,7 +484,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM54616S, .phy_id_mask = 0xfffffff0, @@ -499,7 +496,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, @@ -512,7 +508,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = 
bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, @@ -525,7 +520,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, @@ -538,7 +532,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = bcm5482_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, @@ -551,7 +544,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, @@ -564,7 +556,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, @@ -577,7 +568,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 0xfffffff0, @@ -590,7 +580,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5241, .phy_id_mask = 0xfffffff0, @@ -603,7 +592,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, - .driver = { .owner = THIS_MODULE }, } }; module_phy_driver(broadcom_drivers); diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index 27f5464899d4..d339c1afea77 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -114,7 +114,6 @@ static struct phy_driver cis820x_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x000fc440, .name = "Cicada Cis8204", @@ -126,7 +125,6 @@ static struct phy_driver cis820x_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(cis820x_driver); diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 2a328703b4ae..36e3e2033eca 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -156,7 +156,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x0181b8b0, .name = "Davicom DM9161B/C", @@ -168,7 +167,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x0181b8a0, .name = "Davicom DM9161A", @@ -180,7 +178,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, 
.ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00181b80, .name = "Davicom DM9131", @@ -191,7 +188,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(dm91xx_driver); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 47b711739ba9..180f69952779 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -220,9 +220,10 @@ static void rx_timestamp_work(struct work_struct *work); #define BROADCAST_ADDR 31 -static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val) +static inline int broadcast_write(struct phy_device *phydev, u32 regnum, + u16 val) { - return mdiobus_write(bus, BROADCAST_ADDR, regnum, val); + return mdiobus_write(phydev->mdio.bus, BROADCAST_ADDR, regnum, val); } /* Caller must hold extreg_lock. */ @@ -232,7 +233,7 @@ static int ext_read(struct phy_device *phydev, int page, u32 regnum) int val; if (dp83640->clock->page != page) { - broadcast_write(phydev->bus, PAGESEL, page); + broadcast_write(phydev, PAGESEL, page); dp83640->clock->page = page; } val = phy_read(phydev, regnum); @@ -247,11 +248,11 @@ static void ext_write(int broadcast, struct phy_device *phydev, struct dp83640_private *dp83640 = phydev->priv; if (dp83640->clock->page != page) { - broadcast_write(phydev->bus, PAGESEL, page); + broadcast_write(phydev, PAGESEL, page); dp83640->clock->page = page; } if (broadcast) - broadcast_write(phydev->bus, regnum, val); + broadcast_write(phydev, regnum, val); else phy_write(phydev, regnum, val); } @@ -1039,7 +1040,7 @@ static int choose_this_phy(struct dp83640_clock *clock, if (chosen_phy == -1 && !clock->chosen) return 1; - if (chosen_phy == phydev->addr) + if (chosen_phy == phydev->mdio.addr) return 1; return 0; @@ -1103,10 +1104,10 @@ static int dp83640_probe(struct phy_device *phydev) struct dp83640_private *dp83640; int err = -ENOMEM, i; - if (phydev->addr == BROADCAST_ADDR) + if (phydev->mdio.addr == BROADCAST_ADDR) return 0; - clock = dp83640_clock_get_bus(phydev->bus); + clock = dp83640_clock_get_bus(phydev->mdio.bus); if (!clock) goto no_clock; @@ -1132,7 +1133,8 @@ static int dp83640_probe(struct phy_device *phydev) if (choose_this_phy(clock, phydev)) { clock->chosen = dp83640; - clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev); + clock->ptp_clock = ptp_clock_register(&clock->caps, + &phydev->mdio.dev); if (IS_ERR(clock->ptp_clock)) { err = PTR_ERR(clock->ptp_clock); goto no_register; @@ -1158,7 +1160,7 @@ static void dp83640_remove(struct phy_device *phydev) struct list_head *this, *next; struct dp83640_private *tmp, *dp83640 = phydev->priv; - if (phydev->addr == BROADCAST_ADDR) + if (phydev->mdio.addr == BROADCAST_ADDR) return; enable_status_frames(phydev, false); @@ -1490,12 +1492,11 @@ static struct phy_driver dp83640_driver = { .hwtstamp = dp83640_hwtstamp, .rxtstamp = dp83640_rxtstamp, .txtstamp = dp83640_txtstamp, - .driver = {.owner = THIS_MODULE,} }; static int __init dp83640_init(void) { - return phy_driver_register(&dp83640_driver); + return phy_driver_register(&dp83640_driver, THIS_MODULE); } static void __exit dp83640_exit(void) diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 5ce9bef54468..5e14e629c597 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -88,8 +88,6 @@ static struct phy_driver 
dp83848_driver[] = { /* IRQ related */ .ack_interrupt = dp83848_ack_interrupt, .config_intr = dp83848_config_intr, - - .driver = { .owner = THIS_MODULE, }, }, }; module_phy_driver(dp83848_driver); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 32f10662f4ac..2afa61b51d41 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -103,14 +103,11 @@ static int dp83867_config_intr(struct phy_device *phydev) static int dp83867_of_init(struct phy_device *phydev) { struct dp83867_private *dp83867 = phydev->priv; - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; int ret; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; - - if (!phydev->dev.of_node) + if (!of_node) return -ENODEV; ret = of_property_read_u32(of_node, "ti,rx-internal-delay", @@ -140,7 +137,7 @@ static int dp83867_config_init(struct phy_device *phydev) u16 val, delay; if (!phydev->priv) { - dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867), + dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), GFP_KERNEL); if (!dp83867) return -ENOMEM; @@ -163,7 +160,7 @@ static int dp83867_config_init(struct phy_device *phydev) if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, - DP83867_DEVADDR, phydev->addr); + DP83867_DEVADDR); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN); @@ -175,13 +172,13 @@ static int dp83867_config_init(struct phy_device *phydev) val |= DP83867_RGMII_RX_CLK_DELAY_EN; phy_write_mmd_indirect(phydev, DP83867_RGMIICTL, - DP83867_DEVADDR, phydev->addr, val); + DP83867_DEVADDR, val); delay = (dp83867->rx_id_delay | (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT)); phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL, - DP83867_DEVADDR, phydev->addr, delay); + DP83867_DEVADDR, delay); } return 0; @@ -217,8 +214,6 @@ static struct phy_driver dp83867_driver[] = { .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = {.owner = THIS_MODULE,} }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index a907743816a8..a9a4edfa23c8 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -95,7 +95,6 @@ static struct phy_driver et1011c_driver[] = { { .flags = PHY_POLL, .config_aneg = et1011c_config_aneg, .read_status = et1011c_read_status, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(et1011c_driver); diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index e23bf5b90e17..ab9c473d75ea 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -27,7 +27,6 @@ #define MII_REGS_NUM 29 struct fixed_mdio_bus { - int irqs[PHY_MAX_ADDR]; struct mii_bus *mii_bus; struct list_head phys; }; @@ -198,11 +197,11 @@ int fixed_phy_set_link_update(struct phy_device *phydev, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; - if (!phydev || !phydev->bus) + if (!phydev || !phydev->mdio.bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { - if (fp->addr == phydev->addr) { + if (fp->addr == phydev->mdio.addr) { fp->link_update = link_update; fp->phydev = phydev; return 0; @@ -220,11 +219,11 @@ int fixed_phy_update_state(struct phy_device *phydev, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; - if 
(!phydev || phydev->bus != fmb->mii_bus) + if (!phydev || phydev->mdio.bus != fmb->mii_bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { - if (fp->addr == phydev->addr) { + if (fp->addr == phydev->mdio.addr) { #define _UPD(x) if (changed->x) \ fp->status.x = status->x _UPD(link); @@ -256,7 +255,7 @@ int fixed_phy_add(unsigned int irq, int phy_addr, memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); - fmb->irqs[phy_addr] = irq; + fmb->mii_bus->irq[phy_addr] = irq; fp->addr = phy_addr; fp->status = *status; @@ -345,7 +344,7 @@ struct phy_device *fixed_phy_register(unsigned int irq, } of_node_get(np); - phy->dev.of_node = np; + phy->mdio.dev.of_node = np; phy->is_pseudo_fixed_link = true; switch (status->speed) { @@ -395,7 +394,6 @@ static int __init fixed_mdio_bus_init(void) fmb->mii_bus->parent = &pdev->dev; fmb->mii_bus->read = &fixed_mdio_read; fmb->mii_bus->write = &fixed_mdio_write; - fmb->mii_bus->irq = fmb->irqs; ret = mdiobus_register(fmb->mii_bus); if (ret) diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 0dbc445a5fa0..e5f251b91578 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -53,43 +53,43 @@ static int ip175c_config_init(struct phy_device *phydev) if (full_reset_performed == 0) { /* master reset */ - err = mdiobus_write(phydev->bus, 30, 0, 0x175c); + err = mdiobus_write(phydev->mdio.bus, 30, 0, 0x175c); if (err < 0) return err; /* ensure no bus delays overlap reset period */ - err = mdiobus_read(phydev->bus, 30, 0); + err = mdiobus_read(phydev->mdio.bus, 30, 0); /* data sheet specifies reset period is 2 msec */ mdelay(2); /* enable IP175C mode */ - err = mdiobus_write(phydev->bus, 29, 31, 0x175c); + err = mdiobus_write(phydev->mdio.bus, 29, 31, 0x175c); if (err < 0) return err; /* Set MII0 speed and duplex (in PHY mode) */ - err = mdiobus_write(phydev->bus, 29, 22, 0x420); + err = mdiobus_write(phydev->mdio.bus, 29, 22, 0x420); if (err < 0) return err; /* reset switch ports */ for (i = 0; i < 5; i++) { - err = mdiobus_write(phydev->bus, i, + err = mdiobus_write(phydev->mdio.bus, i, MII_BMCR, BMCR_RESET); if (err < 0) return err; } for (i = 0; i < 5; i++) - err = mdiobus_read(phydev->bus, i, MII_BMCR); + err = mdiobus_read(phydev->mdio.bus, i, MII_BMCR); mdelay(2); full_reset_performed = 1; } - if (phydev->addr != 4) { + if (phydev->mdio.addr != 4) { phydev->state = PHY_RUNNING; phydev->speed = SPEED_100; phydev->duplex = DUPLEX_FULL; @@ -184,7 +184,7 @@ static int ip101a_g_config_init(struct phy_device *phydev) static int ip175c_read_status(struct phy_device *phydev) { - if (phydev->addr == 4) /* WAN port */ + if (phydev->mdio.addr == 4) /* WAN port */ genphy_read_status(phydev); else /* Don't need to read status for switch ports */ @@ -195,7 +195,7 @@ static int ip175c_read_status(struct phy_device *phydev) static int ip175c_config_aneg(struct phy_device *phydev) { - if (phydev->addr == 4) /* WAN port */ + if (phydev->mdio.addr == 4) /* WAN port */ genphy_config_aneg(phydev); return 0; @@ -221,7 +221,6 @@ static struct phy_driver icplus_driver[] = { .read_status = &ip175c_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x02430d90, .name = "ICPlus IP1001", @@ -233,7 +232,6 @@ static struct phy_driver icplus_driver[] = { .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x02430c54, .name = "ICPlus IP101A/G", @@ -247,7 +245,6 @@ static struct phy_driver 
icplus_driver[] = { .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(icplus_driver); diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index a3a5a703635b..f6078376ef50 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -278,7 +278,6 @@ static struct phy_driver lxt97x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001378e0, .name = "LXT971", @@ -289,7 +288,6 @@ static struct phy_driver lxt97x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00137a10, .name = "LXT973-A2", @@ -299,7 +297,6 @@ static struct phy_driver lxt97x_driver[] = { .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, .read_status = lxt973a2_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00137a10, .name = "LXT973", @@ -309,7 +306,6 @@ static struct phy_driver lxt97x_driver[] = { .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, .read_status = genphy_read_status, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(lxt97x_driver); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0240552b50f3..e3eb96443c97 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -137,6 +137,22 @@ MODULE_DESCRIPTION("Marvell PHY driver"); MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); +struct marvell_hw_stat { + const char *string; + u8 page; + u8 reg; + u8 bits; +}; + +static struct marvell_hw_stat marvell_hw_stats[] = { + { "phy_receive_errors", 0, 21, 16}, + { "phy_idle_errors", 0, 10, 8 }, +}; + +struct marvell_priv { + u64 stats[ARRAY_SIZE(marvell_hw_stats)]; +}; + static int marvell_ack_interrupt(struct phy_device *phydev) { int err; @@ -284,10 +300,11 @@ static int marvell_of_reg_init(struct phy_device *phydev) const __be32 *paddr; int len, i, saved_page, current_page, page_changed, ret; - if (!phydev->dev.of_node) + if (!phydev->mdio.dev.of_node) return 0; - paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len); + paddr = of_get_property(phydev->mdio.dev.of_node, + "marvell,reg-init", &len); if (!paddr || len < (4 * sizeof(*paddr))) return 0; @@ -986,12 +1003,80 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w return 0; } +static int marvell_get_sset_count(struct phy_device *phydev) +{ + return ARRAY_SIZE(marvell_hw_stats); +} + +static void marvell_get_strings(struct phy_device *phydev, u8 *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { + memcpy(data + i * ETH_GSTRING_LEN, + marvell_hw_stats[i].string, ETH_GSTRING_LEN); + } +} + +#ifndef UINT64_MAX +#define UINT64_MAX (u64)(~((u64)0)) +#endif +static u64 marvell_get_stat(struct phy_device *phydev, int i) +{ + struct marvell_hw_stat stat = marvell_hw_stats[i]; + struct marvell_priv *priv = phydev->priv; + int err, oldpage; + u64 val; + + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, + stat.page); + if (err < 0) + return UINT64_MAX; + + val = phy_read(phydev, stat.reg); + if (val < 0) { + val = UINT64_MAX; + } else { + val = val & ((1 << stat.bits) - 1); + priv->stats[i] += val; + val = priv->stats[i]; + } + + phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); + + return val; +} + +static void 
marvell_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) + data[i] = marvell_get_stat(phydev, i); +} + +static int marvell_probe(struct phy_device *phydev) +{ + struct marvell_priv *priv; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return 0; +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, + .probe = marvell_probe, .flags = PHY_HAS_INTERRUPT, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -999,7 +1084,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1112, @@ -1007,6 +1094,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1112", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -1014,7 +1102,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1111, @@ -1022,6 +1112,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1111", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, .read_status = &marvell_read_status, @@ -1029,7 +1120,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1118, @@ -1037,6 +1130,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1118", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1118_config_init, .config_aneg = &m88e1118_config_aneg, .read_status = &genphy_read_status, @@ -1044,7 +1138,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = {.owner = THIS_MODULE,}, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1121R, @@ -1052,6 +1148,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1121R", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, @@ -1059,7 +1156,9 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = 
marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1318S, @@ -1067,6 +1166,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1318S", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_aneg = &m88e1318_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, @@ -1076,7 +1176,9 @@ static struct phy_driver marvell_drivers[] = { .set_wol = &m88e1318_set_wol, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1145, @@ -1084,6 +1186,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1145", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1145_config_init, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -1091,7 +1194,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1149R, @@ -1099,6 +1204,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1149R", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1149_config_init, .config_aneg = &m88e1118_config_aneg, .read_status = &genphy_read_status, @@ -1106,7 +1212,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1240, @@ -1114,6 +1222,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1240", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -1121,7 +1230,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1116R, @@ -1129,6 +1240,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1116R", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &m88e1116r_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, @@ -1136,7 +1248,9 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1510, @@ -1144,6 +1258,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1510", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, 
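+	/* marvell_probe() allocates the marvell_priv buffer that backs the
+	 * ethtool statistics, so every entry wiring up get_stats also needs
+	 * the probe hook.
+	 */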
.config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, @@ -1151,7 +1266,9 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E1540, @@ -1159,6 +1276,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1540", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, @@ -1166,7 +1284,9 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, { .phy_id = MARVELL_PHY_ID_88E3016, @@ -1174,6 +1294,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E3016", .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_aneg = &genphy_config_aneg, .config_init = &m88e3016_config_init, .aneg_done = &marvell_aneg_done, @@ -1183,7 +1304,9 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, }; diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 4bde5e728fe0..8c73b2e771dd 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -200,16 +200,10 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus->reset = unimac_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); - bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto out_mdio_free; - } - ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); - goto out_mdio_irq; + goto out_mdio_free; } platform_set_drvdata(pdev, priv); @@ -218,8 +212,6 @@ static int unimac_mdio_probe(struct platform_device *pdev) return 0; -out_mdio_irq: - kfree(bus->irq); out_mdio_free: mdiobus_free(bus); return ret; @@ -230,7 +222,6 @@ static int unimac_mdio_remove(struct platform_device *pdev) struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return 0; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 95f51d7267b3..27ab63064f95 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -159,7 +159,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, new_bus->phy_mask = pdata->phy_mask; new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; - new_bus->irq = pdata->irqs; + memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq)); new_bus->parent = dev; if (new_bus->phy_mask == ~0) diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c index f1fc51f655d9..5bb56d126693 100644 --- a/drivers/net/phy/mdio-moxart.c +++ b/drivers/net/phy/mdio-moxart.c @@ -130,13 +130,6 @@ static int 
moxart_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id); bus->parent = &pdev->dev; - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - /* Setting PHY_IGNORE_INTERRUPT here even if it has no effect, * of_mdiobus_register() sets these PHY_POLL. * Ideally, the interrupt from MAC controller could be used to diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 908e8d486342..308ade0eb1b6 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -34,7 +34,6 @@ struct mdio_mux_child_bus { struct mdio_mux_parent_bus *parent; struct mdio_mux_child_bus *next; int bus_number; - int phy_irq[PHY_MAX_ADDR]; }; /* @@ -149,10 +148,15 @@ int mdio_mux_init(struct device *dev, } cb->bus_number = v; cb->parent = pb; + cb->mii_bus = mdiobus_alloc(); + if (!cb->mii_bus) { + ret_val = -ENOMEM; + of_node_put(child_bus_node); + break; + } cb->mii_bus->priv = cb; - cb->mii_bus->irq = cb->phy_irq; cb->mii_bus->name = "mdio_mux"; snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", pb->parent_id, v); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index fcf4e4df7cc8..47d4f2f263d1 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -113,7 +113,6 @@ struct octeon_mdiobus { resource_size_t mdio_phys; resource_size_t regsize; enum octeon_mdiobus_mode mode; - int phy_irq[PHY_MAX_ADDR]; }; #ifdef CONFIG_CAVIUM_OCTEON_SOC @@ -268,12 +267,13 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, static int octeon_mdiobus_probe(struct platform_device *pdev) { struct octeon_mdiobus *bus; + struct mii_bus *mii_bus; struct resource *res_mem; union cvmx_smix_en smi_en; int err = -ENOENT; - bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) + mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus)); + if (!mii_bus) return -ENOMEM; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -282,6 +282,8 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENXIO; } + bus = mii_bus->priv; + bus->mii_bus = mii_bus; bus->mdio_phys = res_mem->start; bus->regsize = resource_size(res_mem); @@ -298,16 +300,11 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENOMEM; } - bus->mii_bus = mdiobus_alloc(); - if (!bus->mii_bus) - goto fail; - smi_en.u64 = 0; smi_en.s.en = 1; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); bus->mii_bus->priv = bus; - bus->mii_bus->irq = bus->phy_irq; bus->mii_bus->name = "mdio-octeon"; snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base); bus->mii_bus->parent = &pdev->dev; @@ -326,7 +323,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return 0; fail_register: mdiobus_free(bus->mii_bus); -fail: smi_en.u64 = 0; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); return err; diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 15bc7f9ea224..f70522c35163 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -96,7 +96,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev) struct mii_bus *bus; struct sun4i_mdio_data *data; struct resource *res; - int ret, i; + int ret; bus = mdiobus_alloc_size(sizeof(*data)); if (!bus) @@ -108,16 +108,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = 
&pdev->dev; - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->irq[i] = PHY_POLL; - data = bus->priv; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->membase = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 88cb4592b6fb..0cba64f1ecf4 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -38,6 +38,48 @@ #include <asm/irq.h> +int mdiobus_register_device(struct mdio_device *mdiodev) +{ + if (mdiodev->bus->mdio_map[mdiodev->addr]) + return -EBUSY; + + mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; + + return 0; +} +EXPORT_SYMBOL(mdiobus_register_device); + +int mdiobus_unregister_device(struct mdio_device *mdiodev) +{ + if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) + return -EINVAL; + + mdiodev->bus->mdio_map[mdiodev->addr] = NULL; + + return 0; +} +EXPORT_SYMBOL(mdiobus_unregister_device); + +struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) +{ + struct mdio_device *mdiodev = bus->mdio_map[addr]; + + if (!mdiodev) + return NULL; + + if (!(mdiodev->flags & MDIO_DEVICE_FLAG_PHY)) + return NULL; + + return container_of(mdiodev, struct phy_device, mdio); +} +EXPORT_SYMBOL(mdiobus_get_phy); + +bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) +{ + return bus->mdio_map[addr]; +} +EXPORT_SYMBOL(mdiobus_is_registered_device); + /** * mdiobus_alloc_size - allocate a mii_bus structure * @size: extra amount of memory to allocate for private storage. @@ -51,6 +93,7 @@ struct mii_bus *mdiobus_alloc_size(size_t size) struct mii_bus *bus; size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN); size_t alloc_size; + int i; /* If we alloc extra space, it should be aligned */ if (size) @@ -59,11 +102,16 @@ struct mii_bus *mdiobus_alloc_size(size_t size) alloc_size = sizeof(*bus); bus = kzalloc(alloc_size, GFP_KERNEL); - if (bus) { - bus->state = MDIOBUS_ALLOCATED; - if (size) - bus->priv = (void *)bus + aligned_size; - } + if (!bus) + return NULL; + + bus->state = MDIOBUS_ALLOCATED; + if (size) + bus->priv = (void *)bus + aligned_size; + + /* Initialise the interrupts to polling */ + for (i = 0; i < PHY_MAX_ADDR; i++) + bus->irq[i] = PHY_POLL; return bus; } @@ -190,47 +238,48 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) } EXPORT_SYMBOL(of_mdio_find_bus); -/* Walk the list of subnodes of a mdio bus and look for a node that matches the - * phy's address with its 'reg' property. If found, set the of_node pointer for - * the phy. This allows auto-probed pyh devices to be supplied with information - * passed in via DT. +/* Walk the list of subnodes of a mdio bus and look for a node that + * matches the mdio device's address with its 'reg' property. If + * found, set the of_node pointer for the mdio device. This allows + * auto-probed phy devices to be supplied with information passed in + * via DT. 
*/ -static void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) +static void of_mdiobus_link_mdiodev(struct mii_bus *bus, + struct mdio_device *mdiodev) { - struct device *dev = &phydev->dev; + struct device *dev = &mdiodev->dev; struct device_node *child; - if (dev->of_node || !mdio->dev.of_node) + if (dev->of_node || !bus->dev.of_node) return; - for_each_available_child_of_node(mdio->dev.of_node, child) { + for_each_available_child_of_node(bus->dev.of_node, child) { int addr; int ret; ret = of_property_read_u32(child, "reg", &addr); if (ret < 0) { - dev_err(dev, "%s has invalid PHY address\n", + dev_err(dev, "%s has invalid MDIO address\n", child->full_name); continue; } - /* A PHY must have a reg property in the range [0-31] */ + /* A MDIO device must have a reg property in the range [0-31] */ if (addr >= PHY_MAX_ADDR) { - dev_err(dev, "%s PHY address %i is too large\n", + dev_err(dev, "%s MDIO address %i is too large\n", child->full_name, addr); continue; } - if (addr == phydev->addr) { + if (addr == mdiodev->addr) { dev->of_node = child; return; } } } #else /* !IS_ENABLED(CONFIG_OF_MDIO) */ -static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) +static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio, + struct mdio_device *mdiodev) { } #endif @@ -243,12 +292,15 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, * Description: Called by a bus driver to bring up all the PHYs * on a given bus, and attach them to the bus. Drivers should use * mdiobus_register() rather than __mdiobus_register() unless they - * need to pass a specific owner module. + * need to pass a specific owner module. MDIO devices which are not + * PHYs will not be brought up by this function. They are expected to + * to be explicitly listed in DT and instantiated by of_mdiobus_register(). * * Returns 0 on success or < 0 on error. */ int __mdiobus_register(struct mii_bus *bus, struct module *owner) { + struct mdio_device *mdiodev; int i, err; if (NULL == bus || NULL == bus->name || @@ -294,11 +346,12 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) error: while (--i >= 0) { - struct phy_device *phydev = bus->phy_map[i]; - if (phydev) { - phy_device_remove(phydev); - phy_device_free(phydev); - } + mdiodev = bus->mdio_map[i]; + if (!mdiodev) + continue; + + mdiodev->device_remove(mdiodev); + mdiodev->device_free(mdiodev); } device_del(&bus->dev); return err; @@ -307,17 +360,19 @@ EXPORT_SYMBOL(__mdiobus_register); void mdiobus_unregister(struct mii_bus *bus) { + struct mdio_device *mdiodev; int i; BUG_ON(bus->state != MDIOBUS_REGISTERED); bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *phydev = bus->phy_map[i]; - if (phydev) { - phy_device_remove(phydev); - phy_device_free(phydev); - } + mdiodev = bus->mdio_map[i]; + if (!mdiodev) + continue; + + mdiodev->device_remove(mdiodev); + mdiodev->device_free(mdiodev); } device_del(&bus->dev); } @@ -346,6 +401,18 @@ void mdiobus_free(struct mii_bus *bus) } EXPORT_SYMBOL(mdiobus_free); +/** + * mdiobus_scan - scan a bus for MDIO devices. + * @bus: mii_bus to scan + * @addr: address on bus to scan + * + * This function scans the MDIO bus, looking for devices which can be + * identified using a vendor/product ID in registers 2 and 3. Not all + * MDIO devices have such registers, but PHY devices typically + * do. Hence this function assumes anything found is a PHY, or can be + * treated as a PHY. 
Other MDIO devices, such as switches, will + * probably not be found during the scan. + */ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { struct phy_device *phydev; @@ -359,7 +426,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) * For DT, see if the auto-probed phy has a correspoding child * in the bus node, and set the of_node pointer in this case. */ - of_mdiobus_link_phydev(bus, phydev); + of_mdiobus_link_mdiodev(bus, &phydev->mdio); err = phy_device_register(phydev); if (err) { @@ -476,133 +543,56 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) EXPORT_SYMBOL(mdiobus_write); /** - * mdio_bus_match - determine if given PHY driver supports the given PHY device - * @dev: target PHY device - * @drv: given PHY driver + * mdio_bus_match - determine if given MDIO driver supports the given + * MDIO device + * @dev: target MDIO device + * @drv: given MDIO driver * - * Description: Given a PHY device, and a PHY driver, return 1 if - * the driver supports the device. Otherwise, return 0. + * Description: Given a MDIO device, and a MDIO driver, return 1 if + * the driver supports the device. Otherwise, return 0. This may + * require calling the devices own match function, since different classes + * of MDIO devices have different match criteria. */ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { - struct phy_device *phydev = to_phy_device(dev); - struct phy_driver *phydrv = to_phy_driver(drv); - const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); - int i; + struct mdio_device *mdio = to_mdio_device(dev); if (of_driver_match_device(dev, drv)) return 1; - if (phydrv->match_phy_device) - return phydrv->match_phy_device(phydev); - - if (phydev->is_c45) { - for (i = 1; i < num_ids; i++) { - if (!(phydev->c45_ids.devices_in_package & (1 << i))) - continue; + if (mdio->bus_match) + return mdio->bus_match(dev, drv); - if ((phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->c45_ids.device_ids[i] & - phydrv->phy_id_mask)) - return 1; - } - return 0; - } else { - return (phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->phy_id & phydrv->phy_id_mask); - } + return 0; } #ifdef CONFIG_PM - -static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) -{ - struct device_driver *drv = phydev->dev.driver; - struct phy_driver *phydrv = to_phy_driver(drv); - struct net_device *netdev = phydev->attached_dev; - - if (!drv || !phydrv->suspend) - return false; - - /* PHY not attached? May suspend if the PHY has not already been - * suspended as part of a prior call to phy_disconnect() -> - * phy_detach() -> phy_suspend() because the parent netdev might be the - * MDIO bus driver and clock gated at this point. - */ - if (!netdev) - return !phydev->suspended; - - /* Don't suspend PHY if the attched netdev parent may wakeup. - * The parent may point to a PCI device, as in tg3 driver. - */ - if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) - return false; - - /* Also don't suspend PHY if the netdev itself may wakeup. This - * is the case for devices w/o underlaying pwr. mgmt. aware bus, - * e.g. SoC devices. - */ - if (device_may_wakeup(&netdev->dev)) - return false; - - return true; -} - static int mdio_bus_suspend(struct device *dev) { - struct phy_device *phydev = to_phy_device(dev); + struct mdio_device *mdio = to_mdio_device(dev); - /* We must stop the state machine manually, otherwise it stops out of - * control, possibly with the phydev->lock held. 
Upon resume, netdev - * may call phy routines that try to grab the same lock, and that may - * lead to a deadlock. - */ - if (phydev->attached_dev && phydev->adjust_link) - phy_stop_machine(phydev); + if (mdio->pm_ops && mdio->pm_ops->suspend) + return mdio->pm_ops->suspend(dev); - if (!mdio_bus_phy_may_suspend(phydev)) - return 0; - - return phy_suspend(phydev); + return 0; } static int mdio_bus_resume(struct device *dev) { - struct phy_device *phydev = to_phy_device(dev); - int ret; - - if (!mdio_bus_phy_may_suspend(phydev)) - goto no_resume; - - ret = phy_resume(phydev); - if (ret < 0) - return ret; + struct mdio_device *mdio = to_mdio_device(dev); -no_resume: - if (phydev->attached_dev && phydev->adjust_link) - phy_start_machine(phydev); + if (mdio->pm_ops && mdio->pm_ops->resume) + return mdio->pm_ops->resume(dev); return 0; } static int mdio_bus_restore(struct device *dev) { - struct phy_device *phydev = to_phy_device(dev); - struct net_device *netdev = phydev->attached_dev; - int ret; - - if (!netdev) - return 0; + struct mdio_device *mdio = to_mdio_device(dev); - ret = phy_init_hw(phydev); - if (ret < 0) - return ret; - - /* The PHY needs to renegotiate. */ - phydev->link = 0; - phydev->state = PHY_UP; - - phy_start_machine(phydev); + if (mdio->pm_ops && mdio->pm_ops->restore) + return mdio->pm_ops->restore(dev); return 0; } @@ -623,52 +613,10 @@ static const struct dev_pm_ops mdio_bus_pm_ops = { #endif /* CONFIG_PM */ -static ssize_t -phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); -} -static DEVICE_ATTR_RO(phy_id); - -static ssize_t -phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - const char *mode = NULL; - - if (phy_is_internal(phydev)) - mode = "internal"; - else - mode = phy_modes(phydev->interface); - - return sprintf(buf, "%s\n", mode); -} -static DEVICE_ATTR_RO(phy_interface); - -static ssize_t -phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "%d\n", phydev->has_fixups); -} -static DEVICE_ATTR_RO(phy_has_fixups); - -static struct attribute *mdio_dev_attrs[] = { - &dev_attr_phy_id.attr, - &dev_attr_phy_interface.attr, - &dev_attr_phy_has_fixups.attr, - NULL, -}; -ATTRIBUTE_GROUPS(mdio_dev); - struct bus_type mdio_bus_type = { .name = "mdio_bus", .match = mdio_bus_match, .pm = MDIO_BUS_PM_OPS, - .dev_groups = mdio_dev_groups, }; EXPORT_SYMBOL(mdio_bus_type); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c new file mode 100644 index 000000000000..9c88e6749b9a --- /dev/null +++ b/drivers/net/phy/mdio_device.c @@ -0,0 +1,171 @@ +/* Framework for MDIO devices, other than PHYs. + * + * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mdio.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/unistd.h> + +void mdio_device_free(struct mdio_device *mdiodev) +{ + put_device(&mdiodev->dev); +} +EXPORT_SYMBOL(mdio_device_free); + +static void mdio_device_release(struct device *dev) +{ + kfree(to_mdio_device(dev)); +} + +struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) +{ + struct mdio_device *mdiodev; + + /* We allocate the device, and initialize the default values */ + mdiodev = kzalloc(sizeof(*mdiodev), GFP_KERNEL); + if (!mdiodev) + return ERR_PTR(-ENOMEM); + + mdiodev->dev.release = mdio_device_release; + mdiodev->dev.parent = &bus->dev; + mdiodev->dev.bus = &mdio_bus_type; + mdiodev->device_free = mdio_device_free; + mdiodev->device_remove = mdio_device_remove; + mdiodev->bus = bus; + mdiodev->addr = addr; + + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); + + device_initialize(&mdiodev->dev); + + return mdiodev; +} +EXPORT_SYMBOL(mdio_device_create); + +/** + * mdio_device_register - Register the mdio device on the MDIO bus + * @mdiodev: mdio_device structure to be added to the MDIO bus + */ +int mdio_device_register(struct mdio_device *mdiodev) +{ + int err; + + dev_info(&mdiodev->dev, "mdio_device_register\n"); + + err = mdiobus_register_device(mdiodev); + if (err) + return err; + + err = device_add(&mdiodev->dev); + if (err) { + pr_err("MDIO %d failed to add\n", mdiodev->addr); + goto out; + } + + return 0; + + out: + mdiobus_unregister_device(mdiodev); + return err; +} +EXPORT_SYMBOL(mdio_device_register); + +/** + * mdio_device_remove - Remove a previously registered mdio device from the + * MDIO bus + * @mdiodev: mdio_device structure to remove + * + * This doesn't free the mdio_device itself, it merely reverses the effects + * of mdio_device_register(). Use mdio_device_free() to free the device + * after calling this function. + */ +void mdio_device_remove(struct mdio_device *mdiodev) +{ + device_del(&mdiodev->dev); + mdiobus_unregister_device(mdiodev); +} +EXPORT_SYMBOL(mdio_device_remove); + +/** + * mdio_probe - probe an MDIO device + * @dev: device to probe + * + * Description: Take care of setting up the mdio_device structure + * and calling the driver to probe the device. 
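+ * A driver that provides no probe method is still bound; mdio_probe()
+ * simply returns success in that case.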
+ */ +static int mdio_probe(struct device *dev) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct device_driver *drv = mdiodev->dev.driver; + struct mdio_driver *mdiodrv = to_mdio_driver(drv); + int err = 0; + + if (mdiodrv->probe) + err = mdiodrv->probe(mdiodev); + + return err; +} + +static int mdio_remove(struct device *dev) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct device_driver *drv = mdiodev->dev.driver; + struct mdio_driver *mdiodrv = to_mdio_driver(drv); + + if (mdiodrv->remove) + mdiodrv->remove(mdiodev); + + return 0; +} + +/** + * mdio_driver_register - register an mdio_driver with the MDIO layer + * @new_driver: new mdio_driver to register + */ +int mdio_driver_register(struct mdio_driver *drv) +{ + struct mdio_driver_common *mdiodrv = &drv->mdiodrv; + int retval; + + pr_info("mdio_driver_register: %s\n", mdiodrv->driver.name); + + mdiodrv->driver.bus = &mdio_bus_type; + mdiodrv->driver.probe = mdio_probe; + mdiodrv->driver.remove = mdio_remove; + + retval = driver_register(&mdiodrv->driver); + if (retval) { + pr_err("%s: Error %d in registering driver\n", + mdiodrv->driver.name, retval); + + return retval; + } + + return 0; +} +EXPORT_SYMBOL(mdio_driver_register); + +void mdio_driver_unregister(struct mdio_driver *drv) +{ + struct mdio_driver_common *mdiodrv = &drv->mdiodrv; + + driver_unregister(&mdiodrv->driver); +} +EXPORT_SYMBOL(mdio_driver_unregister); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index cf6312fafea5..03833dbfca67 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -73,6 +73,17 @@ #define PS_TO_REG 200 +struct kszphy_hw_stat { + const char *string; + u8 reg; + u8 bits; +}; + +static struct kszphy_hw_stat kszphy_hw_stats[] = { + { "phy_receive_errors", 21, 16}, + { "phy_idle_errors", 10, 8 }, +}; + struct kszphy_type { u32 led_mode_reg; u16 interrupt_level_mask; @@ -86,6 +97,7 @@ struct kszphy_priv { int led_mode; bool rmii_ref_clk_sel; bool rmii_ref_clk_sel_val; + u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; }; static const struct kszphy_type ksz8021_type = { @@ -212,7 +224,7 @@ static int kszphy_setup_led(struct phy_device *phydev, u32 reg, int val) rc = phy_write(phydev, reg, temp); out: if (rc < 0) - dev_err(&phydev->dev, "failed to set led mode\n"); + phydev_err(phydev, "failed to set led mode\n"); return rc; } @@ -231,7 +243,7 @@ static int kszphy_broadcast_disable(struct phy_device *phydev) ret = phy_write(phydev, MII_KSZPHY_OMSO, ret | KSZPHY_OMSO_B_CAST_OFF); out: if (ret) - dev_err(&phydev->dev, "failed to disable broadcast address\n"); + phydev_err(phydev, "failed to disable broadcast address\n"); return ret; } @@ -251,7 +263,7 @@ static int kszphy_nand_tree_disable(struct phy_device *phydev) ret & ~KSZPHY_OMSO_NAND_TREE_ON); out: if (ret) - dev_err(&phydev->dev, "failed to disable NAND tree mode\n"); + phydev_err(phydev, "failed to disable NAND tree mode\n"); return ret; } @@ -276,7 +288,8 @@ static int kszphy_config_init(struct phy_device *phydev) if (priv->rmii_ref_clk_sel) { ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); if (ret) { - dev_err(&phydev->dev, "failed to set rmii reference clock\n"); + phydev_err(phydev, + "failed to set rmii reference clock\n"); return ret; } } @@ -337,11 +350,20 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev, static int ksz9021_config_init(struct phy_device *phydev) { - const struct device *dev = &phydev->dev; + const struct device *dev = &phydev->mdio.dev; const struct device_node *of_node = 
dev->of_node; + const struct device *dev_walker; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + /* The Micrel driver has a deprecated option to place phy OF + * properties in the MAC node. Walk up the tree of devices to + * find a device with an OF node. + */ + dev_walker = &phydev->mdio.dev; + do { + of_node = dev_walker->of_node; + dev_walker = dev_walker->parent; + + } while (!of_node && dev_walker); if (of_node) { ksz9021_load_values_from_of(phydev, of_node, @@ -449,7 +471,7 @@ static int ksz9031_center_flp_timing(struct phy_device *phydev) static int ksz9031_config_init(struct phy_device *phydev) { - const struct device *dev = &phydev->dev; + const struct device *dev = &phydev->mdio.dev; const struct device_node *of_node = dev->of_node; static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"}; static const char *rx_data_skews[4] = { @@ -461,9 +483,17 @@ static int ksz9031_config_init(struct phy_device *phydev) "txd2-skew-ps", "txd3-skew-ps" }; static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; + const struct device *dev_walker; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + /* The Micrel driver has a deprecated option to place phy OF + * properties in the MAC node. Walk up the tree of devices to + * find a device with an OF node. + */ + dev_walker = &phydev->mdio.dev; + do { + of_node = dev_walker->of_node; + dev_walker = dev_walker->parent; + } while (!of_node && dev_walker); if (of_node) { ksz9031_of_load_skew_values(phydev, of_node, @@ -560,15 +590,60 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, { } +static int kszphy_get_sset_count(struct phy_device *phydev) +{ + return ARRAY_SIZE(kszphy_hw_stats); +} + +static void kszphy_get_strings(struct phy_device *phydev, u8 *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { + memcpy(data + i * ETH_GSTRING_LEN, + kszphy_hw_stats[i].string, ETH_GSTRING_LEN); + } +} + +#ifndef UINT64_MAX +#define UINT64_MAX (u64)(~((u64)0)) +#endif +static u64 kszphy_get_stat(struct phy_device *phydev, int i) +{ + struct kszphy_hw_stat stat = kszphy_hw_stats[i]; + struct kszphy_priv *priv = phydev->priv; + u64 val; + + val = phy_read(phydev, stat.reg); + if (val < 0) { + val = UINT64_MAX; + } else { + val = val & ((1 << stat.bits) - 1); + priv->stats[i] += val; + val = priv->stats[i]; + } + + return val; +} + +static void kszphy_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) + data[i] = kszphy_get_stat(phydev, i); +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; - const struct device_node *np = phydev->dev.of_node; + const struct device_node *np = phydev->mdio.dev.of_node; struct kszphy_priv *priv; struct clk *clk; int ret; - priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -583,15 +658,15 @@ static int kszphy_probe(struct phy_device *phydev) priv->led_mode = -1; if (priv->led_mode > 3) { - dev_err(&phydev->dev, "invalid led mode: 0x%02x\n", - priv->led_mode); + phydev_err(phydev, "invalid led mode: 0x%02x\n", + priv->led_mode); priv->led_mode = -1; } } else { priv->led_mode = -1; } - clk = devm_clk_get(&phydev->dev, "rmii-ref"); + clk = devm_clk_get(&phydev->mdio.dev, "rmii-ref"); /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ if 
(!IS_ERR_OR_NULL(clk)) { unsigned long rate = clk_get_rate(clk); @@ -606,7 +681,8 @@ static int kszphy_probe(struct phy_device *phydev) } else if (rate > 49500000 && rate < 50500000) { priv->rmii_ref_clk_sel_val = !rmii_ref_clk_sel_25_mhz; } else { - dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate); + phydev_err(phydev, "Clock rate out of range: %ld\n", + rate); return -EINVAL; } } @@ -633,9 +709,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8021, .phy_id_mask = 0x00ffffff, @@ -650,9 +728,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8031, .phy_id_mask = 0x00ffffff, @@ -667,9 +747,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8041, .phy_id_mask = 0x00fffff0, @@ -684,9 +766,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8041RNLI, .phy_id_mask = 0x00fffff0, @@ -701,9 +785,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8051, .phy_id_mask = 0x00fffff0, @@ -718,9 +804,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8001, .name = "Micrel KSZ8001 or KS8721", @@ -734,9 +822,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8081, .name = "Micrel KSZ8081 or KSZ8091", @@ -750,9 +840,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = 
genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8061, .name = "Micrel KSZ8061", @@ -764,9 +856,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, @@ -779,11 +873,13 @@ static struct phy_driver ksphy_driver[] = { .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, .read_mmd_indirect = ksz9021_rd_mmd_phyreg, .write_mmd_indirect = ksz9021_wr_mmd_phyreg, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = 0x00fffff0, @@ -796,9 +892,11 @@ static struct phy_driver ksphy_driver[] = { .read_status = ksz9031_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = 0x00fffff0, @@ -808,9 +906,11 @@ static struct phy_driver ksphy_driver[] = { .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = PHY_ID_KSZ886X, .phy_id_mask = 0x00fffff0, @@ -820,9 +920,11 @@ static struct phy_driver ksphy_driver[] = { .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, } }; module_phy_driver(ksphy_driver); diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index c0a20ebd083b..15f820648f82 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -68,7 +68,7 @@ int lan88xx_suspend(struct phy_device *phydev) static int lan88xx_probe(struct phy_device *phydev) { - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -78,10 +78,9 @@ static int lan88xx_probe(struct phy_device *phydev) priv->wolopts = 0; /* these values can be used to identify internal PHY */ - priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, - 3, phydev->addr); + priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, 3); priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV, - 3, phydev->addr); + 3); phydev->priv = priv; @@ -90,7 +89,7 @@ static int lan88xx_probe(struct phy_device 
*phydev) static void lan88xx_remove(struct phy_device *phydev) { - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv = phydev->priv; if (priv) @@ -130,8 +129,6 @@ static struct phy_driver microchip_phy_driver[] = { .suspend = lan88xx_suspend, .resume = genphy_resume, .set_wol = lan88xx_set_wol, - - .driver = { .owner = THIS_MODULE, } } }; module_phy_driver(microchip_phy_driver); diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 0a7b9c7f09a2..2a1b490bc587 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -140,7 +140,6 @@ static struct phy_driver dp83865_driver[] = { { .read_status = genphy_read_status, .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, - .driver = {.owner = THIS_MODULE,} } }; module_phy_driver(dp83865_driver); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 47cd306dbb3c..8763bb20988a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -319,7 +319,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) { u32 speed = ethtool_cmd_speed(cmd); - if (cmd->phy_address != phydev->addr) + if (cmd->phy_address != phydev->mdio.addr) return -EINVAL; /* We make sure that we don't pass unsupported values in to the PHY */ @@ -375,7 +375,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) cmd->port = PORT_BNC; else cmd->port = PORT_MII; - cmd->phy_address = phydev->addr; + cmd->phy_address = phydev->mdio.addr; cmd->transceiver = phy_is_internal(phydev) ? XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; @@ -403,16 +403,17 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: - mii_data->phy_id = phydev->addr; + mii_data->phy_id = phydev->mdio.addr; /* fall through */ case SIOCGMIIREG: - mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id, + mii_data->val_out = mdiobus_read(phydev->mdio.bus, + mii_data->phy_id, mii_data->reg_num); return 0; case SIOCSMIIREG: - if (mii_data->phy_id == phydev->addr) { + if (mii_data->phy_id == phydev->mdio.addr) { switch (mii_data->reg_num) { case MII_BMCR: if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) { @@ -445,10 +446,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } } - mdiobus_write(phydev->bus, mii_data->phy_id, + mdiobus_write(phydev->mdio.bus, mii_data->phy_id, mii_data->reg_num, val); - if (mii_data->phy_id == phydev->addr && + if (mii_data->phy_id == phydev->mdio.addr && mii_data->reg_num == MII_BMCR && val & BMCR_RESET) return phy_init_hw(phydev); @@ -643,7 +644,7 @@ int phy_start_interrupts(struct phy_device *phydev) if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", phydev) < 0) { pr_warn("%s: Can't get IRQ %d (PHY)\n", - phydev->bus->name, phydev->irq); + phydev->mdio.bus->name, phydev->irq); phydev->irq = PHY_POLL; return 0; } @@ -995,8 +996,9 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - dev_dbg(&phydev->dev, "PHY state change %s -> %s\n", - phy_state_to_str(old_state), phy_state_to_str(phydev->state)); + phydev_dbg(phydev, "PHY state change %s -> %s\n", + phy_state_to_str(old_state), + phy_state_to_str(phydev->state)); queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, PHY_STATE_TIME * HZ); @@ -1028,7 +1030,6 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, * @phydev: The PHY device bus * @prtad: MMD Address * @devad: MMD DEVAD - * @addr: 
PHY address on the MII bus * * Description: it reads data from the MMD registers (clause 22 to access to * clause 45) of the specified phy address. @@ -1038,14 +1039,14 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, * 3) Write reg 13 // MMD Data Command for MMD DEVAD * 3) Read reg 14 // Read MMD data */ -int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr) +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad) { struct phy_driver *phydrv = phydev->drv; + int addr = phydev->mdio.addr; int value = -1; if (!phydrv->read_mmd_indirect) { - struct mii_bus *bus = phydev->bus; + struct mii_bus *bus = phydev->mdio.bus; mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, prtad, devad, addr); @@ -1065,7 +1066,6 @@ EXPORT_SYMBOL(phy_read_mmd_indirect); * @phydev: The PHY device * @prtad: MMD Address * @devad: MMD DEVAD - * @addr: PHY address on the MII bus * @data: data to write in the MMD register * * Description: Write data from the MMD registers of the specified @@ -1077,12 +1077,13 @@ EXPORT_SYMBOL(phy_read_mmd_indirect); * 3) Write reg 14 // Write MMD data */ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr, u32 data) + int devad, u32 data) { struct phy_driver *phydrv = phydev->drv; + int addr = phydev->mdio.addr; if (!phydrv->write_mmd_indirect) { - struct mii_bus *bus = phydev->bus; + struct mii_bus *bus = phydev->mdio.bus; mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, prtad, devad, addr); @@ -1129,7 +1130,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* First check if the EEE ability is supported */ eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, - MDIO_MMD_PCS, phydev->addr); + MDIO_MMD_PCS); if (eee_cap <= 0) goto eee_exit_err; @@ -1141,12 +1142,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) * the EEE advertising registers. */ eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (eee_lp <= 0) goto eee_exit_err; eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (eee_adv <= 0) goto eee_exit_err; @@ -1160,15 +1161,13 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) * clock while it is signaling LPI. 
*/ int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1, - MDIO_MMD_PCS, - phydev->addr); + MDIO_MMD_PCS); if (val < 0) return val; val |= MDIO_PCS_CTRL1_CLKSTOP_EN; phy_write_mmd_indirect(phydev, MDIO_CTRL1, - MDIO_MMD_PCS, phydev->addr, - val); + MDIO_MMD_PCS, val); } return 0; /* EEE supported */ @@ -1187,8 +1186,7 @@ EXPORT_SYMBOL(phy_init_eee); */ int phy_get_eee_err(struct phy_device *phydev) { - return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, - MDIO_MMD_PCS, phydev->addr); + return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS); } EXPORT_SYMBOL(phy_get_eee_err); @@ -1205,22 +1203,19 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) int val; /* Get Supported EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, - MDIO_MMD_PCS, phydev->addr); + val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS); if (val < 0) return val; data->supported = mmd_eee_cap_to_ethtool_sup_t(val); /* Get advertisement EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, - MDIO_MMD_AN, phydev->addr); + val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN); if (val < 0) return val; data->advertised = mmd_eee_adv_to_ethtool_adv_t(val); /* Get LP advertisement EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, - MDIO_MMD_AN, phydev->addr); + val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN); if (val < 0) return val; data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); @@ -1240,8 +1235,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); - phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, - phydev->addr, val); + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val); return 0; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0bfbabad4431..903737adfc01 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -43,15 +43,31 @@ MODULE_LICENSE("GPL"); void phy_device_free(struct phy_device *phydev) { - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); } EXPORT_SYMBOL(phy_device_free); +static void phy_mdio_device_free(struct mdio_device *mdiodev) +{ + struct phy_device *phydev; + + phydev = container_of(mdiodev, struct phy_device, mdio); + phy_device_free(phydev); +} + static void phy_device_release(struct device *dev) { kfree(to_phy_device(dev)); } +static void phy_mdio_device_remove(struct mdio_device *mdiodev) +{ + struct phy_device *phydev; + + phydev = container_of(mdiodev, struct phy_device, mdio); + phy_device_remove(phydev); +} + enum genphy_driver { GENPHY_DRV_1G, GENPHY_DRV_10G, @@ -63,9 +79,118 @@ static struct phy_driver genphy_driver[GENPHY_DRV_MAX]; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); +#ifdef CONFIG_PM +static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) +{ + struct device_driver *drv = phydev->mdio.dev.driver; + struct phy_driver *phydrv = to_phy_driver(drv); + struct net_device *netdev = phydev->attached_dev; + + if (!drv || !phydrv->suspend) + return false; + + /* PHY not attached? May suspend if the PHY has not already been + * suspended as part of a prior call to phy_disconnect() -> + * phy_detach() -> phy_suspend() because the parent netdev might be the + * MDIO bus driver and clock gated at this point. + */ + if (!netdev) + return !phydev->suspended; + + /* Don't suspend PHY if the attached netdev parent may wakeup. 
+ * The parent may point to a PCI device, as in tg3 driver. + */ + if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) + return false; + + /* Also don't suspend PHY if the netdev itself may wakeup. This + * is the case for devices w/o underlaying pwr. mgmt. aware bus, + * e.g. SoC devices. + */ + if (device_may_wakeup(&netdev->dev)) + return false; + + return true; +} + +static int mdio_bus_phy_suspend(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + + /* We must stop the state machine manually, otherwise it stops out of + * control, possibly with the phydev->lock held. Upon resume, netdev + * may call phy routines that try to grab the same lock, and that may + * lead to a deadlock. + */ + if (phydev->attached_dev && phydev->adjust_link) + phy_stop_machine(phydev); + + if (!mdio_bus_phy_may_suspend(phydev)) + return 0; + + return phy_suspend(phydev); +} + +static int mdio_bus_phy_resume(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + int ret; + + if (!mdio_bus_phy_may_suspend(phydev)) + goto no_resume; + + ret = phy_resume(phydev); + if (ret < 0) + return ret; + +no_resume: + if (phydev->attached_dev && phydev->adjust_link) + phy_start_machine(phydev); + + return 0; +} + +static int mdio_bus_phy_restore(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + struct net_device *netdev = phydev->attached_dev; + int ret; + + if (!netdev) + return 0; + + ret = phy_init_hw(phydev); + if (ret < 0) + return ret; + + /* The PHY needs to renegotiate. */ + phydev->link = 0; + phydev->state = PHY_UP; + + phy_start_machine(phydev); + + return 0; +} + +static const struct dev_pm_ops mdio_bus_phy_pm_ops = { + .suspend = mdio_bus_phy_suspend, + .resume = mdio_bus_phy_resume, + .freeze = mdio_bus_phy_suspend, + .thaw = mdio_bus_phy_resume, + .restore = mdio_bus_phy_restore, +}; + +#define MDIO_BUS_PHY_PM_OPS (&mdio_bus_phy_pm_ops) + +#else + +#define MDIO_BUS_PHY_PM_OPS NULL + +#endif /* CONFIG_PM */ + /** * phy_register_fixup - creates a new phy_fixup and adds it to the list - * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID) + * @bus_id: A string which matches phydev->mdio.dev.bus_id (or PHY_ANY_ID) * @phy_uid: Used to match against phydev->phy_id (the UID of the PHY) * It can also be PHY_ANY_UID * @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before @@ -114,7 +239,7 @@ EXPORT_SYMBOL(phy_register_fixup_for_id); */ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup) { - if (strcmp(fixup->bus_id, dev_name(&phydev->dev)) != 0) + if (strcmp(fixup->bus_id, phydev_name(phydev)) != 0) if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0) return 0; @@ -148,18 +273,59 @@ static int phy_scan_fixups(struct phy_device *phydev) return 0; } +static int phy_bus_match(struct device *dev, struct device_driver *drv) +{ + struct phy_device *phydev = to_phy_device(dev); + struct phy_driver *phydrv = to_phy_driver(drv); + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); + int i; + + if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)) + return 0; + + if (phydrv->match_phy_device) + return phydrv->match_phy_device(phydev); + + if (phydev->is_c45) { + for (i = 1; i < num_ids; i++) { + if (!(phydev->c45_ids.devices_in_package & (1 << i))) + continue; + + if ((phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->c45_ids.device_ids[i] & + phydrv->phy_id_mask)) + return 1; + } + return 0; + } else { + return (phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->phy_id & 
phydrv->phy_id_mask); + } +} + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { struct phy_device *dev; + struct mdio_device *mdiodev; /* We allocate the device, and initialize the default values */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); - dev->dev.release = phy_device_release; + mdiodev = &dev->mdio; + mdiodev->dev.release = phy_device_release; + mdiodev->dev.parent = &bus->dev; + mdiodev->dev.bus = &mdio_bus_type; + mdiodev->bus = bus; + mdiodev->pm_ops = MDIO_BUS_PHY_PM_OPS; + mdiodev->bus_match = phy_bus_match; + mdiodev->addr = addr; + mdiodev->flags = MDIO_DEVICE_FLAG_PHY; + mdiodev->device_free = phy_mdio_device_free; + mdiodev->device_remove = phy_mdio_device_remove; dev->speed = 0; dev->duplex = -1; @@ -171,15 +337,11 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, dev->autoneg = AUTONEG_ENABLE; dev->is_c45 = is_c45; - dev->addr = addr; dev->phy_id = phy_id; if (c45_ids) dev->c45_ids = *c45_ids; - dev->bus = bus; - dev->dev.parent = &bus->dev; - dev->dev.bus = &mdio_bus_type; - dev->irq = bus->irq ? bus->irq[addr] : PHY_POLL; - dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr); + dev->irq = bus->irq[addr]; + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); dev->state = PHY_DOWN; @@ -199,7 +361,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, */ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); - device_initialize(&dev->dev); + device_initialize(&mdiodev->dev); return dev; } @@ -373,6 +535,48 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) } EXPORT_SYMBOL(get_phy_device); +static ssize_t +phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); +} +static DEVICE_ATTR_RO(phy_id); + +static ssize_t +phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + const char *mode = NULL; + + if (phy_is_internal(phydev)) + mode = "internal"; + else + mode = phy_modes(phydev->interface); + + return sprintf(buf, "%s\n", mode); +} +static DEVICE_ATTR_RO(phy_interface); + +static ssize_t +phy_has_fixups_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "%d\n", phydev->has_fixups); +} +static DEVICE_ATTR_RO(phy_has_fixups); + +static struct attribute *phy_dev_attrs[] = { + &dev_attr_phy_id.attr, + &dev_attr_phy_interface.attr, + &dev_attr_phy_has_fixups.attr, + NULL, +}; +ATTRIBUTE_GROUPS(phy_dev); + /** * phy_device_register - Register the phy device on the MDIO bus * @phydev: phy_device structure to be added to the MDIO bus @@ -381,28 +585,29 @@ int phy_device_register(struct phy_device *phydev) { int err; - /* Don't register a phy if one is already registered at this address */ - if (phydev->bus->phy_map[phydev->addr]) - return -EINVAL; - phydev->bus->phy_map[phydev->addr] = phydev; + err = mdiobus_register_device(&phydev->mdio); + if (err) + return err; /* Run all of the fixups for this PHY */ err = phy_scan_fixups(phydev); if (err) { - pr_err("PHY %d failed to initialize\n", phydev->addr); + pr_err("PHY %d failed to initialize\n", phydev->mdio.addr); goto out; } - err = device_add(&phydev->dev); + phydev->mdio.dev.groups = phy_dev_groups; 
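+	/* The phy_id/phy_interface/phy_has_fixups attributes moved from
+	 * mdio_bus_type's dev_groups to per-device groups, so they must be
+	 * assigned before device_add() creates the sysfs entries.
+	 */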
+ + err = device_add(&phydev->mdio.dev); if (err) { - pr_err("PHY %d failed to add\n", phydev->addr); + pr_err("PHY %d failed to add\n", phydev->mdio.addr); goto out; } return 0; out: - phydev->bus->phy_map[phydev->addr] = NULL; + mdiobus_unregister_device(&phydev->mdio); return err; } EXPORT_SYMBOL(phy_device_register); @@ -417,11 +622,8 @@ EXPORT_SYMBOL(phy_device_register); */ void phy_device_remove(struct phy_device *phydev) { - struct mii_bus *bus = phydev->bus; - int addr = phydev->addr; - - device_del(&phydev->dev); - bus->phy_map[addr] = NULL; + device_del(&phydev->mdio.dev); + mdiobus_unregister_device(&phydev->mdio); } EXPORT_SYMBOL(phy_device_remove); @@ -431,11 +633,13 @@ EXPORT_SYMBOL(phy_device_remove); */ struct phy_device *phy_find_first(struct mii_bus *bus) { + struct phy_device *phydev; int addr; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (bus->phy_map[addr]) - return bus->phy_map[addr]; + phydev = mdiobus_get_phy(bus, addr); + if (phydev) + return phydev; } return NULL; } @@ -607,6 +811,33 @@ int phy_init_hw(struct phy_device *phydev) } EXPORT_SYMBOL(phy_init_hw); +void phy_attached_info(struct phy_device *phydev) +{ + phy_attached_print(phydev, NULL); +} +EXPORT_SYMBOL(phy_attached_info); + +#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" +void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) +{ + if (!fmt) { + dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", + phydev->drv->name, phydev_name(phydev), + phydev->irq); + } else { + va_list ap; + + dev_info(&phydev->mdio.dev, ATTACHED_FMT, + phydev->drv->name, phydev_name(phydev), + phydev->irq); + + va_start(ap, fmt); + vprintk(fmt, ap); + va_end(ap); + } +} +EXPORT_SYMBOL(phy_attached_print); + /** * phy_attach_direct - attach a network device to a given PHY device pointer * @dev: network device to attach @@ -625,8 +856,8 @@ EXPORT_SYMBOL(phy_init_hw); int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, u32 flags, phy_interface_t interface) { - struct mii_bus *bus = phydev->bus; - struct device *d = &phydev->dev; + struct mii_bus *bus = phydev->mdio.bus; + struct device *d = &phydev->mdio.dev; int err; if (!try_module_get(bus->owner)) { @@ -641,9 +872,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, */ if (!d->driver) { if (phydev->is_c45) - d->driver = &genphy_driver[GENPHY_DRV_10G].driver; + d->driver = + &genphy_driver[GENPHY_DRV_10G].mdiodrv.driver; else - d->driver = &genphy_driver[GENPHY_DRV_1G].driver; + d->driver = + &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; err = d->driver->probe(d); if (err >= 0) @@ -744,8 +977,9 @@ void phy_detach(struct phy_device *phydev) * real driver could be loaded */ for (i = 0; i < ARRAY_SIZE(genphy_driver); i++) { - if (phydev->dev.driver == &genphy_driver[i].driver) { - device_release_driver(&phydev->dev); + if (phydev->mdio.dev.driver == + &genphy_driver[i].mdiodrv.driver) { + device_release_driver(&phydev->mdio.dev); break; } } @@ -754,16 +988,16 @@ void phy_detach(struct phy_device *phydev) * The phydev might go away on the put_device() below, so avoid * a use-after-free bug by reading the underlying bus first. 
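/*
 * Sketch (editorial): with phy_attached_info()/phy_attached_print()
 * exported above, a MAC driver can emit the standard
 * "attached PHY driver [...] (mii_bus:phy_addr=..., irq=...)" line instead
 * of open-coding it. Hypothetical example driver ("foo") shown only to
 * illustrate the call site:
 */
#include <linux/phy.h>
#include <linux/of_mdio.h>

static void foo_adjust_link(struct net_device *ndev)
{
	/* react to link changes reported by the PHY state machine */
}

static int foo_connect_phy(struct net_device *ndev, struct device_node *phy_np)
{
	struct phy_device *phydev;

	phydev = of_phy_connect(ndev, phy_np, foo_adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	if (!phydev)
		return -ENODEV;

	/* one-line summary: driver name, bus:addr and IRQ */
	phy_attached_info(phydev);
	return 0;
}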
*/ - bus = phydev->bus; + bus = phydev->mdio.bus; - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); module_put(bus->owner); } EXPORT_SYMBOL(phy_detach); int phy_suspend(struct phy_device *phydev) { - struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); + struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; int ret = 0; @@ -786,7 +1020,7 @@ EXPORT_SYMBOL(phy_suspend); int phy_resume(struct phy_device *phydev) { - struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); + struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; if (phydrv->resume) @@ -1303,7 +1537,7 @@ EXPORT_SYMBOL(phy_set_max_speed); static void of_set_phy_supported(struct phy_device *phydev) { - struct device_node *node = phydev->dev.of_node; + struct device_node *node = phydev->mdio.dev.of_node; u32 max_speed; if (!IS_ENABLED(CONFIG_OF_MDIO)) @@ -1327,7 +1561,7 @@ static void of_set_phy_supported(struct phy_device *phydev) static int phy_probe(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); - struct device_driver *drv = phydev->dev.driver; + struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); int err = 0; @@ -1382,17 +1616,20 @@ static int phy_remove(struct device *dev) /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register + * @owner: module owning this PHY */ -int phy_driver_register(struct phy_driver *new_driver) +int phy_driver_register(struct phy_driver *new_driver, struct module *owner) { int retval; - new_driver->driver.name = new_driver->name; - new_driver->driver.bus = &mdio_bus_type; - new_driver->driver.probe = phy_probe; - new_driver->driver.remove = phy_remove; + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; + new_driver->mdiodrv.driver.name = new_driver->name; + new_driver->mdiodrv.driver.bus = &mdio_bus_type; + new_driver->mdiodrv.driver.probe = phy_probe; + new_driver->mdiodrv.driver.remove = phy_remove; + new_driver->mdiodrv.driver.owner = owner; - retval = driver_register(&new_driver->driver); + retval = driver_register(&new_driver->mdiodrv.driver); if (retval) { pr_err("%s: Error %d in registering driver\n", new_driver->name, retval); @@ -1406,12 +1643,13 @@ int phy_driver_register(struct phy_driver *new_driver) } EXPORT_SYMBOL(phy_driver_register); -int phy_drivers_register(struct phy_driver *new_driver, int n) +int phy_drivers_register(struct phy_driver *new_driver, int n, + struct module *owner) { int i, ret = 0; for (i = 0; i < n; i++) { - ret = phy_driver_register(new_driver + i); + ret = phy_driver_register(new_driver + i, owner); if (ret) { while (i-- > 0) phy_driver_unregister(new_driver + i); @@ -1424,7 +1662,7 @@ EXPORT_SYMBOL(phy_drivers_register); void phy_driver_unregister(struct phy_driver *drv) { - driver_unregister(&drv->driver); + driver_unregister(&drv->mdiodrv.driver); } EXPORT_SYMBOL(phy_driver_unregister); @@ -1452,7 +1690,6 @@ static struct phy_driver genphy_driver[] = { .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, @@ -1464,7 +1701,6 @@ static struct phy_driver genphy_driver[] = { .read_status = gen10g_read_status, .suspend = gen10g_suspend, .resume = gen10g_resume, - .driver = {.owner = THIS_MODULE, }, } }; static int __init phy_init(void) @@ -1476,7 +1712,7 @@ static int __init phy_init(void) return 
rc; rc = phy_drivers_register(genphy_driver, - ARRAY_SIZE(genphy_driver)); + ARRAY_SIZE(genphy_driver), THIS_MODULE); if (rc) mdio_bus_exit(); diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index be4c6f7c3645..d470db89e8dd 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -122,7 +122,6 @@ static struct phy_driver qs6612_driver[] = { { .read_status = genphy_read_status, .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(qs6612_driver); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 43ab691362d4..aadd6e9f54ad 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -124,7 +124,6 @@ static struct phy_driver realtek_drvs[] = { .flags = PHY_HAS_INTERRUPT, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", @@ -135,7 +134,6 @@ static struct phy_driver realtek_drvs[] = { .read_status = &genphy_read_status, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", @@ -148,7 +146,6 @@ static struct phy_driver realtek_drvs[] = { .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc915, .name = "RTL8211E Gigabit Ethernet", @@ -161,7 +158,6 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc916, .name = "RTL8211F Gigabit Ethernet", @@ -175,7 +171,6 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211f_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE }, }, }; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index dc2da8770918..e485f2653c82 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -44,7 +44,7 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev) { int __maybe_unused len; - struct device *dev __maybe_unused = &phydev->dev; + struct device *dev __maybe_unused = &phydev->mdio.dev; struct device_node *of_node __maybe_unused = dev->of_node; int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); int enable_energy = 1; @@ -171,8 +171,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */ .phy_id_mask = 0xfffffff0, @@ -194,8 +192,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */ .phy_id_mask = 0xfffffff0, @@ -217,8 +213,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */ .phy_id_mask = 0xfffffff0, @@ -239,8 +233,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */ .phy_id_mask = 0xfffffff0, @@ -262,8 +254,27 
@@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, +}, { + .phy_id = 0x0007c110, + .phy_id_mask = 0xfffffff0, + .name = "SMSC LAN8740", + + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + + /* basic functions */ + .config_aneg = genphy_config_aneg, + .read_status = lan87xx_read_status, + .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, - .driver = { .owner = THIS_MODULE, } + /* IRQ related */ + .ack_interrupt = smsc_phy_ack_interrupt, + .config_intr = smsc_phy_config_intr, + + .suspend = genphy_suspend, + .resume = genphy_resume, } }; module_phy_driver(smsc_phy_driver); @@ -278,6 +289,7 @@ static struct mdio_device_id __maybe_unused smsc_tbl[] = { { 0x0007c0c0, 0xfffffff0 }, { 0x0007c0d0, 0xfffffff0 }, { 0x0007c0f0, 0xfffffff0 }, + { 0x0007c110, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 3fc199b773e6..d00cfb64529e 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -95,7 +95,6 @@ static struct phy_driver ste10xp_pdriver[] = { .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = {.owner = THIS_MODULE,} }, { .phy_id = STE100P_PHY_ID, .phy_id_mask = 0xffffffff, @@ -109,7 +108,6 @@ static struct phy_driver ste10xp_pdriver[] = { .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = {.owner = THIS_MODULE,} } }; module_phy_driver(ste10xp_pdriver); diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 07463fcca212..fb2cef764e9a 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -108,7 +108,6 @@ static struct phy_driver teranetics_driver[] = { .config_aneg = teranetics_config_aneg, .read_status = teranetics_read_status, .match_phy_device = teranetics_match_phy_device, - .driver = { .owner = THIS_MODULE,}, }, }; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index dd295dbaa074..2e37eb337d48 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -236,7 +236,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8244, .name = "Vitesse VSC8244", @@ -248,7 +247,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8514, .name = "Vitesse VSC8514", @@ -260,7 +258,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8574, .name = "Vitesse VSC8574", @@ -272,7 +269,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8601, .name = "Vitesse VSC8601", @@ -284,7 +280,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", 
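/*
 * Sketch (editorial): the reason every ".driver = { .owner = THIS_MODULE }"
 * initializer in these phy_driver arrays can go away is that module
 * ownership is now an explicit argument of phy_driver_register() and
 * phy_drivers_register(). A plausible expansion of the module_phy_driver()
 * convenience macro (illustrative only, not copied from phy.h) would be:
 */
#define phy_module_driver(__phy_drivers, __count)			\
static int __init phy_module_init(void)				\
{									\
	return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \
}									\
module_init(phy_module_init);						\
static void __exit phy_module_exit(void)				\
{									\
	phy_drivers_unregister(__phy_drivers, __count);			\
}									\
module_exit(phy_module_exit)

#define module_phy_driver(__phy_drivers)				\
	phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))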
@@ -296,7 +291,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { /* Vitesse 8221 */ .phy_id = PHY_ID_VSC8221, @@ -309,7 +303,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { /* Vitesse 8211 */ .phy_id = PHY_ID_VSC8211, @@ -322,7 +315,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(vsc82xx_driver); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 040b8978d6ca..9c4b41a4df7d 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -1249,6 +1249,7 @@ static void plip_attach (struct parport *port) struct net_device *dev; struct net_local *nl; char name[IFNAMSIZ]; + struct pardev_cb plip_cb; if ((parport[0] == -1 && (!timid || !port->devices)) || plip_searchfor(parport, port->number)) { @@ -1273,9 +1274,15 @@ static void plip_attach (struct parport *port) nl = netdev_priv(dev); nl->dev = dev; - nl->pardev = parport_register_device(port, dev->name, plip_preempt, - plip_wakeup, plip_interrupt, - 0, dev); + + memset(&plip_cb, 0, sizeof(plip_cb)); + plip_cb.private = dev; + plip_cb.preempt = plip_preempt; + plip_cb.wakeup = plip_wakeup; + plip_cb.irq_func = plip_interrupt; + + nl->pardev = parport_register_dev_model(port, dev->name, + &plip_cb, unit); if (!nl->pardev) { printk(KERN_ERR "%s: parport_register failed\n", name); @@ -1315,10 +1322,23 @@ static void plip_detach (struct parport *port) /* Nothing to do */ } +static int plip_probe(struct pardevice *par_dev) +{ + struct device_driver *drv = par_dev->dev.driver; + int len = strlen(drv->name); + + if (strncmp(par_dev->name, drv->name, len)) + return -ENODEV; + + return 0; +} + static struct parport_driver plip_driver = { - .name = "plip", - .attach = plip_attach, - .detach = plip_detach + .name = "plip", + .probe = plip_probe, + .match_port = plip_attach, + .detach = plip_detach, + .devmodel = true, }; static void __exit plip_cleanup_module (void) @@ -1326,8 +1346,6 @@ static void __exit plip_cleanup_module (void) struct net_device *dev; int i; - parport_unregister_driver (&plip_driver); - for (i=0; i < PLIP_MAX; i++) { if ((dev = dev_plip[i])) { struct net_local *nl = netdev_priv(dev); @@ -1339,6 +1357,8 @@ static void __exit plip_cleanup_module (void) dev_plip[i] = NULL; } } + + parport_unregister_driver(&plip_driver); } #ifndef MODULE diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 9a863c6a6a33..fc8ad001bc94 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1138,9 +1138,15 @@ static const struct net_device_ops ppp_netdev_ops = { .ndo_get_stats64 = ppp_get_stats64, }; +static struct device_type ppp_type = { + .name = "ppp", +}; + static void ppp_setup(struct net_device *dev) { dev->netdev_ops = &ppp_netdev_ops; + SET_NETDEV_DEVTYPE(dev, &ppp_type); + dev->hard_header_len = PPP_HDRLEN; dev->mtu = PPP_MRU; dev->addr_len = 0; @@ -2720,8 +2726,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, int ret = -ENOMEM; int i; - dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_UNKNOWN, - ppp_setup); + dev = alloc_netdev(sizeof(struct ppp), "", 
NET_NAME_ENUM, ppp_setup); if (!dev) goto out1; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 5e0b43283bce..f3c63022eb3c 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -311,7 +311,7 @@ static void pppoe_flush_dev(struct net_device *dev) lock_sock(sk); if (po->pppoe_dev == dev && - sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { + sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { pppox_unbind_sock(sk); sk->sk_state_change(sk); po->pppoe_dev = NULL; @@ -500,27 +500,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, pn = pppoe_pernet(dev_net(dev)); po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); - if (po) { - struct sock *sk = sk_pppox(po); - - bh_lock_sock(sk); - - /* If the user has locked the socket, just ignore - * the packet. With the way two rcv protocols hook into - * one socket family type, we cannot (easily) distinguish - * what kind of SKB it is during backlog rcv. - */ - if (sock_owned_by_user(sk) == 0) { - /* We're no longer connect at the PPPOE layer, - * and must wait for ppp channel to disconnect us. - */ - sk->sk_state = PPPOX_ZOMBIE; - } - - bh_unlock_sock(sk); + if (po) if (!schedule_work(&po->proto.pppoe.padt_work)) - sock_put(sk); - } + sock_put(sk_pppox(po)); abort: kfree_skb(skb); @@ -568,6 +550,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern) sk->sk_family = PF_PPPOX; sk->sk_protocol = PX_PROTO_OE; + INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work, + pppoe_unbind_sock_work); + return 0; } @@ -632,8 +617,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); - INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); - error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OE) goto end; @@ -663,8 +646,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->pppoe_dev = NULL; } - memset(sk_pppox(po) + 1, 0, - sizeof(struct pppox_sock) - sizeof(struct sock)); + po->pppoe_ifindex = 0; + memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa)); + memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay)); + memset(&po->chan, 0, sizeof(po->chan)); + po->next = NULL; + po->num = 0; + sk->sk_state = PPPOX_NONE; } @@ -793,7 +781,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, struct pppox_sock *relay_po; err = -EBUSY; - if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD)) + if (sk->sk_state & (PPPOX_BOUND | PPPOX_DEAD)) break; err = -ENOTCONN; @@ -1220,4 +1208,4 @@ module_exit(pppoe_exit); MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>"); MODULE_DESCRIPTION("PPP over Ethernet driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_NETPROTO(PF_PPPOX); +MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OE); diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c index 0e1b30622477..b9c8be6283d3 100644 --- a/drivers/net/ppp/pppox.c +++ b/drivers/net/ppp/pppox.c @@ -58,7 +58,7 @@ void pppox_unbind_sock(struct sock *sk) { /* Clear connection to ppp device, if attached. 
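/*
 * Sketch (editorial): pppox_create() below now requests
 * "net-pf-%d-proto-%d" instead of "pppox-proto-%d", so each protocol
 * module only needs a matching alias. MODULE_ALIAS_NET_PF_PROTO()
 * presumably expands to a plain MODULE_ALIAS() string, e.g.
 *
 *   MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OE)
 *     -> MODULE_ALIAS("net-pf-24-proto-0")   (PF_PPPOX == 24, PX_PROTO_OE == 0)
 *
 * so the pppoe module is loaded on demand when userspace opens the socket,
 * while pppox keeps the family-wide alias for AF_PPPOX itself:
 */
#include <sys/socket.h>

#ifndef AF_PPPOX
#define AF_PPPOX 24
#endif

int main(void)
{
	/* creating the socket triggers the request_module() shown below */
	int fd = socket(AF_PPPOX, SOCK_STREAM, 0 /* PX_PROTO_OE */);

	return fd < 0;
}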
*/ - if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) { + if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED)) { ppp_unregister_channel(&pppox_sk(sk)->chan); sk->sk_state = PPPOX_DEAD; } @@ -113,7 +113,7 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol, rc = -EPROTONOSUPPORT; if (!pppox_protos[protocol]) - request_module("pppox-proto-%d", protocol); + request_module("net-pf-%d-proto-%d", PF_PPPOX, protocol); if (!pppox_protos[protocol] || !try_module_get(pppox_protos[protocol]->owner)) goto out; @@ -147,3 +147,4 @@ module_exit(pppox_exit); MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>"); MODULE_DESCRIPTION("PPP over Ethernet driver (generic socket layer)"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_PPPOX); diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index fc69e41d0950..90868ca5e341 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, struct pptp_opt *opt = &po->proto.pptp; int error = 0; + if (sockaddr_len < sizeof(struct sockaddr_pppox)) + return -EINVAL; + lock_sock(sk); opt->src_addr = sp->sa_addr.pptp; @@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, struct flowi4 fl4; int error = 0; + if (sockaddr_len < sizeof(struct sockaddr_pppox)) + return -EINVAL; + if (sp->sa_protocol != PX_PROTO_PPTP) return -EINVAL; @@ -718,3 +724,4 @@ module_exit(pptp_exit_module); MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol"); MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 651d35ea22c5..2528331de193 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -91,10 +91,24 @@ void team_modeop_port_change_dev_addr(struct team *team, } EXPORT_SYMBOL(team_modeop_port_change_dev_addr); +static void team_lower_state_changed(struct team_port *port) +{ + struct netdev_lag_lower_state_info info; + + info.link_up = port->linkup; + info.tx_enabled = team_port_enabled(port); + netdev_lower_state_changed(port->dev, &info); +} + static void team_refresh_port_linkup(struct team_port *port) { - port->linkup = port->user.linkup_enabled ? port->user.linkup : - port->state.linkup; + bool new_linkup = port->user.linkup_enabled ? 
port->user.linkup : + port->state.linkup; + + if (port->linkup != new_linkup) { + port->linkup = new_linkup; + team_lower_state_changed(port); + } } @@ -932,6 +946,7 @@ static void team_port_enable(struct team *team, team->ops.port_enabled(team, port); team_notify_peers(team); team_mcast_rejoin(team); + team_lower_state_changed(port); } static void __reconstruct_port_hlist(struct team *team, int rm_index) @@ -963,16 +978,21 @@ static void team_port_disable(struct team *team, team_adjust_ops(team); team_notify_peers(team); team_mcast_rejoin(team); + team_lower_state_changed(port); } -#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ +#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) +#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_RXCSUM | NETIF_F_ALL_TSO) + static void __team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t enc_features = TEAM_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; @@ -981,6 +1001,11 @@ static void __team_compute_features(struct team *team) vlan_features = netdev_increment_features(vlan_features, port->dev->vlan_features, TEAM_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + port->dev->hw_enc_features, + TEAM_ENC_FEATURES); + dst_release_flag &= port->dev->priv_flags; if (port->dev->hard_header_len > max_hard_header_len) @@ -988,6 +1013,7 @@ static void __team_compute_features(struct team *team) } team->dev->vlan_features = vlan_features; + team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; team->dev->hard_header_len = max_hard_header_len; team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@ -1078,23 +1104,24 @@ static void team_port_disable_netpoll(struct team_port *port) } #endif -static int team_upper_dev_link(struct net_device *dev, - struct net_device *port_dev) +static int team_upper_dev_link(struct team *team, struct team_port *port) { + struct netdev_lag_upper_info lag_upper_info; int err; - err = netdev_master_upper_dev_link(port_dev, dev); + lag_upper_info.tx_type = team->mode->lag_tx_type; + err = netdev_master_upper_dev_link(port->dev, team->dev, NULL, + &lag_upper_info); if (err) return err; - port_dev->priv_flags |= IFF_TEAM_PORT; + port->dev->priv_flags |= IFF_TEAM_PORT; return 0; } -static void team_upper_dev_unlink(struct net_device *dev, - struct net_device *port_dev) +static void team_upper_dev_unlink(struct team *team, struct team_port *port) { - netdev_upper_dev_unlink(port_dev, dev); - port_dev->priv_flags &= ~IFF_TEAM_PORT; + netdev_upper_dev_unlink(port->dev, team->dev); + port->dev->priv_flags &= ~IFF_TEAM_PORT; } static void __team_port_change_port_added(struct team_port *port, bool linkup); @@ -1194,7 +1221,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_handler_register; } - err = team_upper_dev_link(dev, port_dev); + err = team_upper_dev_link(team, port); if (err) { netdev_err(dev, "Device %s failed to set upper link\n", portname); @@ -1220,7 +1247,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return 0; err_option_port_add: - team_upper_dev_unlink(dev, port_dev); + team_upper_dev_unlink(team, port); err_set_upper_link: netdev_rx_handler_unregister(port_dev); @@ -1264,7 +1291,7 @@ static int team_port_del(struct team *team, 
struct net_device *port_dev) team_port_disable(team, port); list_del_rcu(&port->list); - team_upper_dev_unlink(dev, port_dev); + team_upper_dev_unlink(team, port); netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); @@ -2054,6 +2081,7 @@ static void team_setup(struct net_device *dev) dev->flags |= IFF_MULTICAST; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_TEAM; /* * Indicate we support unicast address filtering. That way core won't @@ -2073,7 +2101,7 @@ static void team_setup(struct net_device *dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); + dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; dev->features |= dev->hw_features; } @@ -2420,9 +2448,13 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) struct nlattr *nl_option; LIST_HEAD(opt_inst_list); + rtnl_lock(); + team = team_nl_team_get(info); - if (!team) - return -EINVAL; + if (!team) { + err = -EINVAL; + goto rtnl_unlock; + } err = -EINVAL; if (!info->attrs[TEAM_ATTR_LIST_OPTION]) { @@ -2549,7 +2581,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) team_put: team_nl_team_put(team); - +rtnl_unlock: + rtnl_unlock(); return err; } diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c index 40fd3381b693..3f189823ba3b 100644 --- a/drivers/net/team/team_mode_activebackup.c +++ b/drivers/net/team/team_mode_activebackup.c @@ -127,6 +127,7 @@ static const struct team_mode ab_mode = { .owner = THIS_MODULE, .priv_size = sizeof(struct ab_priv), .ops = &ab_mode_ops, + .lag_tx_type = NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, }; static int __init ab_init_module(void) diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index c366cd299c06..302ff35b0cbc 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -56,6 +56,7 @@ static const struct team_mode bc_mode = { .kind = "broadcast", .owner = THIS_MODULE, .ops = &bc_mode_ops, + .lag_tx_type = NETDEV_LAG_TX_TYPE_BROADCAST, }; static int __init bc_init_module(void) diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index a1536d0d83a9..cdb19b385d42 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -661,6 +661,7 @@ static const struct team_mode lb_mode = { .priv_size = sizeof(struct lb_priv), .port_priv_size = sizeof(struct lb_port_priv), .ops = &lb_mode_ops, + .lag_tx_type = NETDEV_LAG_TX_TYPE_HASH, }; static int __init lb_init_module(void) diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c index cd2f692b8074..215f845782db 100644 --- a/drivers/net/team/team_mode_random.c +++ b/drivers/net/team/team_mode_random.c @@ -46,6 +46,7 @@ static const struct team_mode rnd_mode = { .kind = "random", .owner = THIS_MODULE, .ops = &rnd_mode_ops, + .lag_tx_type = NETDEV_LAG_TX_TYPE_RANDOM, }; static int __init rnd_init_module(void) diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index 53665850b59e..0aa234118c03 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -58,6 +58,7 @@ static const struct team_mode rr_mode = { .owner = THIS_MODULE, .priv_size = sizeof(struct rr_priv), .ops = &rr_mode_ops, + 
.lag_tx_type = NETDEV_LAG_TX_TYPE_ROUNDROBIN, }; static int __init rr_init_module(void) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index f0db770e8b2f..88bb8cc3555b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1095,6 +1095,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, u32 rxhash; ssize_t n; + if (!(tun->dev->flags & IFF_UP)) + return -EIO; + if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index bd9acff1eb7b..0c5c22b84da8 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -118,7 +118,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, return 0; } if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { - netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n", + netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", size); return 0; } diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 5f18fcb8dcc7..224e7d82de6d 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -98,7 +98,7 @@ static void ax88172a_status(struct usbnet *dev, struct urb *urb) static int ax88172a_init_mdio(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; - int ret, i; + int ret; priv->mdio = mdiobus_alloc(); if (!priv->mdio) { @@ -114,25 +114,15 @@ static int ax88172a_init_mdio(struct usbnet *dev) snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mdio->irq) { - ret = -ENOMEM; - goto mfree; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - priv->mdio->irq[i] = PHY_POLL; - ret = mdiobus_register(priv->mdio); if (ret) { netdev_err(dev->net, "Could not register MDIO bus\n"); - goto ifree; + goto mfree; } netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id); return 0; -ifree: - kfree(priv->mdio->irq); mfree: mdiobus_free(priv->mdio); return ret; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3da70bf9936a..7cba2c3759df 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -160,6 +160,12 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) info->u = header.usb_cdc_union_desc; info->header = header.usb_cdc_header_desc; info->ether = header.usb_cdc_ether_desc; + if (!info->u) { + if (rndis) + goto skip; + else /* in that case a quirk is mandatory */ + goto bad_desc; + } /* we need a master/control interface (what we're * probed with) and a slave/data interface; union * descriptors sort this all out. @@ -256,7 +262,7 @@ skip: goto bad_desc; } - } else if (!info->header || !info->u || (!rndis && !info->ether)) { + } else if (!info->header || (!rndis && !info->ether)) { dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n", info->header ? "" : "header ", info->u ? 
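/*
 * Sketch (editorial): ax88172a above (and lan78xx further down) stop
 * allocating an irq table for the MDIO bus; the table appears to be
 * provided by the MDIO core now, so drivers only fill in per-address
 * values. A hypothetical "foo" driver's bus setup then reduces to
 * something like this (read()/write() accessors omitted for brevity):
 */
#include <linux/kernel.h>
#include <linux/phy.h>

struct foo_dev {
	struct mii_bus *mdio;
	int index;
};

static int foo_mdio_init(struct foo_dev *foo)
{
	int i, ret;

	foo->mdio = mdiobus_alloc();
	if (!foo->mdio)
		return -ENOMEM;

	foo->mdio->name = "foo-mdio";
	foo->mdio->priv = foo;
	snprintf(foo->mdio->id, MII_BUS_ID_SIZE, "foo-%d", foo->index);

	/* no separate kzalloc(): just describe how each address is wired */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		foo->mdio->irq[i] = PHY_POLL;

	ret = mdiobus_register(foo->mdio);
	if (ret)
		mdiobus_free(foo->mdio);
	return ret;
}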
"" : "union ", diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index bbde9884ab8a..bdd83d95ec0a 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_change_mtu = usbnet_change_mtu, + .ndo_change_mtu = cdc_ncm_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid, @@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; - ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); + ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data); if (ret) goto err; @@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = { .tx_fixup = cdc_mbim_tx_fixup, }; +/* The spefication explicitly allows NDPs to be placed anywhere in the + * frame, but some devices fail unless the NDP is placed after the IP + * packets. Using the CDC_NCM_FLAG_NDP_TO_END flags to force this + * behaviour. + * + * Note: The current implementation of this feature restricts each NTB + * to a single NDP, implying that multiplexed sessions cannot share an + * NTB. This might affect performace for multiplexed sessions. + */ +static const struct driver_info cdc_mbim_info_ndp_to_end = { + .description = "CDC MBIM", + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, + .bind = cdc_mbim_bind, + .unbind = cdc_mbim_unbind, + .manage_power = cdc_mbim_manage_power, + .rx_fixup = cdc_mbim_rx_fixup, + .tx_fixup = cdc_mbim_tx_fixup, + .data = CDC_NCM_FLAG_NDP_TO_END, +}; + static const struct usb_device_id mbim_devs[] = { /* This duplicate NCM entry is intentional. MBIM devices can * be disguised as NCM by default, and this is necessary to @@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info, }, + /* Huawei E3372 fails unless NDP comes after the IP packets */ + { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, + }, /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 3b1ba8237768..dc0212c3cc28 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -41,6 +41,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ctype.h> +#include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> @@ -283,6 +284,48 @@ static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max); static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs); +static ssize_t ndp_to_end_show(struct device *d, struct device_attribute *attr, char *buf) +{ + struct usbnet *dev = netdev_priv(to_net_dev(d)); + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + + return sprintf(buf, "%c\n", ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END ? 
'Y' : 'N'); +} + +static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len) +{ + struct usbnet *dev = netdev_priv(to_net_dev(d)); + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + bool enable; + + if (strtobool(buf, &enable)) + return -EINVAL; + + /* no change? */ + if (enable == (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) + return len; + + if (enable && !ctx->delayed_ndp16) { + ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); + if (!ctx->delayed_ndp16) + return -ENOMEM; + } + + /* flush pending data before changing flag */ + netif_tx_lock_bh(dev->net); + usbnet_start_xmit(NULL, dev->net); + spin_lock_bh(&ctx->mtx); + if (enable) + ctx->drvflags |= CDC_NCM_FLAG_NDP_TO_END; + else + ctx->drvflags &= ~CDC_NCM_FLAG_NDP_TO_END; + spin_unlock_bh(&ctx->mtx); + netif_tx_unlock_bh(dev->net); + + return len; +} +static DEVICE_ATTR_RW(ndp_to_end); + #define NCM_PARM_ATTR(name, format, tocpu) \ static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *attr, char *buf) \ { \ @@ -305,6 +348,7 @@ NCM_PARM_ATTR(wNtbOutMaxDatagrams, "%u", le16_to_cpu); static struct attribute *cdc_ncm_sysfs_attrs[] = { &dev_attr_min_tx_pkt.attr, + &dev_attr_ndp_to_end.attr, &dev_attr_rx_max.attr, &dev_attr_tx_max.attr, &dev_attr_tx_timer_usecs.attr, @@ -689,6 +733,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) kfree(ctx); } +/* we need to override the usbnet change_mtu ndo for two reasons: + * - respect the negotiated maximum datagram size + * - avoid unwanted changes to rx and tx buffers + */ +int cdc_ncm_change_mtu(struct net_device *net, int new_mtu) +{ + struct usbnet *dev = netdev_priv(net); + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev); + + if (new_mtu <= 0 || new_mtu > maxmtu) + return -EINVAL; + net->mtu = new_mtu; + return 0; +} +EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu); + +static const struct net_device_ops cdc_ncm_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = cdc_ncm_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) { struct cdc_ncm_ctx *ctx; @@ -823,6 +894,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ /* add our sysfs attrs */ dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group; + /* must handle MTU changes */ + dev->net->netdev_ops = &cdc_ncm_netdev_ops; + return 0; error2: @@ -955,10 +1029,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and * the wNdpIndex field in the header is actually not consistent with reality. It will be later. */ - if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { if (ctx->delayed_ndp16->dwSignature == sign) return ctx->delayed_ndp16; + /* We can only push a single NDP to the end. Return + * NULL to send what we've already got and queue this + * skb for later. 
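/*
 * Sketch (editorial): the ndp_to_end attribute above toggles
 * CDC_NCM_FLAG_NDP_TO_END at runtime, with the single-NDP restriction
 * noted in the comment: once the flag is set, each NTB carries exactly
 * one NDP, so multiplexed sessions cannot share an NTB. A minimal
 * userspace toggle, assuming the attribute group is exposed as "cdc_ncm"
 * under the netdev (the exact sysfs path is an assumption):
 */
#include <stdio.h>

static int set_ndp_to_end(const char *ifname, int enable)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/net/%s/cdc_ncm/ndp_to_end", ifname);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(enable ? "Y\n" : "N\n", f);	/* parsed with strtobool() */
	return fclose(f);
}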
+ */ + else if (ctx->delayed_ndp16->dwSignature) + return NULL; + } + /* follow the chain of NDPs, looking for a match */ while (ndpoffset) { ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); @@ -1550,6 +1632,24 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* DW5812 LTE Verizon Mobile Broadband Card + * Unlike DW5550 this device requires FLAG_NOARP + */ + { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_noarp_info, + }, + + /* DW5813 LTE AT&T Mobile Broadband Card + * Unlike DW5550 this device requires FLAG_NOARP + */ + { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bc, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_noarp_info, + }, + /* Dell branded MBM devices like DW5550 */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 226668ead0d8..2ed53331bfb2 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -603,6 +603,59 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, return 0; } +static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + int i; + int ret; + u32 buf; + unsigned long timeout; + + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + + if (buf & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + + timeout = jiffies + HZ; + do { + udelay(1); + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + "timeout on OTP_PWR_DN completion"); + return -EIO; + } + } while (buf & OTP_PWR_DN_PWRDN_N_); + } + + /* set to BYTE program mode */ + ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + + for (i = 0; i < length; i++) { + ret = lan78xx_write_reg(dev, OTP_ADDR1, + ((offset + i) >> 8) & OTP_ADDR1_15_11); + ret = lan78xx_write_reg(dev, OTP_ADDR2, + ((offset + i) & OTP_ADDR2_10_3)); + ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); + ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + + timeout = jiffies + HZ; + do { + udelay(1); + ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + "Timeout on OTP_STATUS completion"); + return -EIO; + } + } while (buf & OTP_STATUS_BUSY_); + } + + return 0; +} + static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { @@ -969,7 +1022,7 @@ static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, (ee->offset == 0) && (ee->len == 512) && (data[0] == OTP_INDICATOR_1)) - return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); return -EINVAL; } @@ -1458,12 +1511,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!dev->mdiobus->irq) { - ret = -ENOMEM; - goto exit1; - } - /* handle our own interrupt */ for (i = 0; i < PHY_MAX_ADDR; i++) dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT; @@ -1479,13 +1526,11 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) ret = mdiobus_register(dev->mdiobus); if (ret) { 
netdev_err(dev->net, "can't register MDIO bus\n"); - goto exit2; + goto exit1; } netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id); return 0; -exit2: - kfree(dev->mdiobus->irq); exit1: mdiobus_free(dev->mdiobus); return ret; @@ -1494,7 +1539,6 @@ exit1: static void lan78xx_remove_mdio(struct lan78xx_net *dev) { mdiobus_unregister(dev->mdiobus); - kfree(dev->mdiobus->irq); mdiobus_free(dev->mdiobus); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9a5be8b85186..23e9880791fc 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -14,7 +14,9 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> +#include <linux/if_arp.h> #include <linux/mii.h> +#include <linux/rtnetlink.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> @@ -48,11 +50,100 @@ struct qmi_wwan_state { struct usb_driver *subdriver; atomic_t pmcount; - unsigned long unused; + unsigned long flags; struct usb_interface *control; struct usb_interface *data; }; +enum qmi_wwan_flags { + QMI_WWAN_FLAG_RAWIP = 1 << 0, +}; + +static void qmi_wwan_netdev_setup(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + struct qmi_wwan_state *info = (void *)&dev->data; + + if (info->flags & QMI_WWAN_FLAG_RAWIP) { + net->header_ops = NULL; /* No header */ + net->type = ARPHRD_NONE; + net->hard_header_len = 0; + net->addr_len = 0; + net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + netdev_dbg(net, "mode: raw IP\n"); + } else if (!net->header_ops) { /* don't bother if already set */ + ether_setup(net); + netdev_dbg(net, "mode: Ethernet\n"); + } + + /* recalculate buffers after changing hard_header_len */ + usbnet_change_mtu(net, net->mtu); +} + +static ssize_t raw_ip_show(struct device *d, struct device_attribute *attr, char *buf) +{ + struct usbnet *dev = netdev_priv(to_net_dev(d)); + struct qmi_wwan_state *info = (void *)&dev->data; + + return sprintf(buf, "%c\n", info->flags & QMI_WWAN_FLAG_RAWIP ? 'Y' : 'N'); +} + +static ssize_t raw_ip_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len) +{ + struct usbnet *dev = netdev_priv(to_net_dev(d)); + struct qmi_wwan_state *info = (void *)&dev->data; + bool enable; + int ret; + + if (strtobool(buf, &enable)) + return -EINVAL; + + /* no change? 
*/ + if (enable == (info->flags & QMI_WWAN_FLAG_RAWIP)) + return len; + + if (!rtnl_trylock()) + return restart_syscall(); + + /* we don't want to modify a running netdev */ + if (netif_running(dev->net)) { + netdev_err(dev->net, "Cannot change a running device\n"); + ret = -EBUSY; + goto err; + } + + /* let other drivers deny the change */ + ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev->net); + ret = notifier_to_errno(ret); + if (ret) { + netdev_err(dev->net, "Type change was refused\n"); + goto err; + } + + if (enable) + info->flags |= QMI_WWAN_FLAG_RAWIP; + else + info->flags &= ~QMI_WWAN_FLAG_RAWIP; + qmi_wwan_netdev_setup(dev->net); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev->net); + ret = len; +err: + rtnl_unlock(); + return ret; +} + +static DEVICE_ATTR_RW(raw_ip); + +static struct attribute *qmi_wwan_sysfs_attrs[] = { + &dev_attr_raw_ip.attr, + NULL, +}; + +static struct attribute_group qmi_wwan_sysfs_attr_group = { + .name = "qmi", + .attrs = qmi_wwan_sysfs_attrs, +}; + /* default ethernet address used by the modem */ static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; @@ -80,6 +171,8 @@ static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00}; */ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + struct qmi_wwan_state *info = (void *)&dev->data; + bool rawip = info->flags & QMI_WWAN_FLAG_RAWIP; __be16 proto; /* This check is no longer done by usbnet */ @@ -94,15 +187,25 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) proto = htons(ETH_P_IPV6); break; case 0x00: + if (rawip) + return 0; if (is_multicast_ether_addr(skb->data)) return 1; /* possibly bogus destination - rewrite just in case */ skb_reset_mac_header(skb); goto fix_dest; default: + if (rawip) + return 0; /* pass along other packets without modifications */ return 1; } + if (rawip) { + skb->dev = dev->net; /* normally set by eth_type_trans */ + skb->protocol = proto; + return 1; + } + if (skb_headroom(skb) < ETH_HLEN) return 0; skb_push(skb, ETH_HLEN); @@ -223,6 +326,20 @@ err: return rv; } +/* Send CDC SetControlLineState request, setting or clearing the DTR. + * "Required for Autoconnect and 9x30 to wake up" according to the + * GobiNet driver. The requirement has been verified on an MDM9230 + * based Sierra Wireless MC7455 + */ +static int qmi_wwan_change_dtr(struct usbnet *dev, bool on) +{ + u8 intf = dev->intf->cur_altsetting->desc.bInterfaceNumber; + + return usbnet_write_cmd(dev, USB_CDC_REQ_SET_CONTROL_LINE_STATE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + on ? 0x01 : 0x00, intf, NULL, 0); +} + static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) { int status = -1; @@ -257,7 +374,10 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) "bogus CDC Union: master=%u, slave=%u\n", cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); - goto err; + + /* ignore and continue... */ + cdc_union = NULL; + info->data = intf; } } @@ -280,6 +400,24 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) usb_driver_release_interface(driver, info->data); } + /* disabling remote wakeup on MDM9x30 devices has the same + * effect as clearing DTR. The device will not respond to QMI + * requests until we set DTR again. This is similar to a + * QMI_CTL SYNC request, clearing a lot of firmware state + * including the client ID allocations. 
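/*
 * Sketch (editorial): the raw_ip attribute added above lives in the "qmi"
 * attribute group, so it should show up as
 * /sys/class/net/<iface>/qmi/raw_ip. raw_ip_store() refuses to change a
 * running device, so the interface must be down first. Minimal userspace
 * example (interface name "wwan0" is hypothetical):
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f;

	/* bring the interface down first; the store handler returns -EBUSY otherwise */
	if (system("ip link set dev wwan0 down") != 0)
		return 1;

	f = fopen("/sys/class/net/wwan0/qmi/raw_ip", "w");
	if (!f)
		return 1;
	fputs("Y\n", f);	/* switch the netdev to ARPHRD_NONE / raw-IP framing */
	fclose(f);

	return system("ip link set dev wwan0 up") != 0;
}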
+ * + * Our usage model allows a session to span multiple + * open/close events, so we must prevent the firmware from + * clearing out state the clients might need. + * + * MDM9x30 is the first QMI chipset with USB3 support. Abuse + * this fact to enable the quirk. + */ + if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) { + qmi_wwan_manage_power(dev, 1); + qmi_wwan_change_dtr(dev, true); + } + /* Never use the same address on both ends of the link, even if the * buggy firmware told us to. Or, if device is assigned the well-known * buggy firmware MAC address, replace it with a random address, @@ -294,6 +432,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */ } dev->net->netdev_ops = &qmi_wwan_netdev_ops; + dev->net->sysfs_groups[0] = &qmi_wwan_sysfs_attr_group; err: return status; } @@ -307,6 +446,12 @@ static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) if (info->subdriver && info->subdriver->disconnect) info->subdriver->disconnect(info->control); + /* disable MDM9x30 quirk */ + if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) { + qmi_wwan_change_dtr(dev, false); + qmi_wwan_manage_power(dev, 0); + } + /* allow user to unbind using either control or data */ if (intf == info->control) other = info->data; @@ -715,8 +860,6 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ - {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ @@ -742,6 +885,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ + {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d9427ca3dba7..d1f78c2c97aa 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -25,12 +25,13 @@ #include <uapi/linux/mdio.h> #include <linux/mdio.h> #include <linux/usb/cdc.h> +#include <linux/suspend.h> /* Information for net-next */ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "2" +#define NET_VERSION "3" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -604,6 +605,9 @@ struct r8152 { struct delayed_work schedule; struct mii_if_info mii; struct mutex control; /* use for hw setting */ +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notifier; +#endif struct rtl_ops { void (*init)(struct r8152 *); @@ -1938,7 +1942,6 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) __le32 tmp[2]; u32 ocp_data; - clear_bit(RTL8152_SET_RX_MODE, &tp->flags); netif_stop_queue(netdev); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; @@ -1986,7 +1989,7 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, int offset = skb_transport_offset(skb); if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) - features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) features &= ~NETIF_F_GSO_MASK; @@ -2424,8 +2427,6 @@ static void rtl_phy_reset(struct r8152 *tp) u16 data; int i; - clear_bit(PHY_RESET, &tp->flags); - data = r8152_mdio_read(tp, MII_BMCR); /* don't reset again before the previous one complete */ @@ -2455,23 +2456,23 @@ static void r8153_teredo_off(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); } -static void r8152b_disable_aldps(struct r8152 *tp) -{ - ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); - msleep(20); -} - -static inline void r8152b_enable_aldps(struct r8152 *tp) +static void r8152_aldps_en(struct r8152 *tp, bool enable) { - ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | - LINKENA | DIS_SDSAVE); + if (enable) { + ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | + LINKENA | DIS_SDSAVE); + } else { + ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | + DIS_SDSAVE); + msleep(20); + } } static void rtl8152_disable(struct r8152 *tp) { - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); rtl_disable(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); } static void r8152b_hw_phy_cfg(struct r8152 *tp) @@ -2783,30 +2784,26 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } -static void r8153_disable_aldps(struct r8152 *tp) +static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; data = ocp_reg_read(tp, OCP_POWER_CFG); - data &= ~EN_ALDPS; - ocp_reg_write(tp, OCP_POWER_CFG, data); - msleep(20); -} - -static void r8153_enable_aldps(struct r8152 *tp) -{ - u16 data; - - data = ocp_reg_read(tp, OCP_POWER_CFG); - data |= EN_ALDPS; - ocp_reg_write(tp, OCP_POWER_CFG, data); + if (enable) { + data |= EN_ALDPS; + ocp_reg_write(tp, OCP_POWER_CFG, data); + } else { + data &= ~EN_ALDPS; + ocp_reg_write(tp, OCP_POWER_CFG, data); + msleep(20); + } } static void rtl8153_disable(struct r8152 *tp) { - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); rtl_disable(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); usb_enable_lpm(tp->udev); } @@ -2884,10 +2881,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr); - if (test_bit(PHY_RESET, &tp->flags)) { + if (test_and_clear_bit(PHY_RESET, &tp->flags)) { int i; - clear_bit(PHY_RESET, &tp->flags); for (i = 0; i < 50; i++) { msleep(20); if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) @@ -2896,7 +2892,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, 
u8 duplex) } out: - return ret; } @@ -2905,9 +2900,9 @@ static void rtl8152_up(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); r8152b_exit_oob(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); } static void rtl8152_down(struct r8152 *tp) @@ -2918,9 +2913,9 @@ static void rtl8152_down(struct r8152 *tp) } r8152_power_cut_en(tp, false); - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); r8152b_enter_oob(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); } static void rtl8153_up(struct r8152 *tp) @@ -2929,9 +2924,9 @@ static void rtl8153_up(struct r8152 *tp) return; r8153_u1u2en(tp, false); - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); r8153_first_init(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); r8153_u2p3en(tp, true); r8153_u1u2en(tp, true); usb_enable_lpm(tp->udev); @@ -2947,9 +2942,9 @@ static void rtl8153_down(struct r8152 *tp) r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); r8153_enter_oob(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); } static bool rtl8152_in_nway(struct r8152 *tp) @@ -2983,7 +2978,6 @@ static void set_carrier(struct r8152 *tp) struct net_device *netdev = tp->netdev; u8 speed; - clear_bit(RTL8152_LINK_CHG, &tp->flags); speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { @@ -3026,20 +3020,18 @@ static void rtl_work_func_t(struct work_struct *work) goto out1; } - if (test_bit(RTL8152_LINK_CHG, &tp->flags)) + if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags)) set_carrier(tp); - if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) + if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) _rtl8152_set_rx_mode(tp->netdev); /* don't schedule napi before linking */ - if (test_bit(SCHEDULE_NAPI, &tp->flags) && - netif_carrier_ok(tp->netdev)) { - clear_bit(SCHEDULE_NAPI, &tp->flags); + if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) && + netif_carrier_ok(tp->netdev)) napi_schedule(&tp->napi); - } - if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) rtl_phy_reset(tp); mutex_unlock(&tp->control); @@ -3048,6 +3040,33 @@ out1: usb_autopm_put_interface(tp->intf); } +#ifdef CONFIG_PM_SLEEP +static int rtl_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct r8152 *tp = container_of(nb, struct r8152, pm_notifier); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + usb_autopm_get_interface(tp->intf); + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + usb_autopm_put_interface(tp->intf); + break; + + case PM_POST_RESTORE: + case PM_RESTORE_PREPARE: + default: + break; + } + + return NOTIFY_DONE; +} +#endif + static int rtl8152_open(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); @@ -3067,17 +3086,6 @@ static int rtl8152_open(struct net_device *netdev) mutex_lock(&tp->control); - /* The WORK_ENABLE may be set when autoresume occurs */ - if (test_bit(WORK_ENABLE, &tp->flags)) { - clear_bit(WORK_ENABLE, &tp->flags); - usb_kill_urb(tp->intr_urb); - cancel_delayed_work_sync(&tp->schedule); - - /* disable the tx/rx, if the workqueue has enabled them. 
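/*
 * Sketch (editorial): the rtl_work_func_t() changes above fold the
 * test_bit()+clear_bit() pairs into single test_and_clear_bit() calls,
 * which consume the event flag and report whether work is pending in one
 * atomic step. The generic pattern, with hypothetical event bits:
 */
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define FOO_EVT_LINK	0	/* hypothetical event bits */
#define FOO_EVT_RX_MODE	1

struct foo_dev {
	unsigned long flags;
	struct work_struct work;
};

static void foo_work(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, work);

	/* consume each event atomically; later events simply re-queue the work */
	if (test_and_clear_bit(FOO_EVT_LINK, &foo->flags)) {
		/* handle link change */
	}
	if (test_and_clear_bit(FOO_EVT_RX_MODE, &foo->flags)) {
		/* reprogram RX filters */
	}
}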
*/ - if (netif_carrier_ok(netdev)) - tp->rtl_ops.disable(tp); - } - tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, @@ -3101,6 +3109,10 @@ static int rtl8152_open(struct net_device *netdev) mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); +#ifdef CONFIG_PM_SLEEP + tp->pm_notifier.notifier_call = rtl_notifier; + register_pm_notifier(&tp->pm_notifier); +#endif out: return res; @@ -3111,6 +3123,9 @@ static int rtl8152_close(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&tp->pm_notifier); +#endif napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); @@ -3124,12 +3139,6 @@ static int rtl8152_close(struct net_device *netdev) } else { mutex_lock(&tp->control); - /* The autosuspend may have been enabled and wouldn't - * be disable when autoresume occurs, because the - * netif_running() would be false. - */ - rtl_runtime_suspend_enable(tp, false); - tp->rtl_ops.down(tp); mutex_unlock(&tp->control); @@ -3255,7 +3264,7 @@ static void r8152b_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); if (tp->version == RTL_VER_01) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); @@ -3277,7 +3286,7 @@ static void r8152b_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); r8152b_enable_eee(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); r8152b_enable_fc(tp); rtl_tally_reset(tp); @@ -3295,7 +3304,7 @@ static void r8153_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { @@ -3384,7 +3393,7 @@ static void r8153_init(struct r8152 *tp) EEE_SPDWN_EN); r8153_enable_eee(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); r8152b_enable_fc(tp); rtl_tally_reset(tp); r8153_u2p3en(tp, true); @@ -3512,7 +3521,7 @@ static int rtl8152_resume(struct usb_interface *intf) netif_device_attach(tp->netdev); } - if (netif_running(tp->netdev)) { + if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); @@ -3532,6 +3541,8 @@ static int rtl8152_resume(struct usb_interface *intf) } usb_submit_urb(tp->intr_urb, GFP_KERNEL); } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + if (tp->netdev->flags & IFF_UP) + rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); } @@ -3540,6 +3551,14 @@ static int rtl8152_resume(struct usb_interface *intf) return 0; } +static int rtl8152_reset_resume(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + return rtl8152_resume(intf); +} + static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct r8152 *tp = netdev_priv(dev); @@ -4291,7 +4310,7 @@ static struct usb_driver rtl8152_driver = { .disconnect = rtl8152_disconnect, .suspend = rtl8152_suspend, .resume = rtl8152_resume, - .reset_resume = rtl8152_resume, + .reset_resume = rtl8152_reset_resume, .pre_reset = rtl8152_pre_reset, .post_reset = rtl8152_post_reset, .supports_autosuspend = 1, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 0744bf2ef2d6..0b0ba7ef14e4 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -324,7 +324,10 @@ void 
usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) return; } - skb->protocol = eth_type_trans (skb, dev->net); + /* only update if unset to allow minidriver rx_fixup override */ + if (skb->protocol == 0) + skb->protocol = eth_type_trans (skb, dev->net); + dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 0ef4a5ad5557..ba21d072be31 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) kfree_skb(skb); goto drop; } - /* don't change ip_summed == CHECKSUM_PARTIAL, as that - * will cause bad checksum on forwarded packets - */ - if (skb->ip_summed == CHECKSUM_NONE && - rcv->features & NETIF_F_RXCSUM) - skb->ip_summed = CHECKSUM_UNNECESSARY; if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) { struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f94ab786088f..767ab11a6e9f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -522,8 +522,6 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, skb_shinfo(skb)->gso_segs = 0; } - skb_mark_napi_id(skb, &rq->napi); - napi_gro_receive(&rq->napi, skb); return; @@ -1616,7 +1614,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); - napi_hash_add(&vi->rq[i].napi); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 417903715437..0cbf520cea77 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1380,10 +1380,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } - new_dma_addr = dma_map_page(&adapter->pdev->dev - , rbi->page, - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + new_dma_addr = dma_map_page(&adapter->pdev->dev, + new_page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); if (dma_mapping_error(&adapter->pdev->dev, new_dma_addr)) { put_page(new_page); diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 4c58c83dc225..bdb8a6c0f8aa 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040400 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. 
*/ diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 4f9748457f5a..66addb7a7911 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -46,17 +46,7 @@ #define vrf_master_get_rcu(dev) \ ((struct net_device *)rcu_dereference(dev->rx_handler_data)) -struct slave { - struct list_head list; - struct net_device *dev; -}; - -struct slave_queue { - struct list_head all_slaves; -}; - struct net_vrf { - struct slave_queue queue; struct rtable *rth; struct rt6_info *rt6; u32 tb_id; @@ -621,42 +611,9 @@ static void cycle_netdev(struct net_device *dev) } } -static struct slave *__vrf_find_slave_dev(struct slave_queue *queue, - struct net_device *dev) -{ - struct list_head *head = &queue->all_slaves; - struct slave *slave; - - list_for_each_entry(slave, head, list) { - if (slave->dev == dev) - return slave; - } - - return NULL; -} - -/* inverse of __vrf_insert_slave */ -static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave) -{ - list_del(&slave->list); -} - -static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave) -{ - list_add(&slave->list, &queue->all_slaves); -} - static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) { - struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL); - struct net_vrf *vrf = netdev_priv(dev); - struct slave_queue *queue = &vrf->queue; - int ret = -ENOMEM; - - if (!slave) - goto out_fail; - - slave->dev = port_dev; + int ret; /* register the packet handler for slave ports */ ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev); @@ -667,12 +624,11 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) goto out_fail; } - ret = netdev_master_upper_dev_link(port_dev, dev); + ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL); if (ret < 0) goto out_unregister; port_dev->priv_flags |= IFF_L3MDEV_SLAVE; - __vrf_insert_slave(queue, slave); cycle_netdev(port_dev); return 0; @@ -680,7 +636,6 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) out_unregister: netdev_rx_handler_unregister(port_dev); out_fail: - kfree(slave); return ret; } @@ -695,10 +650,6 @@ static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev) /* inverse of do_vrf_add_slave */ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) { - struct net_vrf *vrf = netdev_priv(dev); - struct slave_queue *queue = &vrf->queue; - struct slave *slave; - netdev_upper_dev_unlink(port_dev, dev); port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; @@ -706,12 +657,6 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) cycle_netdev(port_dev); - slave = __vrf_find_slave_dev(queue, port_dev); - if (slave) - __vrf_remove_slave(queue, slave); - - kfree(slave); - return 0; } @@ -723,15 +668,14 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) static void vrf_dev_uninit(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct slave_queue *queue = &vrf->queue; - struct list_head *head = &queue->all_slaves; - struct slave *slave, *next; + struct net_device *port_dev; + struct list_head *iter; vrf_rtable_destroy(vrf); vrf_rt6_destroy(vrf); - list_for_each_entry_safe(slave, next, head, list) - vrf_del_slave(dev, slave->dev); + netdev_for_each_lower_dev(dev, port_dev, iter) + vrf_del_slave(dev, port_dev); free_percpu(dev->dstats); dev->dstats = NULL; @@ -741,8 +685,6 @@ static int vrf_dev_init(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - 
INIT_LIST_HEAD(&vrf->queue.all_slaves); - dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); if (!dev->dstats) goto out_nomem; @@ -800,7 +742,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev, } /* called under rcu_read_lock */ -static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) +static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) { struct fib_result res = { .tclassid = 0 }; struct net *net = dev_net(dev); @@ -808,9 +750,10 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) u8 flags = fl4->flowi4_flags; u8 scope = fl4->flowi4_scope; u8 tos = RT_FL_TOS(fl4); + int rc; if (unlikely(!fl4->daddr)) - return; + return 0; fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF; fl4->flowi4_iif = LOOPBACK_IFINDEX; @@ -818,7 +761,8 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) fl4->flowi4_scope = ((tos & RTO_ONLINK) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); - if (!fib_lookup(net, fl4, &res, 0)) { + rc = fib_lookup(net, fl4, &res, 0); + if (!rc) { if (res.type == RTN_LOCAL) fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr; else @@ -828,6 +772,8 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) fl4->flowi4_flags = flags; fl4->flowi4_tos = orig_tos; fl4->flowi4_scope = scope; + + return rc; } #if IS_ENABLED(CONFIG_IPV6) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6369a5734d4c..2d88c799d2ac 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -621,7 +621,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) int err; if (sa_family == AF_INET) { - err = udp_add_offload(&vs->udp_offloads); + err = udp_add_offload(net, &vs->udp_offloads); if (err) pr_warn("vxlan: udp_add_offload failed with status %d\n", err); } @@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, struct pcpu_sw_netstats *stats; union vxlan_addr saddr; int err = 0; - union vxlan_addr *remote_ip; /* For flow based devices, map all packets to VNI 0 */ if (vs->flags & VXLAN_F_COLLECT_METADATA) @@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, if (!vxlan) goto drop; - remote_ip = &vxlan->default_dst.remote_ip; skb_reset_mac_header(skb); skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); skb->protocol = eth_type_trans(skb, vxlan->dev); @@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) goto drop; - /* Re-examine inner Ethernet packet */ - if (remote_ip->sa.sa_family == AF_INET) { + /* Get data from the outer IP header */ + if (vxlan_get_sk_family(vs) == AF_INET) { oip = ip_hdr(skb); saddr.sin.sin_addr.s_addr = oip->saddr; saddr.sa.sa_family = AF_INET; @@ -1843,11 +1841,40 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, - ttl, df, src_port, dst_port, xnet, - !(vxflags & VXLAN_F_UDP_CSUM)); + udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df, + src_port, dst_port, xnet, + !(vxflags & VXLAN_F_UDP_CSUM)); + return 0; } +#if IS_ENABLED(CONFIG_IPV6) +static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, + struct sk_buff *skb, int oif, + const struct in6_addr *daddr, + struct in6_addr *saddr) +{ + struct dst_entry *ndst; + struct flowi6 fl6; + int err; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_oif = oif; + fl6.daddr = *daddr; + fl6.saddr = 
vxlan->cfg.saddr.sin6.sin6_addr; + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = IPPROTO_UDP; + + err = ipv6_stub->ipv6_dst_lookup(vxlan->net, + vxlan->vn6_sock->sock->sk, + &ndst, &fl6); + if (err < 0) + return ERR_PTR(err); + + *saddr = fl6.saddr; + return ndst; +} +#endif + /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct vxlan_dev *dst_vxlan) @@ -2030,26 +2057,20 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb = NULL; goto rt_tx_error; } - - iptunnel_xmit_stats(err, &dev->stats, dev->tstats); #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; - struct flowi6 fl6; + struct in6_addr saddr; u32 rt6i_flags; if (!vxlan->vn6_sock) goto drop; sk = vxlan->vn6_sock->sock->sk; - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; - fl6.daddr = dst->sin6.sin6_addr; - fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; - fl6.flowi6_mark = skb->mark; - fl6.flowi6_proto = IPPROTO_UDP; - - if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) { + ndst = vxlan6_get_route(vxlan, skb, + rdst ? rdst->remote_ifindex : 0, + &dst->sin6.sin6_addr, &saddr); + if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); dev->stats.tx_carrier_errors++; @@ -2081,7 +2102,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = ttl ? : ip6_dst_hoplimit(ndst); - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, + err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, 0, ttl, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), flags); @@ -2395,9 +2416,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) vxlan->cfg.port_max, true); dport = info->key.tp_dst ? : vxlan->cfg.dst_port; - if (ip_tunnel_info_af(info) == AF_INET) + if (ip_tunnel_info_af(info) == AF_INET) { + if (!vxlan->vn4_sock) + return -EINVAL; return egress_ipv4_tun_info(dev, skb, info, sport, dport); - return -EINVAL; + } else { +#if IS_ENABLED(CONFIG_IPV6) + struct dst_entry *ndst; + + if (!vxlan->vn6_sock) + return -EINVAL; + ndst = vxlan6_get_route(vxlan, skb, 0, + &info->key.u.ipv6.dst, + &info->key.u.ipv6.src); + if (IS_ERR(ndst)) + return PTR_ERR(ndst); + dst_release(ndst); + + info->key.tp_src = sport; + info->key.tp_dst = dport; +#else /* !CONFIG_IPV6 */ + return -EPFNOSUPPORT; +#endif + } + return 0; } static const struct net_device_ops vxlan_netdev_ops = { @@ -2708,7 +2750,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, struct vxlan_config *conf) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); - struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_dev *vxlan = netdev_priv(dev), *tmp; struct vxlan_rdst *dst = &vxlan->default_dst; unsigned short needed_headroom = ETH_HLEN; int err; @@ -2774,9 +2816,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, if (!vxlan->cfg.age_interval) vxlan->cfg.age_interval = FDB_AGE_DEFAULT; - if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? 
AF_INET6 : AF_INET, - vxlan->cfg.dst_port, vxlan->flags)) + list_for_each_entry(tmp, &vn->vxlan_list, next) { + if (tmp->cfg.vni == conf->vni && + (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 || + tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && + tmp->cfg.dst_port == vxlan->cfg.dst_port && + (tmp->flags & VXLAN_F_RCV_FLAGS) == + (vxlan->flags & VXLAN_F_RCV_FLAGS)) return -EEXIST; + } dev->ethtool_ops = &vxlan_ethtool_ops; diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 51f6cee8aab2..9bd4aa8083ce 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -266,8 +266,8 @@ struct net_device *alloc_hdlcdev(void *priv) void unregister_hdlc_device(struct net_device *dev) { rtnl_lock(); - unregister_netdevice(dev); detach_hdlc_protocol(dev); + unregister_netdevice(dev); rtnl_unlock(); } @@ -276,7 +276,11 @@ void unregister_hdlc_device(struct net_device *dev) int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, size_t size) { - detach_hdlc_protocol(dev); + int err; + + err = detach_hdlc_protocol(dev); + if (err) + return err; if (!try_module_get(proto->module)) return -ENOSYS; @@ -289,15 +293,24 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, } } dev_to_hdlc(dev)->proto = proto; + return 0; } -void detach_hdlc_protocol(struct net_device *dev) +int detach_hdlc_protocol(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); + int err; if (hdlc->proto) { + err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev); + err = notifier_to_errno(err); + if (err) { + netdev_err(dev, "Refused to change device type\n"); + return err; + } + if (hdlc->proto->detach) hdlc->proto->detach(dev); module_put(hdlc->proto->module); @@ -306,6 +319,8 @@ void detach_hdlc_protocol(struct net_device *dev) kfree(hdlc->state); hdlc->state = NULL; hdlc_setup_dev(dev); + + return 0; } diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 3f20808b5ff8..a408abc25512 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -378,6 +378,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) spin_lock_init(&state(hdlc)->lock); dev->header_ops = &cisco_header_ops; dev->type = ARPHRD_CISCO; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); return 0; } diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 89541cc90e87..b6e0cfb095d3 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1240,6 +1240,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) } memcpy(&state(hdlc)->settings, &new_settings, size); dev->type = ARPHRD_FRAD; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); return 0; case IF_PROTO_FR_ADD_PVC: diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0d7645581f91..47fdb87d3567 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -687,6 +687,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) dev->hard_header_len = sizeof(struct hdlc_header); dev->header_ops = &ppp_header_ops; dev->type = ARPHRD_PPP; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); return 0; } diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 5dc153e8a29d..4feb45001aac 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -84,6 +84,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) return result; memcpy(hdlc->state, &new_settings, size); 
dev->type = ARPHRD_RAWHDLC; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 3ab72b3082de..2f11836078ab 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -102,6 +102,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) ether_setup(dev); dev->tx_queue_len = old_qlen; eth_hw_addr_random(dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index a49aec5efd20..e867638067a6 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -213,6 +213,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) if ((result = attach_hdlc_protocol(dev, &proto, 0))) return result; dev->type = ARPHRD_X25; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index e73f13857846..a20d688d2595 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -586,6 +586,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) || pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) { pr_err("No usable DMA configuration\n"); + pci_disable_device(pdev); return -EIO; } diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index f9f94229bf1b..8c8edaf1bba6 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -17,6 +17,22 @@ menuconfig WLAN if WLAN +source "drivers/net/wireless/admtek/Kconfig" +source "drivers/net/wireless/ath/Kconfig" +source "drivers/net/wireless/atmel/Kconfig" +source "drivers/net/wireless/broadcom/Kconfig" +source "drivers/net/wireless/cisco/Kconfig" +source "drivers/net/wireless/intel/Kconfig" +source "drivers/net/wireless/intersil/Kconfig" +source "drivers/net/wireless/marvell/Kconfig" +source "drivers/net/wireless/mediatek/Kconfig" +source "drivers/net/wireless/ralink/Kconfig" +source "drivers/net/wireless/realtek/Kconfig" +source "drivers/net/wireless/rsi/Kconfig" +source "drivers/net/wireless/st/Kconfig" +source "drivers/net/wireless/ti/Kconfig" +source "drivers/net/wireless/zydas/Kconfig" + config PCMCIA_RAYCS tristate "Aviator/Raytheon 2.4GHz wireless support" depends on PCMCIA @@ -32,110 +48,6 @@ config PCMCIA_RAYCS To compile this driver as a module, choose M here: the module will be called ray_cs. If unsure, say N. -config LIBERTAS_THINFIRM - tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware" - depends on MAC80211 - select FW_LOADER - ---help--- - A library for Marvell Libertas 8xxx devices using thinfirm. - -config LIBERTAS_THINFIRM_DEBUG - bool "Enable full debugging output in the Libertas thin firmware module." - depends on LIBERTAS_THINFIRM - ---help--- - Debugging support. - -config LIBERTAS_THINFIRM_USB - tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware" - depends on LIBERTAS_THINFIRM && USB - ---help--- - A driver for Marvell Libertas 8388 USB devices using thinfirm. - -config AIRO - tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" - depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN) - select WIRELESS_EXT - select CRYPTO - select WEXT_SPY - select WEXT_PRIV - ---help--- - This is the standard Linux driver to support Cisco/Aironet ISA and - PCI 802.11 wireless cards. 
- It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X - - with or without encryption) as well as card before the Cisco - acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). - - This driver support both the standard Linux Wireless Extensions - and Cisco proprietary API, so both the Linux Wireless Tools and the - Cisco Linux utilities can be used to configure the card. - - The driver can be compiled as a module and will be named "airo". - -config ATMEL - tristate "Atmel at76c50x chipset 802.11b support" - depends on CFG80211 && (PCI || PCMCIA) - select WIRELESS_EXT - select WEXT_PRIV - select FW_LOADER - select CRC32 - ---help--- - A driver 802.11b wireless cards based on the Atmel fast-vnet - chips. This driver supports standard Linux wireless extensions. - - Many cards based on this chipset do not have flash memory - and need their firmware loaded at start-up. If yours is - one of these, you will need to provide a firmware image - to be loaded into the card by the driver. The Atmel - firmware package can be downloaded from - <http://www.thekelleys.org.uk/atmel> - -config PCI_ATMEL - tristate "Atmel at76c506 PCI cards" - depends on ATMEL && PCI - ---help--- - Enable support for PCI and mini-PCI cards containing the - Atmel at76c506 chip. - -config PCMCIA_ATMEL - tristate "Atmel at76c502/at76c504 PCMCIA cards" - depends on ATMEL && PCMCIA - select WIRELESS_EXT - select FW_LOADER - select CRC32 - ---help--- - Enable support for PCMCIA cards containing the - Atmel at76c502 and at76c504 chips. - -config AT76C50X_USB - tristate "Atmel at76c503/at76c505/at76c505a USB cards" - depends on MAC80211 && USB - select FW_LOADER - ---help--- - Enable support for USB Wireless devices using Atmel at76c503, - at76c505 or at76c505a chips. - -config AIRO_CS - tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" - depends on CFG80211 && PCMCIA && (BROKEN || !M32R) - select WIRELESS_EXT - select WEXT_SPY - select WEXT_PRIV - select CRYPTO - select CRYPTO_AES - ---help--- - This is the standard Linux driver to support Cisco/Aironet PCMCIA - 802.11 wireless cards. This driver is the same as the Aironet - driver part of the Linux Pcmcia package. - It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X - - with or without encryption) as well as card before the Cisco - acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also - supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom - 802.11b cards. - - This driver support both the standard Linux Wireless Extensions - and Cisco proprietary API, so both the Linux Wireless Tools and the - Cisco Linux utilities can be used to configure the card. - config PCMCIA_WL3501 tristate "Planet WL3501 PCMCIA cards" depends on CFG80211 && PCMCIA @@ -146,44 +58,18 @@ config PCMCIA_WL3501 It has basic support for Linux wireless extensions and initial micro support for ethtool. -config PRISM54 - tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)' - depends on PCI - select WIRELESS_EXT - select WEXT_SPY - select WEXT_PRIV - select FW_LOADER - ---help--- - This enables support for FullMAC PCI/Cardbus prism54 devices. This - driver is now deprecated in favor for the SoftMAC driver, p54pci. - p54pci supports FullMAC PCI/Cardbus devices as well. 
- - For more information refer to the p54 wiki: - - http://wireless.kernel.org/en/users/Drivers/p54 - - Note: You need a motherboard with DMA support to use any of these cards - - When built as module you get the module prism54 - -config USB_ZD1201 - tristate "USB ZD1201 based Wireless device support" - depends on CFG80211 && USB - select WIRELESS_EXT - select WEXT_PRIV - select FW_LOADER +config MAC80211_HWSIM + tristate "Simulated radio testing tool for mac80211" + depends on MAC80211 ---help--- - Say Y if you want to use wireless LAN adapters based on the ZyDAS - ZD1201 chip. - - This driver makes the adapter appear as a normal Ethernet interface, - typically on wlan0. - - The zd1201 device requires external firmware to be loaded. - This can be found at http://linux-lc100020.sourceforge.net/ + This driver is a developer testing tool that can be used to test + IEEE 802.11 networking stack (mac80211) functionality. This is not + needed for normal wireless LAN usage and is only for testing. See + Documentation/networking/mac80211_hwsim for more information on how + to use this tool. - To compile this driver as a module, choose M here: the - module will be called zd1201. + To compile this driver as a module, choose M here: the module will be + called mac80211_hwsim. If unsure, say N. config USB_NET_RNDIS_WLAN tristate "Wireless RNDIS USB support" @@ -214,76 +100,4 @@ config USB_NET_RNDIS_WLAN If you choose to build a module, it'll be called rndis_wlan. -config ADM8211 - tristate "ADMtek ADM8211 support" - depends on MAC80211 && PCI - select CRC32 - select EEPROM_93CX6 - ---help--- - This driver is for ADM8211A, ADM8211B, and ADM8211C based cards. - These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as: - - Xterasys Cardbus XN-2411b - Blitz NetWave Point PC - TrendNet 221pc - Belkin F5D6001 - SMC 2635W - Linksys WPC11 v1 - Fiberline FL-WL-200X - 3com Office Connect (3CRSHPW796) - Corega WLPCIB-11 - SMC 2602W V2 EU - D-Link DWL-520 Revision C - - However, some of these cards have been replaced with other chips - like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or - the Ralink RT2400 (SMC2635W) without a model number change. - - Thanks to Infineon-ADMtek for their support of this driver. - -source "drivers/net/wireless/realtek/rtl818x/Kconfig" - -config MAC80211_HWSIM - tristate "Simulated radio testing tool for mac80211" - depends on MAC80211 - ---help--- - This driver is a developer testing tool that can be used to test - IEEE 802.11 networking stack (mac80211) functionality. This is not - needed for normal wireless LAN usage and is only for testing. See - Documentation/networking/mac80211_hwsim for more information on how - to use this tool. - - To compile this driver as a module, choose M here: the module will be - called mac80211_hwsim. If unsure, say N. - -config MWL8K - tristate "Marvell 88W8xxx PCI/PCIe Wireless support" - depends on MAC80211 && PCI - ---help--- - This driver supports Marvell TOPDOG 802.11 wireless cards. - - To compile this driver as a module, choose M here: the module - will be called mwl8k. If unsure, say N. 
- -source "drivers/net/wireless/ath/Kconfig" -source "drivers/net/wireless/b43/Kconfig" -source "drivers/net/wireless/b43legacy/Kconfig" -source "drivers/net/wireless/brcm80211/Kconfig" -source "drivers/net/wireless/hostap/Kconfig" -source "drivers/net/wireless/ipw2x00/Kconfig" -source "drivers/net/wireless/iwlwifi/Kconfig" -source "drivers/net/wireless/iwlegacy/Kconfig" -source "drivers/net/wireless/libertas/Kconfig" -source "drivers/net/wireless/orinoco/Kconfig" -source "drivers/net/wireless/p54/Kconfig" -source "drivers/net/wireless/rt2x00/Kconfig" -source "drivers/net/wireless/mediatek/Kconfig" -source "drivers/net/wireless/realtek/rtlwifi/Kconfig" -source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig" -source "drivers/net/wireless/ti/Kconfig" -source "drivers/net/wireless/zd1211rw/Kconfig" -source "drivers/net/wireless/mwifiex/Kconfig" -source "drivers/net/wireless/cw1200/Kconfig" -source "drivers/net/wireless/rsi/Kconfig" - endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 740fdd353c5d..f00d42953fb8 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -2,27 +2,21 @@ # Makefile for the Linux Wireless network device drivers. # -obj-$(CONFIG_IPW2100) += ipw2x00/ -obj-$(CONFIG_IPW2200) += ipw2x00/ - -obj-$(CONFIG_HERMES) += orinoco/ - -obj-$(CONFIG_AIRO) += airo.o -obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o - -obj-$(CONFIG_ATMEL) += atmel.o -obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o -obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o - -obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o - -obj-$(CONFIG_PRISM54) += prism54/ - -obj-$(CONFIG_HOSTAP) += hostap/ -obj-$(CONFIG_B43) += b43/ -obj-$(CONFIG_B43LEGACY) += b43legacy/ -obj-$(CONFIG_ZD1211RW) += zd1211rw/ -obj-$(CONFIG_WLAN) += realtek/ +obj-$(CONFIG_WLAN_VENDOR_ADMTEK) += admtek/ +obj-$(CONFIG_WLAN_VENDOR_ATH) += ath/ +obj-$(CONFIG_WLAN_VENDOR_ATMEL) += atmel/ +obj-$(CONFIG_WLAN_VENDOR_BROADCOM) += broadcom/ +obj-$(CONFIG_WLAN_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/ +obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/ +obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/ +obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/ +obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/ +obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/ +obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/ +obj-$(CONFIG_WLAN_VENDOR_ST) += st/ +obj-$(CONFIG_WLAN_VENDOR_TI) += ti/ +obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/ # 16-bit wireless PCMCIA client drivers obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o @@ -30,33 +24,4 @@ obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o -obj-$(CONFIG_USB_ZD1201) += zd1201.o -obj-$(CONFIG_LIBERTAS) += libertas/ - -obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/ - -obj-$(CONFIG_ADM8211) += adm8211.o - -obj-$(CONFIG_MWL8K) += mwl8k.o - -obj-$(CONFIG_IWLWIFI) += iwlwifi/ -obj-$(CONFIG_IWLEGACY) += iwlegacy/ -obj-$(CONFIG_RT2X00) += rt2x00/ - -obj-$(CONFIG_WL_MEDIATEK) += mediatek/ - -obj-$(CONFIG_P54_COMMON) += p54/ - -obj-$(CONFIG_ATH_CARDS) += ath/ - obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o - -obj-$(CONFIG_WL_TI) += ti/ - -obj-$(CONFIG_MWIFIEX) += mwifiex/ - -obj-$(CONFIG_BRCMFMAC) += brcm80211/ -obj-$(CONFIG_BRCMSMAC) += brcm80211/ - -obj-$(CONFIG_CW1200) += cw1200/ -obj-$(CONFIG_RSI_91X) += rsi/ diff --git a/drivers/net/wireless/admtek/Kconfig b/drivers/net/wireless/admtek/Kconfig new file mode 100644 index 000000000000..d5a2dc728078 --- /dev/null +++ b/drivers/net/wireless/admtek/Kconfig @@ -0,0 +1,41 @@ +config WLAN_VENDOR_ADMTEK + bool 
"ADMtek devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_ADMTEK + +config ADM8211 + tristate "ADMtek ADM8211 support" + depends on MAC80211 && PCI + select CRC32 + select EEPROM_93CX6 + ---help--- + This driver is for ADM8211A, ADM8211B, and ADM8211C based cards. + These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as: + + Xterasys Cardbus XN-2411b + Blitz NetWave Point PC + TrendNet 221pc + Belkin F5D6001 + SMC 2635W + Linksys WPC11 v1 + Fiberline FL-WL-200X + 3com Office Connect (3CRSHPW796) + Corega WLPCIB-11 + SMC 2602W V2 EU + D-Link DWL-520 Revision C + + However, some of these cards have been replaced with other chips + like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or + the Ralink RT2400 (SMC2635W) without a model number change. + + Thanks to Infineon-ADMtek for their support of this driver. + +endif # WLAN_VENDOR_ADMTEK diff --git a/drivers/net/wireless/admtek/Makefile b/drivers/net/wireless/admtek/Makefile new file mode 100644 index 000000000000..9cca7e571cdd --- /dev/null +++ b/drivers/net/wireless/admtek/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ADM8211) += adm8211.o diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 15f057ed41ad..15f057ed41ad 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/admtek/adm8211.h index bbc10b1cde87..bbc10b1cde87 100644 --- a/drivers/net/wireless/adm8211.h +++ b/drivers/net/wireless/admtek/adm8211.h diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index ce7826009eeb..44b2470af81d 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -1,13 +1,16 @@ config ATH_COMMON tristate -menuconfig ATH_CARDS - tristate "Atheros Wireless Cards" - depends on CFG80211 && (!UML || BROKEN) +config WLAN_VENDOR_ATH + bool "Atheros/Qualcomm devices" + default y ---help--- - This will enable the support for the Atheros wireless drivers. - ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option - enables the common ath.ko module which shares common helpers. + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. For more information and documentation on this module you can visit: @@ -17,7 +20,7 @@ menuconfig ATH_CARDS http://wireless.kernel.org/en/users/Drivers/Atheros -if ATH_CARDS +if WLAN_VENDOR_ATH config ATH_DEBUG bool "Atheros wireless debugging" diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 72acb822bb11..03aa35f999a1 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -2,6 +2,7 @@ config ATH10K tristate "Atheros 802.11ac wireless cards support" depends on MAC80211 && HAS_DMA select ATH_COMMON + select CRC32 ---help--- This module adds support for wireless adapters based on Atheros IEEE 802.11ac family of chipsets. 
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 0947cc271e69..b41eb3f4ee56 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -59,6 +59,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, .fw = QCA988X_HW_2_0_FW_FILE, @@ -95,6 +96,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .fw = QCA6174_HW_2_1_FW_FILE, @@ -113,6 +115,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, .fw = QCA6174_HW_3_0_FW_FILE, @@ -131,6 +134,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, @@ -151,6 +155,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .continuous_frag_desc = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, .fw = QCA99X0_HW_2_0_FW_FILE, @@ -211,6 +216,7 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init", [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", + [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -887,7 +893,7 @@ out: if (!ar->board_data || !ar->board_len) { ath10k_err(ar, "failed to fetch board data for %s from %s/%s\n", - ar->hw_params.fw.dir, boardname, filename); + boardname, ar->hw_params.fw.dir, filename); ret = -ENODATA; goto err; } @@ -1790,9 +1796,11 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_power_down; } + ath10k_debug_print_hwfw_info(ar); + ret = ath10k_core_get_board_id_from_otp(ar); if (ret && ret != -EOPNOTSUPP) { - ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n", + ath10k_err(ar, "failed to get board id from otp: %d\n", ret); return ret; } @@ -1803,6 +1811,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ath10k_debug_print_board_info(ar); + ret = ath10k_core_init_firmware_features(ar); if (ret) { ath10k_err(ar, "fatal problem with firmware features: %d\n", @@ -1825,7 +1835,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_unlock; } - ath10k_print_driver_info(ar); + ath10k_debug_print_boot_info(ar); ath10k_core_stop(ar); mutex_unlock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 858d75f49a9f..7840cf3ef7a6 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -81,26 +81,20 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus) return "unknown"; } +enum ath10k_skb_flags { + ATH10K_SKB_F_NO_HWCRYPT = BIT(0), + ATH10K_SKB_F_DTIM_ZERO = BIT(1), + 
ATH10K_SKB_F_DELIVER_CAB = BIT(2), + ATH10K_SKB_F_MGMT = BIT(3), + ATH10K_SKB_F_QOS = BIT(4), +}; + struct ath10k_skb_cb { dma_addr_t paddr; + u8 flags; u8 eid; - u8 vdev_id; - enum ath10k_hw_txrx_mode txmode; - bool is_protected; - - struct { - u8 tid; - u16 freq; - bool is_offchan; - bool nohwcrypt; - struct ath10k_htt_txbuf *txbuf; - u32 txbuf_paddr; - } __packed htt; - - struct { - bool dtim_zero; - bool deliver_cab; - } bcn; + u16 msdu_id; + struct ieee80211_vif *vif; } __packed; struct ath10k_skb_rxcb { @@ -151,6 +145,7 @@ struct ath10k_wmi { struct wmi_vdev_param_map *vdev_param; struct wmi_pdev_param_map *pdev_param; const struct wmi_ops *ops; + const struct wmi_peer_flags_map *peer_flags; u32 num_mem_chunks; u32 rx_decap_mode; @@ -512,6 +507,9 @@ enum ath10k_fw_features { /* Firmware Supports Adaptive CCA*/ ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11, + /* Firmware supports management frame protection */ + ATH10K_FW_FEATURE_MFP_SUPPORT = 12, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -534,6 +532,9 @@ enum ath10k_dev_flags { /* Disable HW crypto engine */ ATH10K_FLAG_HW_CRYPTO_DISABLED, + + /* Bluetooth coexistance enabled */ + ATH10K_FLAG_BTCOEX, }; enum ath10k_cal_mode { @@ -662,6 +663,9 @@ struct ath10k { */ u32 max_probe_resp_desc_thres; + /* The padding bytes's location is different on various chips */ + enum ath10k_hw_4addr_pad hw_4addr_pad; + struct ath10k_hw_params_fw { const char *dir; const char *fw; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 6cc1aa3449c8..2bdf5408b0d9 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -19,6 +19,8 @@ #include <linux/debugfs.h> #include <linux/vmalloc.h> #include <linux/utsname.h> +#include <linux/crc32.h> +#include <linux/firmware.h> #include "core.h" #include "debug.h" @@ -122,28 +124,51 @@ void ath10k_info(struct ath10k *ar, const char *fmt, ...) 
} EXPORT_SYMBOL(ath10k_info); -void ath10k_print_driver_info(struct ath10k *ar) +void ath10k_debug_print_hwfw_info(struct ath10k *ar) { char fw_features[128] = {}; - char boardinfo[100]; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); - if (ar->id.bmi_ids_valid) - scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d", - ar->id.bmi_chip_id, ar->id.bmi_board_id); - else - scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x", - ar->id.subsystem_vendor, ar->id.subsystem_device); - - ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n", + ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x", ar->hw_params.name, ar->target_version, ar->chip_id, - boardinfo, + ar->id.subsystem_vendor, ar->id.subsystem_device); + + ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", + config_enabled(CONFIG_ATH10K_DEBUG), + config_enabled(CONFIG_ATH10K_DEBUGFS), + config_enabled(CONFIG_ATH10K_TRACING), + config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), + config_enabled(CONFIG_NL80211_TESTMODE)); + + ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", ar->hw->wiphy->fw_version, ar->fw_api, + fw_features, + crc32_le(0, ar->firmware->data, ar->firmware->size)); +} + +void ath10k_debug_print_board_info(struct ath10k *ar) +{ + char boardinfo[100]; + + if (ar->id.bmi_ids_valid) + scnprintf(boardinfo, sizeof(boardinfo), "%d:%d", + ar->id.bmi_chip_id, ar->id.bmi_board_id); + else + scnprintf(boardinfo, sizeof(boardinfo), "N/A"); + + ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", ar->bd_api, + boardinfo, + crc32_le(0, ar->board->data, ar->board->size)); +} + +void ath10k_debug_print_boot_info(struct ath10k *ar) +{ + ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n", ar->htt.target_version_major, ar->htt.target_version_minor, ar->wmi.op_version, @@ -151,14 +176,14 @@ void ath10k_print_driver_info(struct ath10k *ar) ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), - !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags), - fw_features); - ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", - config_enabled(CONFIG_ATH10K_DEBUG), - config_enabled(CONFIG_ATH10K_DEBUGFS), - config_enabled(CONFIG_ATH10K_TRACING), - config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), - config_enabled(CONFIG_NL80211_TESTMODE)); + !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)); +} + +void ath10k_print_driver_info(struct ath10k *ar) +{ + ath10k_debug_print_hwfw_info(ar); + ath10k_debug_print_board_info(ar); + ath10k_debug_print_boot_info(ar); } EXPORT_SYMBOL(ath10k_print_driver_info); @@ -1114,7 +1139,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, { struct ath10k *ar = file->private_data; char buf[64]; - u8 amsdu = 3, ampdu = 64; + u8 amsdu, ampdu; unsigned int len; mutex_lock(&ar->conf_mutex); @@ -2074,6 +2099,121 @@ static const struct file_operations fops_quiet_period = { .open = simple_open }; +static ssize_t ath10k_write_btcoex(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t buf_size; + bool val; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, ubuf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if 
(!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) + goto exit; + + if (val) + set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + else + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + + if (ar->state != ATH10K_STATE_ON) + goto exit; + + ath10k_info(ar, "restarting firmware due to btcoex change"); + + queue_work(ar->workqueue, &ar->restart_work); + +exit: + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags)); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_btcoex = { + .read = ath10k_read_btcoex, + .write = ath10k_write_btcoex, + .open = simple_open +}; + +static ssize_t ath10k_debug_fw_checksums_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned int len = 0, buf_len = 4096; + ssize_t ret_cnt; + char *buf; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + if (len > buf_len) + len = buf_len; + + len += scnprintf(buf + len, buf_len - len, + "firmware-N.bin\t\t%08x\n", + crc32_le(0, ar->firmware->data, ar->firmware->size)); + len += scnprintf(buf + len, buf_len - len, + "athwlan\t\t\t%08x\n", + crc32_le(0, ar->firmware_data, ar->firmware_len)); + len += scnprintf(buf + len, buf_len - len, + "otp\t\t\t%08x\n", + crc32_le(0, ar->otp_data, ar->otp_len)); + len += scnprintf(buf + len, buf_len - len, + "codeswap\t\t%08x\n", + crc32_le(0, ar->swap.firmware_codeswap_data, + ar->swap.firmware_codeswap_len)); + len += scnprintf(buf + len, buf_len - len, + "board-N.bin\t\t%08x\n", + crc32_le(0, ar->board->data, ar->board->size)); + len += scnprintf(buf + len, buf_len - len, + "board\t\t\t%08x\n", + crc32_le(0, ar->board_data, ar->board_len)); + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + mutex_unlock(&ar->conf_mutex); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_fw_checksums = { + .read = ath10k_debug_fw_checksums_read, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_create(struct ath10k *ar) { ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); @@ -2123,8 +2263,8 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_wmi_services); - debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_simulate_fw_crash); + debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_crash_dump); @@ -2141,15 +2281,15 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_chip_id); - debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_htt_stats_mask); + debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_htt_stats_mask); debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR, ar->debug.debugfs_phy, ar, &fops_htt_max_amsdu_ampdu); - debugfs_create_file("fw_dbglog", S_IRUSR, 
ar->debug.debugfs_phy, - ar, &fops_fw_dbglog); + debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_fw_dbglog); debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_cal_data); @@ -2183,6 +2323,13 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("tpc_stats", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_tpc_stats); + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) + debugfs_create_file("btcoex", S_IRUGO | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_btcoex); + + debugfs_create_file("fw_checksums", S_IRUSR, + ar->debug.debugfs_phy, ar, &fops_fw_checksums); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 7de780c4ec8d..814719cf4f22 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -63,6 +63,10 @@ extern unsigned int ath10k_debug_mask; __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); __printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...); __printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...); + +void ath10k_debug_print_hwfw_info(struct ath10k *ar); +void ath10k_debug_print_board_info(struct ath10k *ar); +void ath10k_debug_print_boot_info(struct ath10k *ar); void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 2bad50e520b5..47ca048feaf0 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -166,8 +166,13 @@ struct htt_data_tx_desc { __le16 len; __le16 id; __le32 frags_paddr; - __le16 peerid; - __le16 freq; + union { + __le32 peerid; + struct { + __le16 peerid; + __le16 freq; + } __packed offchan_tx; + } __packed; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; @@ -1597,6 +1602,10 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); -int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); +int ath10k_htt_tx(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu); +void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 6060dda4e910..91afa3ae414c 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -536,7 +536,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring); - vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA); + vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); if (!vaddr) goto err_dma_ring; @@ -545,7 +545,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) vaddr = dma_alloc_coherent(htt->ar->dev, sizeof(*htt->rx_ring.alloc_idx.vaddr), - &paddr, GFP_DMA); + &paddr, GFP_KERNEL); if (!vaddr) goto err_dma_idx; @@ -674,7 +674,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, rate &= ~RX_PPDU_START_RATE_FLAG; sband = &ar->mac.sbands[status->band]; - status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate); + status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); break; case 
HTT_RX_HT: case HTT_RX_HT_WITH_TXBF: @@ -1114,7 +1114,20 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, */ /* pull decapped header and copy SA & DA */ - hdr = (struct ieee80211_hdr *)msdu->data; + if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) && + ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) { + /* The QCA99X0 4 address mode pad 2 bytes at the + * beginning of MSDU + */ + hdr = (struct ieee80211_hdr *)(msdu->data + 2); + /* The skb length need be extended 2 as the 2 bytes at the tail + * be excluded due to the padding + */ + skb_put(msdu, 2); + } else { + hdr = (struct ieee80211_hdr *)(msdu->data); + } + hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); ether_addr_copy(da, ieee80211_get_DA(hdr)); ether_addr_copy(sa, ieee80211_get_SA(hdr)); @@ -2127,6 +2140,18 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) } EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); +void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, + struct sk_buff *skb) +{ + struct ath10k_pktlog_10_4_hdr *hdr = + (struct ath10k_pktlog_10_4_hdr *)skb->data; + + trace_ath10k_htt_pktlog(ar, hdr->payload, + sizeof(*hdr) + __le16_to_cpu(hdr->size)); + dev_kfree_skb_any(skb); +} +EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); + static void ath10k_htt_txrx_compl_task(unsigned long ptr) { struct ath10k_htt *htt = (struct ath10k_htt *)ptr; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 16823970dbfd..b3adadb5f824 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -111,7 +111,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr, - GFP_DMA); + GFP_KERNEL); if (!htt->txbuf.vaddr) { ath10k_err(ar, "failed to alloc tx buffer\n"); ret = -ENOMEM; @@ -124,7 +124,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, &htt->frag_desc.paddr, - GFP_DMA); + GFP_KERNEL); if (!htt->frag_desc.vaddr) { ath10k_warn(ar, "failed to alloc fragment desc memory\n"); ret = -ENOMEM; @@ -439,6 +439,35 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, return 0; } +static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + struct ath10k_vif *arvif = (void *)cb->vif->drv_priv; + + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + return ar->scan.vdev_id; + else if (cb->vif) + return arvif->vdev_id; + else if (ar->monitor_started) + return ar->monitor_vdev_id; + else + return 0; +} + +static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + + if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) + return HTT_DATA_TX_EXT_TID_MGMT; + else if (cb->flags & ATH10K_SKB_F_QOS) + return skb->priority % IEEE80211_QOS_CTL_TID_MASK; + else + return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; +} + int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; @@ -446,7 +475,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = 
ATH10K_SKB_CB(msdu); - u8 vdev_id = skb_cb->vdev_id; + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); int len = 0; int msdu_id = -1; int res; @@ -477,6 +506,13 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) msdu_id = res; + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; @@ -503,8 +539,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); - skb_cb->htt.txbuf = NULL; - res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; @@ -525,21 +559,27 @@ err: return res; } -int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) +int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; + struct ath10k_htt_txbuf *txbuf; struct htt_data_tx_desc_frag *frags; - u8 vdev_id = skb_cb->vdev_id; - u8 tid = skb_cb->htt.tid; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; + u16 freq = 0; u32 frags_paddr = 0; + u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; bool limit_mgmt_desc = false; bool is_probe_resp = false; @@ -567,17 +607,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); - skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id]; - skb_cb->htt.txbuf_paddr = htt->txbuf.paddr + - (sizeof(struct ath10k_htt_txbuf) * msdu_id); + txbuf = &htt->txbuf.vaddr[msdu_id]; + txbuf_paddr = htt->txbuf.paddr + + (sizeof(struct ath10k_htt_txbuf) * msdu_id); if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); - } else if (!skb_cb->htt.nohwcrypt && - skb_cb->txmode == ATH10K_HW_TXRX_RAW && + } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && + txmode == ATH10K_HW_TXRX_RAW && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } @@ -590,7 +630,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) goto err_free_msdu_id; } - switch (skb_cb->txmode) { + if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; @@ -610,16 +653,16 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc) * msdu_id); } else { - frags = skb_cb->htt.txbuf->frags; + frags = txbuf->frags; frags[0].dword_addr.paddr = __cpu_to_le32(skb_cb->paddr); frags[0].dword_addr.len = __cpu_to_le32(msdu->len); frags[1].dword_addr.paddr = 0; frags[1].dword_addr.len = 0; - frags_paddr = 
skb_cb->htt.txbuf_paddr; + frags_paddr = txbuf_paddr; } - flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, @@ -646,17 +689,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) * avoid extra memory allocations, compress data structures and thus * improve performance. */ - skb_cb->htt.txbuf->htc_hdr.eid = htt->eid; - skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16( - sizeof(skb_cb->htt.txbuf->cmd_hdr) + - sizeof(skb_cb->htt.txbuf->cmd_tx) + - prefetch_len); - skb_cb->htt.txbuf->htc_hdr.flags = 0; - - if (skb_cb->htt.nohwcrypt) - flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + txbuf->htc_hdr.eid = htt->eid; + txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx) + + prefetch_len); + txbuf->htc_hdr.flags = 0; - if (!skb_cb->is_protected) + if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); @@ -675,20 +714,27 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; - skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; - skb_cb->htt.txbuf->cmd_tx.flags0 = flags0; - skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); - skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); - skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); - skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); - skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); - skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); + txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + txbuf->cmd_tx.flags0 = flags0; + txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); + txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); + txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); + txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); + if (ath10k_mac_tx_frm_has_freq(ar)) { + txbuf->cmd_tx.offchan_tx.peerid = + __cpu_to_le16(HTT_INVALID_PEERID); + txbuf->cmd_tx.offchan_tx.freq = + __cpu_to_le16(freq); + } else { + txbuf->cmd_tx.peerid = + __cpu_to_le32(HTT_INVALID_PEERID); + } trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, - (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq); + (u32)skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); @@ -696,12 +742,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; - sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; - sg_items[0].paddr = skb_cb->htt.txbuf_paddr + - sizeof(skb_cb->htt.txbuf->frags); - sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) + - sizeof(skb_cb->htt.txbuf->cmd_hdr) + - sizeof(skb_cb->htt.txbuf->cmd_tx); + sg_items[0].vaddr = &txbuf->htc_hdr; + sg_items[0].paddr = txbuf_paddr + + sizeof(txbuf->frags); + sg_items[0].len = sizeof(txbuf->htc_hdr) + + sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 713c2bcea178..0678831e8671 100644 --- 
a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -286,6 +286,16 @@ struct ath10k_pktlog_hdr { u8 payload[0]; } __packed; +struct ath10k_pktlog_10_4_hdr { + __le16 flags; + __le16 missed_cnt; + __le16 log_type; + __le16 size; + __le32 timestamp; + __le32 type_specific_data; + u8 payload[0]; +} __packed; + enum ath10k_hw_rate_ofdm { ATH10K_HW_RATE_OFDM_48M = 0, ATH10K_HW_RATE_OFDM_24M, @@ -307,6 +317,11 @@ enum ath10k_hw_rate_cck { ATH10K_HW_RATE_CCK_SP_2M, }; +enum ath10k_hw_4addr_pad { + ATH10K_HW_4ADDR_PAD_AFTER, + ATH10K_HW_4ADDR_PAD_BEFORE, +}; + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 95a55405ebf0..6146a293601a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -90,7 +90,7 @@ static u8 ath10k_mac_bitrate_to_rate(int bitrate) } u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, - u8 hw_rate) + u8 hw_rate, bool cck) { const struct ieee80211_rate *rate; int i; @@ -98,6 +98,9 @@ u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, for (i = 0; i < sband->n_bitrates; i++) { rate = &sband->bitrates[i]; + if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) + continue; + if (rate->hw_value == hw_rate) return i; else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && @@ -247,7 +250,8 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && - arvif->vif->type != NL80211_IFTYPE_ADHOC)) + arvif->vif->type != NL80211_IFTYPE_ADHOC && + arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) return -EINVAL; spin_lock_bh(&ar->data_lock); @@ -1960,7 +1964,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, ether_addr_copy(arg->addr, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_aid = aid; - arg->peer_flags |= WMI_PEER_AUTH; + arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); arg->peer_num_spatial_streams = 1; arg->peer_caps = vif->bss_conf.assoc_capability; @@ -1968,6 +1972,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { struct ieee80211_bss_conf *info = &vif->bss_conf; @@ -2002,12 +2007,17 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ if (rsnie || wpaie) { ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); - arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; } if (wpaie) { ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); - arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; + } + + if (sta->mfp && + test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) { + arg->peer_flags |= ar->wmi.peer_flags->pmf; } } @@ -2104,7 +2114,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) return; - arg->peer_flags |= WMI_PEER_HT; + arg->peer_flags |= ar->wmi.peer_flags->ht; arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ht_cap->ampdu_factor)) - 1; @@ -2115,10 +2125,10 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_rate_caps |= WMI_RC_HT_FLAG; if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) - arg->peer_flags |= WMI_PEER_LDPC; + arg->peer_flags |= ar->wmi.peer_flags->ldbc; if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { - arg->peer_flags |= WMI_PEER_40MHZ; + arg->peer_flags |= ar->wmi.peer_flags->bw40; arg->peer_rate_caps |= WMI_RC_CW40_FLAG; } @@ -2132,7 +2142,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; - arg->peer_flags |= WMI_PEER_STBC; + arg->peer_flags |= ar->wmi.peer_flags->stbc; } if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { @@ -2140,7 +2150,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc = stbc << WMI_RC_RX_STBC_FLAG_S; arg->peer_rate_caps |= stbc; - arg->peer_flags |= WMI_PEER_STBC; + arg->peer_flags |= ar->wmi.peer_flags->stbc; } if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) @@ -2321,10 +2331,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) return; - arg->peer_flags |= WMI_PEER_VHT; + arg->peer_flags |= ar->wmi.peer_flags->vht; if (def.chan->band == IEEE80211_BAND_2GHZ) - arg->peer_flags |= WMI_PEER_VHT_2G; + arg->peer_flags |= ar->wmi.peer_flags->vht_2g; arg->peer_vht_caps = vht_cap->cap; @@ -2341,7 +2351,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, ampdu_factor)) - 1); if (sta->bandwidth == IEEE80211_STA_RX_BW_80) - arg->peer_flags |= WMI_PEER_80MHZ; + arg->peer_flags |= ar->wmi.peer_flags->bw80; arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); @@ -2366,27 +2376,28 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) - arg->peer_flags |= WMI_PEER_QOS; + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; if (sta->wme && sta->uapsd_queues) { - arg->peer_flags |= WMI_PEER_APSD; + arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; } break; case WMI_VDEV_TYPE_STA: if (vif->bss_conf.qos) - arg->peer_flags |= WMI_PEER_QOS; + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; case WMI_VDEV_TYPE_IBSS: if (sta->wme) - arg->peer_flags |= WMI_PEER_QOS; + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; default: break; } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", - sta->addr, !!(arg->peer_flags & WMI_PEER_QOS)); + sta->addr, !!(arg->peer_flags & + arvif->ar->wmi.peer_flags->qos)); } static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) @@ -2479,7 +2490,7 @@ static int 
ath10k_peer_assoc_prepare(struct ath10k *ar, memset(arg, 0, sizeof(*arg)); ath10k_peer_assoc_h_basic(ar, vif, sta, arg); - ath10k_peer_assoc_h_crypto(ar, vif, arg); + ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); ath10k_peer_assoc_h_rates(ar, vif, sta, arg); ath10k_peer_assoc_h_ht(ar, vif, sta, arg); ath10k_peer_assoc_h_vht(ar, vif, sta, arg); @@ -3112,35 +3123,11 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, spin_unlock_bh(&ar->htt.tx_lock); } -static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr) -{ - if (ieee80211_is_mgmt(hdr->frame_control)) - return HTT_DATA_TX_EXT_TID_MGMT; - - if (!ieee80211_is_data_qos(hdr->frame_control)) - return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; - - if (!is_unicast_ether_addr(ieee80211_get_DA(hdr))) - return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; - - return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; -} - -static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif) -{ - if (vif) - return ath10k_vif_to_arvif(vif)->vdev_id; - - if (ar->monitor_started) - return ar->monitor_vdev_id; - - ath10k_warn(ar, "failed to resolve vdev id\n"); - return 0; -} - static enum ath10k_hw_txrx_mode -ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct sk_buff *skb) +ath10k_mac_tx_h_get_txmode(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff *skb) { const struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; @@ -3190,14 +3177,22 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, } static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, - struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct sk_buff *skb) +{ + const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const struct ieee80211_hdr *hdr = (void *)skb->data; const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_INJECTED; + + if (!ieee80211_has_protected(hdr->frame_control)) + return false; + if ((info->flags & mask) == mask) return false; + if (vif) return !ath10k_vif_to_arvif(vif)->nohwcrypt; + return true; } @@ -3224,7 +3219,7 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) */ hdr = (void *)skb->data; if (ieee80211_is_qos_nullfunc(hdr->frame_control)) - cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + cb->flags &= ~ATH10K_SKB_F_QOS; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } @@ -3280,7 +3275,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, } } -static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) +bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) { /* FIXME: Not really sure since when the behaviour changed. At some * point new firmware stopped requiring creation of peer entries for @@ -3288,8 +3283,9 @@ static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) * tx credit replenishment and reliability). Assuming it's at least 3.4 * because that's when the `freq` was introduced to TX_FRM HTT command. 
*/ - return !(ar->htt.target_version_major >= 3 && - ar->htt.target_version_minor >= 4); + return (ar->htt.target_version_major >= 3 && + ar->htt.target_version_minor >= 4 && + ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV); } static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) @@ -3314,24 +3310,24 @@ unlock: return ret; } -static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *skb) { - struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); struct ath10k_htt *htt = &ar->htt; int ret = 0; - switch (cb->txmode) { + switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: case ATH10K_HW_TXRX_ETHERNET: - ret = ath10k_htt_tx(htt, skb); + ret = ath10k_htt_tx(htt, txmode, skb); break; case ATH10K_HW_TXRX_MGMT: if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features)) ret = ath10k_mac_tx_wmi_mgmt(ar, skb); else if (ar->htt.target_version_major >= 3) - ret = ath10k_htt_tx(htt, skb); + ret = ath10k_htt_tx(htt, txmode, skb); else ret = ath10k_htt_mgmt_tx(htt, skb); break; @@ -3361,9 +3357,13 @@ void ath10k_offchan_tx_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); struct ath10k_peer *peer; + struct ath10k_vif *arvif; struct ieee80211_hdr *hdr; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; struct sk_buff *skb; const u8 *peer_addr; + enum ath10k_hw_txrx_mode txmode; int vdev_id; int ret; unsigned long time_left; @@ -3388,9 +3388,9 @@ void ath10k_offchan_tx_work(struct work_struct *work) hdr = (struct ieee80211_hdr *)skb->data; peer_addr = ieee80211_get_DA(hdr); - vdev_id = ATH10K_SKB_CB(skb)->vdev_id; spin_lock_bh(&ar->data_lock); + vdev_id = ar->scan.vdev_id; peer = ath10k_peer_find(ar, vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); @@ -3413,7 +3413,22 @@ void ath10k_offchan_tx_work(struct work_struct *work) ar->offchan_tx_skb = skb; spin_unlock_bh(&ar->data_lock); - ath10k_mac_tx(ar, skb); + /* It's safe to access vif and sta - conf_mutex guarantees that + * sta_state() and remove_interface() are locked exclusively + * out wrt to this offchannel worker. 
+ */ + arvif = ath10k_get_arvif(ar, vdev_id); + if (arvif) { + vif = arvif->vif; + sta = ieee80211_find_sta(vif, peer_addr); + } else { + vif = NULL; + sta = NULL; + } + + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + + ath10k_mac_tx(ar, txmode, skb); time_left = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); @@ -3488,6 +3503,7 @@ void __ath10k_scan_finish(struct ath10k *ar) case ATH10K_SCAN_STARTING: ar->scan.state = ATH10K_SCAN_IDLE; ar->scan_channel = NULL; + ar->scan.roc_freq = 0; ath10k_offchan_tx_purge(ar); cancel_delayed_work(&ar->scan.timeout); complete_all(&ar->scan.completed); @@ -3631,25 +3647,32 @@ static void ath10k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ath10k *ar = hw->priv; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ieee80211_sta *sta = control->sta; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - __le16 fc = hdr->frame_control; + enum ath10k_hw_txrx_mode txmode; /* We should disable CCK RATE due to P2P */ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); - ATH10K_SKB_CB(skb)->htt.is_offchan = false; - ATH10K_SKB_CB(skb)->htt.freq = 0; - ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); - ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb); - ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); - ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb); - ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc); + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + + skb_cb->flags = 0; + if (!ath10k_tx_h_use_hwcrypto(vif, skb)) + skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; + + if (ieee80211_is_mgmt(hdr->frame_control)) + skb_cb->flags |= ATH10K_SKB_F_MGMT; - switch (ATH10K_SKB_CB(skb)->txmode) { + if (ieee80211_is_data_qos(hdr->frame_control)) + skb_cb->flags |= ATH10K_SKB_F_QOS; + + skb_cb->vif = vif; + + switch (txmode) { case ATH10K_HW_TXRX_MGMT: case ATH10K_HW_TXRX_NATIVE_WIFI: ath10k_tx_h_nwifi(hw, skb); @@ -3668,15 +3691,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, } if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { - spin_lock_bh(&ar->data_lock); - ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; - ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; - spin_unlock_bh(&ar->data_lock); - - if (ath10k_mac_need_offchan_tx_work(ar)) { - ATH10K_SKB_CB(skb)->htt.freq = 0; - ATH10K_SKB_CB(skb)->htt.is_offchan = true; - + if (!ath10k_mac_tx_frm_has_freq(ar)) { ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); @@ -3686,7 +3701,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, } } - ath10k_mac_tx(ar, skb); + ath10k_mac_tx(ar, txmode, skb); } /* Must not be called with conf_mutex held as workers can use that also. 
*/ @@ -3845,7 +3860,8 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; - ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; + ht_cap.cap |= + WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; @@ -4350,7 +4366,9 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, arvif->vdev_type = WMI_VDEV_TYPE_IBSS; break; case NL80211_IFTYPE_MESH_POINT: - if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + if (test_bit(WMI_SERVICE_MESH, ar->wmi.svc_map)) { + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH; + } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); goto err; @@ -6907,35 +6925,39 @@ void ath10k_mac_destroy(struct ath10k *ar) static const struct ieee80211_iface_limit ath10k_if_limits[] = { { - .max = 8, - .types = BIT(NL80211_IFTYPE_STATION) - | BIT(NL80211_IFTYPE_P2P_CLIENT) + .max = 8, + .types = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { - .max = 3, - .types = BIT(NL80211_IFTYPE_P2P_GO) + .max = 3, + .types = BIT(NL80211_IFTYPE_P2P_GO) }, { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, { - .max = 7, - .types = BIT(NL80211_IFTYPE_AP) + .max = 7, + .types = BIT(NL80211_IFTYPE_AP) #ifdef CONFIG_MAC80211_MESH - | BIT(NL80211_IFTYPE_MESH_POINT) + | BIT(NL80211_IFTYPE_MESH_POINT) #endif }, }; static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { { - .max = 8, - .types = BIT(NL80211_IFTYPE_AP) + .max = 8, + .types = BIT(NL80211_IFTYPE_AP) #ifdef CONFIG_MAC80211_MESH - | BIT(NL80211_IFTYPE_MESH_POINT) + | BIT(NL80211_IFTYPE_MESH_POINT) #endif }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION) + }, }; static const struct ieee80211_iface_combination ath10k_if_comb[] = { diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index e3cefe4c7cfd..53091588090d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -66,7 +66,7 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, enum wmi_tlv_tx_pause_action action); u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, - u8 hw_rate); + u8 hw_rate, bool cck); u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, u32 bitrate); @@ -74,6 +74,7 @@ void ath10k_mac_tx_lock(struct ath10k *ar, int reason); void ath10k_mac_tx_unlock(struct ath10k *ar, int reason); void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason); void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason); +bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 930785a724e1..ee925c618535 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -108,6 +108,7 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); static struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ @@ -186,6 +187,7 @@ static struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 128, + .recv_cb = ath10k_pci_pktlog_rx_cb, }, /* CE9 target autonomous qcache memcpy */ @@ -485,6 +487,9 @@ static int ath10k_pci_force_wake(struct ath10k *ar) unsigned long flags; int ret = 0; + if (ar_pci->pci_ps) + return ret; + spin_lock_irqsave(&ar_pci->ps_lock, flags); if (!ar_pci->ps_awake) { @@ -1215,6 +1220,15 @@ static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } +/* Called by lower (CE) layer when data is received from the Target. + * Only 10.4 firmware uses separate CE to transfer pktlog data. + */ +static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_pci_process_rx_cb(ce_state, + ath10k_htt_rx_pktlog_completion_handler); +} + /* Called by lower (CE) layer when a send to HTT Target completes. */ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) { @@ -2469,12 +2483,10 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) u32 val; int ret = 0; - if (ar_pci->pci_ps == 0) { - ret = ath10k_pci_force_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake up target: %d\n", ret); - return ret; - } + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up target: %d\n", ret); + return ret; } /* Suspend/Resume resets the PCI configuration space, so we have to @@ -2581,13 +2593,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; - if (ar_pci->pci_ps == 0) { - ret = ath10k_pci_force_wake(ar); - if (ret) { - ath10k_warn(ar, "failed to wake device up on irq: %d\n", - ret); - return IRQ_NONE; - } + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); + return IRQ_NONE; } if (ar_pci->num_msi_intrs == 0) { @@ -3060,17 +3069,15 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_sleep; } + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake up device : %d\n", ret); + goto err_free_pipes; + } + ath10k_pci_ce_deinit(ar); ath10k_pci_irq_disable(ar); - if (ar_pci->pci_ps == 0) { - ret = ath10k_pci_force_wake(ar); - if (ret) { - ath10k_warn(ar, "failed to wake up device : %d\n", ret); - goto err_free_pipes; - } - } - ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index 60fe562e3041..444b52c7e4f3 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -187,7 +187,7 @@ int ath10k_thermal_register(struct ath10k *ar) /* Do not register hwmon device when temperature reading is not * supported by firmware */ - if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4) + if (!(ar->wmi.ops->gen_pdev_get_temperature)) return 0; /* Avoid linking error on devm_hwmon_device_register_with_groups, I diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 6d1105ab4592..fbfb608e48ab 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -23,7 +23,12 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct 
sk_buff *skb) { - if (!ATH10K_SKB_CB(skb)->htt.is_offchan) + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))) + return; + + if (ath10k_mac_tx_frm_has_freq(ar)) return; /* If the original wait_for_completion() timed out before @@ -52,8 +57,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct ieee80211_tx_info *info; struct ath10k_skb_cb *skb_cb; struct sk_buff *msdu; - struct ieee80211_hdr *hdr; - __le16 fc; bool limit_mgmt_desc = false; ath10k_dbg(ar, ATH10K_DBG_HTT, @@ -76,10 +79,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, return; } - hdr = (struct ieee80211_hdr *)msdu->data; - fc = hdr->frame_control; + skb_cb = ATH10K_SKB_CB(msdu); - if (unlikely(ieee80211_is_mgmt(fc)) && + if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) && ar->hw_params.max_probe_resp_desc_thres) limit_mgmt_desc = true; @@ -89,7 +91,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); - skb_cb = ATH10K_SKB_CB(msdu); dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); ath10k_report_offchan_tx(htt->ar, msdu); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 6fbd17b69469..3b3a27b859f3 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3485,6 +3485,24 @@ static const struct wmi_ops wmi_tlv_ops = { .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, }; +static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { + .auth = WMI_TLV_PEER_AUTH, + .qos = WMI_TLV_PEER_QOS, + .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY, + .apsd = WMI_TLV_PEER_APSD, + .ht = WMI_TLV_PEER_HT, + .bw40 = WMI_TLV_PEER_40MHZ, + .stbc = WMI_TLV_PEER_STBC, + .ldbc = WMI_TLV_PEER_LDPC, + .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS, + .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX, + .vht = WMI_TLV_PEER_VHT, + .bw80 = WMI_TLV_PEER_80MHZ, + .pmf = WMI_TLV_PEER_PMF, +}; + /************/ /* TLV init */ /************/ @@ -3495,4 +3513,5 @@ void ath10k_wmi_tlv_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; ar->wmi.ops = &wmi_tlv_ops; + ar->wmi.peer_flags = &wmi_tlv_peer_flags_map; } diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index ad655c44afdb..dd678590531a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -527,6 +527,24 @@ enum wmi_tlv_vdev_param { WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE, }; +enum wmi_tlv_peer_flags { + WMI_TLV_PEER_AUTH = 0x00000001, + WMI_TLV_PEER_QOS = 0x00000002, + WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_TLV_PEER_APSD = 0x00000800, + WMI_TLV_PEER_HT = 0x00001000, + WMI_TLV_PEER_40MHZ = 0x00002000, + WMI_TLV_PEER_STBC = 0x00008000, + WMI_TLV_PEER_LDPC = 0x00010000, + WMI_TLV_PEER_DYN_MIMOPS = 0x00020000, + WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000, + WMI_TLV_PEER_SPATIAL_MUX = 0x00200000, + WMI_TLV_PEER_VHT = 0x02000000, + WMI_TLV_PEER_80MHZ = 0x04000000, + WMI_TLV_PEER_PMF = 0x08000000, +}; + enum wmi_tlv_tag { WMI_TLV_TAG_LAST_RESERVED = 15, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 7569db0f69b5..a7c3d299639b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ 
b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1546,6 +1546,61 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, }; +static const struct wmi_peer_flags_map wmi_peer_flags_map = { + .auth = WMI_PEER_AUTH, + .qos = WMI_PEER_QOS, + .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY, + .apsd = WMI_PEER_APSD, + .ht = WMI_PEER_HT, + .bw40 = WMI_PEER_40MHZ, + .stbc = WMI_PEER_STBC, + .ldbc = WMI_PEER_LDPC, + .dyn_mimops = WMI_PEER_DYN_MIMOPS, + .static_mimops = WMI_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_PEER_SPATIAL_MUX, + .vht = WMI_PEER_VHT, + .bw80 = WMI_PEER_80MHZ, + .vht_2g = WMI_PEER_VHT_2G, + .pmf = WMI_PEER_PMF, +}; + +static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { + .auth = WMI_10X_PEER_AUTH, + .qos = WMI_10X_PEER_QOS, + .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY, + .apsd = WMI_10X_PEER_APSD, + .ht = WMI_10X_PEER_HT, + .bw40 = WMI_10X_PEER_40MHZ, + .stbc = WMI_10X_PEER_STBC, + .ldbc = WMI_10X_PEER_LDPC, + .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS, + .static_mimops = WMI_10X_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_10X_PEER_SPATIAL_MUX, + .vht = WMI_10X_PEER_VHT, + .bw80 = WMI_10X_PEER_80MHZ, +}; + +static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = { + .auth = WMI_10_2_PEER_AUTH, + .qos = WMI_10_2_PEER_QOS, + .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY, + .apsd = WMI_10_2_PEER_APSD, + .ht = WMI_10_2_PEER_HT, + .bw40 = WMI_10_2_PEER_40MHZ, + .stbc = WMI_10_2_PEER_STBC, + .ldbc = WMI_10_2_PEER_LDPC, + .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS, + .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX, + .vht = WMI_10_2_PEER_VHT, + .bw80 = WMI_10_2_PEER_80MHZ, + .vht_2g = WMI_10_2_PEER_VHT_2G, + .pmf = WMI_10_2_PEER_PMF, +}; + void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, const struct wmi_channel_arg *arg) { @@ -1660,6 +1715,8 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) struct ath10k *ar = arvif->ar; struct ath10k_skb_cb *cb; struct sk_buff *bcn; + bool dtim_zero; + bool deliver_cab; int ret; spin_lock_bh(&ar->data_lock); @@ -1679,12 +1736,14 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) arvif->beacon_state = ATH10K_BEACON_SENDING; spin_unlock_bh(&ar->data_lock); + dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO); + deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB); ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar, arvif->vdev_id, bcn->data, bcn->len, cb->paddr, - cb->bcn.dtim_zero, - cb->bcn.deliver_cab); + dtim_zero, + deliver_cab); spin_lock_bh(&ar->data_lock); @@ -1755,16 +1814,24 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) static struct sk_buff * ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_vif *arvif = (void *)cb->vif->drv_priv; struct wmi_mgmt_tx_cmd *cmd; struct ieee80211_hdr *hdr; struct sk_buff *skb; int len; + u32 vdev_id; u32 buf_len = msdu->len; u16 fc; hdr = (struct ieee80211_hdr *)msdu->data; fc = le16_to_cpu(hdr->frame_control); + if (cb->vif) + vdev_id = arvif->vdev_id; + else + vdev_id = 0; + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) return ERR_PTR(-EINVAL); @@ -1786,7 +1853,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) cmd = (struct wmi_mgmt_tx_cmd *)skb->data; - cmd->hdr.vdev_id = 
__cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id); + cmd->hdr.vdev_id = __cpu_to_le32(vdev_id); cmd->hdr.tx_rate = 0; cmd->hdr.tx_power = 0; cmd->hdr.buf_len = __cpu_to_le32(buf_len); @@ -2204,22 +2271,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx status %08x\n", rx_status); - if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { - dev_kfree_skb(skb); - return 0; - } - - if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { - dev_kfree_skb(skb); - return 0; - } - - if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) { - dev_kfree_skb(skb); - return 0; - } - - if (rx_status & WMI_RX_STATUS_ERR_CRC) { + if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) || + (rx_status & (WMI_RX_STATUS_ERR_DECRYPT | + WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { dev_kfree_skb(skb); return 0; } @@ -3115,10 +3169,10 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); if (tim->dtim_count == 0) { - ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true; + ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO; if (__le32_to_cpu(tim_info->tim_mcast) == 1) - ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; + ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB; } ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", @@ -4258,34 +4312,58 @@ void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); } -static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, - u32 num_units, u32 unit_len) +static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) { dma_addr_t paddr; - u32 pool_size; + u32 pool_size = 0; int idx = ar->wmi.num_mem_chunks; + void *vaddr = NULL; - pool_size = num_units * round_up(unit_len, 4); + if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks)) + return -ENOMEM; - if (!pool_size) - return -EINVAL; + while (!vaddr && num_units) { + pool_size = num_units * round_up(unit_len, 4); + if (!pool_size) + return -EINVAL; - ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, - pool_size, - &paddr, - GFP_KERNEL); - if (!ar->wmi.mem_chunks[idx].vaddr) { - ath10k_warn(ar, "failed to allocate memory chunk\n"); - return -ENOMEM; + vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN); + if (!vaddr) + num_units /= 2; } - memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); + if (!num_units) + return -ENOMEM; + paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE); + if (dma_mapping_error(ar->dev, paddr)) { + kfree(vaddr); + return -ENOMEM; + } + + ar->wmi.mem_chunks[idx].vaddr = vaddr; ar->wmi.mem_chunks[idx].paddr = paddr; ar->wmi.mem_chunks[idx].len = pool_size; ar->wmi.mem_chunks[idx].req_id = req_id; ar->wmi.num_mem_chunks++; + return num_units; +} + +static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) +{ + int ret; + + while (num_units) { + ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len); + if (ret < 0) + return ret; + + num_units -= ret; + } + return 0; } @@ -5061,6 +5139,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_UPDATE_STATS_EVENTID: ath10k_wmi_event_update_stats(ar, skb); break; + case WMI_10_4_PDEV_TEMPERATURE_EVENTID: + ath10k_wmi_event_temperature(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -5431,8 +5512,11 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct 
ath10k *ar) cmd = (struct wmi_init_cmd_10_2 *)buf->data; features = WMI_10_2_RX_BATCH_MODE; - if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) + + if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) && + test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) features |= WMI_10_2_COEX_GPIO; + cmd->resource_config.feature_mask = __cpu_to_le32(features); memcpy(&cmd->resource_config.common, &config, sizeof(config)); @@ -6328,6 +6412,16 @@ ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, cmd->info0 = __cpu_to_le32(info0); } +static void +ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf; + + ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg); + cmd->peer_bw_rxnss_override = 0; +} + static int ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg) { @@ -6417,6 +6511,31 @@ ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar, } static struct sk_buff * +ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd); + struct sk_buff *skb; + int ret; + + ret = ath10k_wmi_peer_assoc_check_arg(arg); + if (ret) + return ERR_PTR(ret); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM (%s)\n", + arg->vdev_id, arg->addr, + arg->peer_reassoc ? "reassociate" : "new"); + return skb; +} + +static struct sk_buff * ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) { struct sk_buff *skb; @@ -7536,6 +7655,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, + .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc, .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, @@ -7555,8 +7675,8 @@ static const struct wmi_ops wmi_10_4_ops = { .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, /* shared with 10.2 */ - .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, }; int ath10k_wmi_attach(struct ath10k *ar) @@ -7567,30 +7687,35 @@ int ath10k_wmi_attach(struct ath10k *ar) ar->wmi.cmd = &wmi_10_4_cmd_map; ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_10_2_4: ar->wmi.cmd = &wmi_10_2_4_cmd_map; ar->wmi.ops = &wmi_10_2_4_ops; ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map; ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_10_2: ar->wmi.cmd = &wmi_10_2_cmd_map; ar->wmi.ops = &wmi_10_2_ops; ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_10_1: ar->wmi.cmd = &wmi_10x_cmd_map; ar->wmi.ops = &wmi_10_1_ops; ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + ar->wmi.peer_flags = &wmi_10x_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->wmi.cmd = &wmi_cmd_map; 
ar->wmi.ops = &wmi_ops; ar->wmi.vdev_param = &wmi_vdev_param_map; ar->wmi.pdev_param = &wmi_pdev_param_map; + ar->wmi.peer_flags = &wmi_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_TLV: ath10k_wmi_tlv_attach(ar); @@ -7616,10 +7741,11 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar) /* free the host memory chunks requested by firmware */ for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - dma_free_coherent(ar->dev, - ar->wmi.mem_chunks[i].len, - ar->wmi.mem_chunks[i].vaddr, - ar->wmi.mem_chunks[i].paddr); + dma_unmap_single(ar->dev, + ar->wmi.mem_chunks[i].paddr, + ar->wmi.mem_chunks[i].len, + DMA_TO_DEVICE); + kfree(ar->wmi.mem_chunks[i].vaddr); } ar->wmi.num_mem_chunks = 0; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 72a4ef709577..d85ad7855d20 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -175,6 +175,8 @@ enum wmi_service { WMI_SERVICE_AUX_SPECTRAL_INTF, WMI_SERVICE_AUX_CHAN_LOAD_INTF, WMI_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_MESH, /* keep last */ WMI_SERVICE_MAX, @@ -206,6 +208,11 @@ enum wmi_10x_service { WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, WMI_10X_SERVICE_ATF, WMI_10X_SERVICE_COEX_GPIO, + WMI_10X_SERVICE_AUX_SPECTRAL_INTF, + WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, + WMI_10X_SERVICE_MESH, + WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, }; enum wmi_main_service { @@ -286,6 +293,8 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, + WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_10_4_SERVICE_MESH, }; static inline char *wmi_service_name(int service_id) @@ -375,6 +384,8 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF); SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); + SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT); + SVCSTR(WMI_SERVICE_MESH); default: return NULL; } @@ -442,6 +453,16 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_ATF, len); SVCMAP(WMI_10X_SERVICE_COEX_GPIO, WMI_SERVICE_COEX_GPIO, len); + SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_SPECTRAL_INTF, len); + SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); + SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_BSS_CHANNEL_INFO_64, len); + SVCMAP(WMI_10X_SERVICE_MESH, + WMI_SERVICE_MESH, len); + SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, @@ -600,6 +621,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, WMI_SERVICE_BSS_CHANNEL_INFO_64, len); + SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_MESH, + WMI_SERVICE_MESH, len); } #undef SVCMAP @@ -1576,6 +1601,9 @@ enum wmi_10_4_cmd_id { WMI_10_4_MU_CAL_START_CMDID, WMI_10_4_SET_CCA_PARAMS_CMDID, WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_4_EXT_RESOURCE_CFG_CMDID, + WMI_10_4_VDEV_SET_IE_CMDID, + WMI_10_4_SET_LTEU_CONFIG_CMDID, WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, }; @@ -1638,6 +1666,7 @@ enum wmi_10_4_event_id { WMI_10_4_PDEV_TEMPERATURE_EVENTID, WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, + 
WMI_10_4_MU_REPORT_EVENTID, WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, }; @@ -3650,6 +3679,12 @@ enum wmi_10_4_pdev_param { WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, WMI_10_4_PDEV_PARAM_ARP_SRCADDR, WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB, + WMI_10_4_PDEV_PARAM_RX_BATCHMODE, + WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY, + WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH, + WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, + WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE, }; struct wmi_pdev_set_param_cmd { @@ -4239,6 +4274,8 @@ enum wmi_vdev_subtype { WMI_VDEV_SUBTYPE_P2P_DEVICE = 1, WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, WMI_VDEV_SUBTYPE_P2P_GO = 3, + WMI_VDEV_SUBTYPE_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_MESH = 5, }; /* values for vdev_subtype */ @@ -5641,21 +5678,79 @@ struct wmi_peer_set_q_empty_callback_cmd { __le32 callback_enable; } __packed; -#define WMI_PEER_AUTH 0x00000001 -#define WMI_PEER_QOS 0x00000002 -#define WMI_PEER_NEED_PTK_4_WAY 0x00000004 -#define WMI_PEER_NEED_GTK_2_WAY 0x00000010 -#define WMI_PEER_APSD 0x00000800 -#define WMI_PEER_HT 0x00001000 -#define WMI_PEER_40MHZ 0x00002000 -#define WMI_PEER_STBC 0x00008000 -#define WMI_PEER_LDPC 0x00010000 -#define WMI_PEER_DYN_MIMOPS 0x00020000 -#define WMI_PEER_STATIC_MIMOPS 0x00040000 -#define WMI_PEER_SPATIAL_MUX 0x00200000 -#define WMI_PEER_VHT 0x02000000 -#define WMI_PEER_80MHZ 0x04000000 -#define WMI_PEER_VHT_2G 0x08000000 +struct wmi_peer_flags_map { + u32 auth; + u32 qos; + u32 need_ptk_4_way; + u32 need_gtk_2_way; + u32 apsd; + u32 ht; + u32 bw40; + u32 stbc; + u32 ldbc; + u32 dyn_mimops; + u32 static_mimops; + u32 spatial_mux; + u32 vht; + u32 bw80; + u32 vht_2g; + u32 pmf; +}; + +enum wmi_peer_flags { + WMI_PEER_AUTH = 0x00000001, + WMI_PEER_QOS = 0x00000002, + WMI_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_PEER_APSD = 0x00000800, + WMI_PEER_HT = 0x00001000, + WMI_PEER_40MHZ = 0x00002000, + WMI_PEER_STBC = 0x00008000, + WMI_PEER_LDPC = 0x00010000, + WMI_PEER_DYN_MIMOPS = 0x00020000, + WMI_PEER_STATIC_MIMOPS = 0x00040000, + WMI_PEER_SPATIAL_MUX = 0x00200000, + WMI_PEER_VHT = 0x02000000, + WMI_PEER_80MHZ = 0x04000000, + WMI_PEER_VHT_2G = 0x08000000, + WMI_PEER_PMF = 0x10000000, +}; + +enum wmi_10x_peer_flags { + WMI_10X_PEER_AUTH = 0x00000001, + WMI_10X_PEER_QOS = 0x00000002, + WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_10X_PEER_APSD = 0x00000800, + WMI_10X_PEER_HT = 0x00001000, + WMI_10X_PEER_40MHZ = 0x00002000, + WMI_10X_PEER_STBC = 0x00008000, + WMI_10X_PEER_LDPC = 0x00010000, + WMI_10X_PEER_DYN_MIMOPS = 0x00020000, + WMI_10X_PEER_STATIC_MIMOPS = 0x00040000, + WMI_10X_PEER_SPATIAL_MUX = 0x00200000, + WMI_10X_PEER_VHT = 0x02000000, + WMI_10X_PEER_80MHZ = 0x04000000, +}; + +enum wmi_10_2_peer_flags { + WMI_10_2_PEER_AUTH = 0x00000001, + WMI_10_2_PEER_QOS = 0x00000002, + WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_10_2_PEER_APSD = 0x00000800, + WMI_10_2_PEER_HT = 0x00001000, + WMI_10_2_PEER_40MHZ = 0x00002000, + WMI_10_2_PEER_STBC = 0x00008000, + WMI_10_2_PEER_LDPC = 0x00010000, + WMI_10_2_PEER_DYN_MIMOPS = 0x00020000, + WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000, + WMI_10_2_PEER_SPATIAL_MUX = 0x00200000, + WMI_10_2_PEER_VHT = 0x02000000, + WMI_10_2_PEER_80MHZ = 0x04000000, + WMI_10_2_PEER_VHT_2G = 0x08000000, + WMI_10_2_PEER_PMF = 0x10000000, +}; /* * Peer rate capabilities. 
@@ -5721,6 +5816,11 @@ struct wmi_10_2_peer_assoc_complete_cmd { __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */ } __packed; +struct wmi_10_4_peer_assoc_complete_cmd { + struct wmi_10_2_peer_assoc_complete_cmd cmd; + __le32 peer_bw_rxnss_override; +} __packed; + struct wmi_peer_assoc_complete_arg { u8 addr[ETH_ALEN]; u32 vdev_id; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 342563a3706f..3d946d8b2db2 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -767,7 +767,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, if (info->flags & IEEE80211_TX_CTL_NO_ACK) flags |= AR5K_TXDESC_NOACK; - rc_flags = info->control.rates[0].flags; + rc_flags = bf->rates[0].flags; hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0); diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 81ac8c59f0ec..7f3f94fbf157 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3930,8 +3930,8 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff; ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff; ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff; - ar->hw.tx_ant = 2; - ar->hw.rx_ant = 2; + ar->hw.tx_ant = 0x3; /* mask, 2 antenna */ + ar->hw.rx_ant = 0x3; } else { ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff; ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff; diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index fffb65b3e652..65c31da43c47 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -2222,8 +2222,9 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, } if (status) { - ath6kl_err("failed to get pending recv messages: %d\n", - status); + if (status != -ECANCELED) + ath6kl_err("failed to get pending recv messages: %d\n", + status); /* cleanup any packets in sync completion queue */ list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 6ae0734f86e0..da557dc742e6 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -954,8 +954,10 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) snprintf(filename, sizeof(filename), "%s/%s", ar->hw.fw.dir, name); ret = request_firmware(&fw, filename, ar->dev); - if (ret) + if (ret) { + ath6kl_err("Failed request firmware, rv: %d\n", ret); return ret; + } data = fw->data; len = fw->size; @@ -964,11 +966,15 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) magic_len = strlen(ATH6KL_FIRMWARE_MAGIC) + 1; if (len < magic_len) { + ath6kl_err("Magic length is invalid, len: %zd magic_len: %zd\n", + len, magic_len); ret = -EINVAL; goto out; } if (memcmp(data, ATH6KL_FIRMWARE_MAGIC, magic_len) != 0) { + ath6kl_err("Magic is invalid, magic_len: %zd\n", + magic_len); ret = -EINVAL; goto out; } @@ -987,7 +993,12 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) len -= sizeof(*hdr); data += sizeof(*hdr); + ath6kl_dbg(ATH6KL_DBG_BOOT, "ie-id: %d len: %zd (0x%zx)\n", + ie_id, ie_len, ie_len); + if (len < ie_len) { + ath6kl_err("IE len is invalid, len: %zd ie_len: %zd ie-id: %d\n", + len, ie_len, ie_id); ret = -EINVAL; goto out; } @@ -1008,6 +1019,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) ar->fw_otp 
= kmemdup(data, ie_len, GFP_KERNEL); if (ar->fw_otp == NULL) { + ath6kl_err("fw_otp cannot be allocated\n"); ret = -ENOMEM; goto out; } @@ -1025,6 +1037,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) ar->fw = vmalloc(ie_len); if (ar->fw == NULL) { + ath6kl_err("fw storage cannot be allocated, len: %zd\n", ie_len); ret = -ENOMEM; goto out; } @@ -1039,6 +1052,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL); if (ar->fw_patch == NULL) { + ath6kl_err("fw_patch storage cannot be allocated, len: %zd\n", ie_len); ret = -ENOMEM; goto out; } diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index fee0cadb0f5e..40fa915d6f35 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -176,3 +176,14 @@ config ATH9K_HTC_DEBUGFS depends on ATH9K_HTC && DEBUG_FS ---help--- Say Y, if you need access to ath9k_htc's statistics. + +config ATH9K_HWRNG + bool "Random number generator support" + depends on ATH9K && (HW_RANDOM = y || HW_RANDOM = ATH9K) + default y + ---help--- + This option incorporates the ADC register output as a source of + randomness into Linux entropy pool (/dev/urandom and /dev/random) + + Say Y, feeds the entropy directly from the WiFi driver to the input + pool. diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index ecda613c2d54..76f9dc37500b 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -15,6 +15,7 @@ ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o ath9k-$(CONFIG_ATH9K_TX99) += tx99.o ath9k-$(CONFIG_ATH9K_WOW) += wow.o +ath9k-$(CONFIG_ATH9K_HWRNG) += rng.o ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index b42f4a963ef4..5294595da5a7 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -23,6 +23,7 @@ #include <linux/leds.h> #include <linux/completion.h> #include <linux/time.h> +#include <linux/hw_random.h> #include "common.h" #include "debug.h" @@ -981,6 +982,7 @@ struct ath_softc { struct ath_offchannel offchannel; struct ath_chanctx *next_chan; struct completion go_beacon; + struct timespec last_event_time; #endif unsigned long driver_data; @@ -1040,6 +1042,11 @@ struct ath_softc { u32 wow_intr_before_sleep; bool force_wow; #endif + +#ifdef CONFIG_ATH9K_HWRNG + u32 rng_last; + struct task_struct *rng_task; +#endif }; /********/ @@ -1062,6 +1069,22 @@ static inline int ath9k_tx99_send(struct ath_softc *sc, } #endif /* CONFIG_ATH9K_TX99 */ +/***************************/ +/* Random Number Generator */ +/***************************/ +#ifdef CONFIG_ATH9K_HWRNG +void ath9k_rng_start(struct ath_softc *sc); +void ath9k_rng_stop(struct ath_softc *sc); +#else +static inline void ath9k_rng_start(struct ath_softc *sc) +{ +} + +static inline void ath9k_rng_stop(struct ath_softc *sc) +{ +} +#endif + static inline void ath_read_cachesize(struct ath_common *common, int *csz) { common->bus_ops->read_cachesize(common, csz); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index f50a6bc5d06e..5cf0cd7cb2d1 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -148,7 +148,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, 
ath_assign_seq(common, skb); - if (vif->p2p) + /* Always assign NOA attr when MCC enabled */ + if (ath9k_is_chanctx_enabled()) ath9k_beacon_add_noa(sc, avp, skb); bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 90f5773a1a61..50e614b915f1 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -226,6 +226,20 @@ static const char *chanctx_state_string(enum ath_chanctx_state state) } } +static const u32 chanctx_event_delta(struct ath_softc *sc) +{ + u64 ms; + struct timespec ts, *old; + + getrawmonotonic(&ts); + old = &sc->last_event_time; + ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000; + ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000; + sc->last_event_time = ts; + + return (u32)ms; +} + void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -356,14 +370,16 @@ static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_hw *ah = sc->sc_ah; + unsigned long timeout; ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000); tsf_time -= ath9k_hw_gettsf32(ah); - tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1; - mod_timer(&sc->sched.timer, jiffies + tsf_time); + timeout = msecs_to_jiffies(tsf_time / 1000) + 1; + mod_timer(&sc->sched.timer, jiffies + timeout); ath_dbg(common, CHAN_CTX, - "Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time)); + "Setup chanctx timer with timeout: %d (%d) ms\n", + tsf_time / 1000, jiffies_to_msecs(timeout)); } static void ath_chanctx_handle_bmiss(struct ath_softc *sc, @@ -403,7 +419,7 @@ static void ath_chanctx_offchannel_noa(struct ath_softc *sc, avp->offchannel_duration = sc->sched.offchannel_duration; ath_dbg(common, CHAN_CTX, - "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n", + "offchannel noa_duration: %d, noa_start: %u, noa_index: %d\n", avp->offchannel_duration, avp->offchannel_start, avp->noa_index); @@ -443,7 +459,7 @@ static void ath_chanctx_set_periodic_noa(struct ath_softc *sc, avp->periodic_noa = true; ath_dbg(common, CHAN_CTX, - "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n", + "noa_duration: %d, noa_start: %u, noa_index: %d, periodic: %d\n", avp->noa_duration, avp->noa_start, avp->noa_index, @@ -464,7 +480,7 @@ static void ath_chanctx_set_oneshot_noa(struct ath_softc *sc, avp->noa_duration = duration + sc->sched.channel_switch_time; ath_dbg(common, CHAN_CTX, - "oneshot noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n", + "oneshot noa_duration: %d, noa_start: %u, noa_index: %d, periodic: %d\n", avp->noa_duration, avp->noa_start, avp->noa_index, @@ -487,10 +503,11 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, spin_lock_bh(&sc->chan_lock); - ath_dbg(common, CHAN_CTX, "cur_chan: %d MHz, event: %s, state: %s\n", + ath_dbg(common, CHAN_CTX, "cur_chan: %d MHz, event: %s, state: %s, delta: %u ms\n", sc->cur_chan->chandef.center_freq1, chanctx_event_string(ev), - chanctx_state_string(sc->sched.state)); + chanctx_state_string(sc->sched.state), + chanctx_event_delta(sc)); switch (ev) { case ATH_CHANCTX_EVENT_BEACON_PREPARE: @@ -1099,6 +1116,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + skb->priority = 7; skb_set_queue_mapping(skb, IEEE80211_AC_VO); if 
(!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { dev_kfree_skb_any(skb); @@ -1401,8 +1419,9 @@ void ath9k_chanctx_wake_queues(struct ath_softc *sc, struct ath_chanctx *ctx) static void ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_hw *ah = sc->sc_ah; - s32 tsf, target_tsf; + u32 tsf, target_tsf; if (!avp || !avp->noa.has_next_tsf) return; @@ -1414,11 +1433,17 @@ static void ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp) target_tsf = avp->noa.next_tsf; if (!avp->noa.absent) target_tsf -= ATH_P2P_PS_STOP_TIME; + else + target_tsf += ATH_P2P_PS_STOP_TIME; if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME) target_tsf = tsf + ATH_P2P_PS_STOP_TIME; - ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000); + ath_dbg(common, CHAN_CTX, "%s absent %d tsf 0x%08X next_tsf 0x%08X (%dms)\n", + __func__, avp->noa.absent, tsf, target_tsf, + (target_tsf - tsf) / 1000); + + ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, target_tsf, 1000000); } static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) @@ -1433,6 +1458,10 @@ static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) return; sc->p2p_ps_vif = avp; + + if (sc->ps_flags & PS_BEACON_SYNC) + return; + tsf = ath9k_hw_gettsf32(sc->sc_ah); ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf); ath9k_update_p2p_ps_timer(sc, avp); @@ -1495,6 +1524,8 @@ void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, noa->index = avp->noa_index; noa->oppps_ctwindow = ath9k_get_ctwin(sc, avp); + if (noa->oppps_ctwindow) + noa->oppps_ctwindow |= BIT(7); if (avp->noa_duration) { if (avp->periodic_noa) { @@ -1536,6 +1567,8 @@ void ath9k_p2p_ps_timer(void *priv) tsf = ath9k_hw_gettsf32(sc->sc_ah); if (!avp->noa.absent) tsf += ATH_P2P_PS_STOP_TIME; + else + tsf -= ATH_P2P_PS_STOP_TIME; if (!avp->noa.has_next_tsf || avp->noa.next_tsf - tsf > BIT(31)) @@ -1571,8 +1604,7 @@ void ath9k_p2p_bss_info_changed(struct ath_softc *sc, spin_lock_bh(&sc->sc_pcu_lock); spin_lock_irqsave(&sc->sc_pm_lock, flags); - if (!(sc->ps_flags & PS_BEACON_SYNC)) - ath9k_update_p2p_ps(sc, vif); + ath9k_update_p2p_ps(sc, vif); spin_unlock_irqrestore(&sc->sc_pm_lock, flags); spin_unlock_bh(&sc->sc_pcu_lock); } diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c index 6ad44470d0f2..01d6d3205a65 100644 --- a/drivers/net/wireless/ath/ath9k/common-beacon.c +++ b/drivers/net/wireless/ath/ath9k/common-beacon.c @@ -18,30 +18,16 @@ #define FUDGE 2 -/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */ -static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu) -{ - u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo; - - tsf_mod = tsf & (BIT(10) - 1); - tsf_hi = tsf >> 32; - tsf_lo = ((u32) tsf) >> 10; - - mod_hi = tsf_hi % div_tu; - mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu; - - return (mod_lo << 10) | tsf_mod; -} - static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf, unsigned int interval) { - unsigned int offset; + unsigned int offset, divisor; tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time); - offset = ath9k_mod_tsf64_tu(tsf, interval); + divisor = TU_TO_USEC(interval); + div_u64_rem(tsf, divisor, &offset); - return (u32) tsf + TU_TO_USEC(interval) - offset; + return (u32) tsf + divisor - offset; } /* diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index 
cc81482c934d..a7afdeee698c 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -138,6 +138,80 @@ bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) return ret; } +int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) +{ + u16 magic; + u16 *eepdata; + int i; + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { + ath_err(common, "Reading Magic # failed\n"); + return -EIO; + } + + if (magic == AR5416_EEPROM_MAGIC) { + *swap_needed = false; + } else if (swab16(magic) == AR5416_EEPROM_MAGIC) { + if (ah->ah_flags & AH_NO_EEP_SWAP) { + ath_info(common, + "Ignoring endianness difference in EEPROM magic bytes.\n"); + + *swap_needed = false; + } else { + *swap_needed = true; + } + } else { + ath_err(common, + "Invalid EEPROM Magic (0x%04x).\n", magic); + return -EINVAL; + } + + eepdata = (u16 *)(&ah->eeprom); + + if (*swap_needed) { + ath_dbg(common, EEPROM, + "EEPROM Endianness is not native.. Changing.\n"); + + for (i = 0; i < size; i++) + eepdata[i] = swab16(eepdata[i]); + } + + return 0; +} + +bool ath9k_hw_nvram_validate_checksum(struct ath_hw *ah, int size) +{ + u32 i, sum = 0; + u16 *eepdata = (u16 *)(&ah->eeprom); + struct ath_common *common = ath9k_hw_common(ah); + + for (i = 0; i < size; i++) + sum ^= eepdata[i]; + + if (sum != 0xffff) { + ath_err(common, "Bad EEPROM checksum 0x%x\n", sum); + return false; + } + + return true; +} + +bool ath9k_hw_nvram_check_version(struct ath_hw *ah, int version, int minrev) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (ah->eep_ops->get_eeprom_ver(ah) != version || + ah->eep_ops->get_eeprom_rev(ah) < minrev) { + ath_err(common, "Bad EEPROM VER 0x%04x or REV 0x%04x\n", + ah->eep_ops->get_eeprom_ver(ah), + ah->eep_ops->get_eeprom_rev(ah)); + return false; + } + + return true; +} + void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, u8 *pVpdList, u16 numIntercepts, u8 *pRetVpdList) diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 40d4f62d0f16..4465c6566f20 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -664,6 +664,9 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, u16 *indexL, u16 *indexR); bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data); +int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size); +bool ath9k_hw_nvram_validate_checksum(struct ath_hw *ah, int size); +bool ath9k_hw_nvram_check_version(struct ath_hw *ah, int version, int minrev); void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, int eep_start_loc, int size); void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 4773da6dc6f2..5da0826bf1be 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -177,74 +177,30 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, } #endif - -#undef SIZE_EEPROM_4K - static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { -#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) - struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; - u16 *eepdata, temp, magic, magic2; - u32 sum = 0, 
el; - bool need_swap = false; - int i, addr; + u32 el; + bool need_swap; + int i, err; - - if (!ath9k_hw_use_flash(ah)) { - if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, - &magic)) { - ath_err(common, "Reading Magic # failed\n"); - return false; - } - - ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); - - if (magic != AR5416_EEPROM_MAGIC) { - magic2 = swab16(magic); - - if (magic2 == AR5416_EEPROM_MAGIC) { - need_swap = true; - eepdata = (u16 *) (&ah->eeprom); - - for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { - temp = swab16(*eepdata); - *eepdata = temp; - eepdata++; - } - } else { - ath_err(common, - "Invalid EEPROM Magic. Endianness mismatch.\n"); - return -EINVAL; - } - } - } - - ath_dbg(common, EEPROM, "need_swap = %s\n", - need_swap ? "True" : "False"); + err = ath9k_hw_nvram_swap_data(ah, &need_swap, SIZE_EEPROM_4K); + if (err) + return err; if (need_swap) - el = swab16(ah->eeprom.map4k.baseEepHeader.length); - else - el = ah->eeprom.map4k.baseEepHeader.length; - - if (el > sizeof(struct ar5416_eeprom_4k)) - el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); + el = swab16(eep->baseEepHeader.length); else - el = el / sizeof(u16); + el = eep->baseEepHeader.length; - eepdata = (u16 *)(&ah->eeprom); - - for (i = 0; i < el; i++) - sum ^= *eepdata++; + el = min(el / sizeof(u16), SIZE_EEPROM_4K); + if (!ath9k_hw_nvram_validate_checksum(ah, el)) + return -EINVAL; if (need_swap) { u32 integer; u16 word; - ath_dbg(common, EEPROM, - "EEPROM Endianness is not native.. Changing\n"); - word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; @@ -283,17 +239,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) } } - if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || - ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { - ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", - sum, ah->eep_ops->get_eeprom_ver(ah)); + if (!ath9k_hw_nvram_check_version(ah, AR5416_EEP_VER, + AR5416_EEP_NO_BACK_VER)) return -EINVAL; - } return 0; -#undef EEPROM_4K_SIZE } +#undef SIZE_EEPROM_4K + static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 6ca33dfde1fd..1a019a39eda1 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -177,59 +177,24 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) { - u32 sum = 0, el, integer; - u16 temp, word, magic, magic2, *eepdata; - int i, addr; - bool need_swap = false; + u32 el, integer; + u16 word; + int i, err; + bool need_swap; struct ar9287_eeprom *eep = &ah->eeprom.map9287; - struct ath_common *common = ath9k_hw_common(ah); - - if (!ath9k_hw_use_flash(ah)) { - if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, - &magic)) { - ath_err(common, "Reading Magic # failed\n"); - return false; - } - - ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); - - if (magic != AR5416_EEPROM_MAGIC) { - magic2 = swab16(magic); - - if (magic2 == AR5416_EEPROM_MAGIC) { - need_swap = true; - eepdata = (u16 *)(&ah->eeprom); - - for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { - temp = swab16(*eepdata); - *eepdata = temp; - eepdata++; - } - } else { - ath_err(common, - "Invalid EEPROM Magic. Endianness mismatch.\n"); - return -EINVAL; - } - } - } - ath_dbg(common, EEPROM, "need_swap = %s\n", - need_swap ? 
"True" : "False"); + err = ath9k_hw_nvram_swap_data(ah, &need_swap, SIZE_EEPROM_AR9287); + if (err) + return err; if (need_swap) - el = swab16(ah->eeprom.map9287.baseEepHeader.length); - else - el = ah->eeprom.map9287.baseEepHeader.length; - - if (el > sizeof(struct ar9287_eeprom)) - el = sizeof(struct ar9287_eeprom) / sizeof(u16); + el = swab16(eep->baseEepHeader.length); else - el = el / sizeof(u16); - - eepdata = (u16 *)(&ah->eeprom); + el = eep->baseEepHeader.length; - for (i = 0; i < el; i++) - sum ^= *eepdata++; + el = min(el / sizeof(u16), SIZE_EEPROM_AR9287); + if (!ath9k_hw_nvram_validate_checksum(ah, el)) + return -EINVAL; if (need_swap) { word = swab16(eep->baseEepHeader.length); @@ -270,16 +235,15 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) } } - if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER - || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { - ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", - sum, ah->eep_ops->get_eeprom_ver(ah)); + if (!ath9k_hw_nvram_check_version(ah, AR9287_EEP_VER, + AR5416_EEP_NO_BACK_VER)) return -EINVAL; - } return 0; } +#undef SIZE_EEPROM_AR9287 + static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 056f516bf017..959682f7909c 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -126,8 +126,6 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) return __ath9k_hw_def_fill_eeprom(ah); } -#undef SIZE_EEPROM_DEF - #if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_header *modal_hdr) @@ -257,59 +255,31 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, } #endif - static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) { struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct ath_common *common = ath9k_hw_common(ah); - u16 *eepdata, temp, magic; - u32 sum = 0, el; - bool need_swap = false; - int i, addr, size; - - if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { - ath_err(common, "Reading Magic # failed\n"); - return false; - } - - if (swab16(magic) == AR5416_EEPROM_MAGIC && - !(ah->ah_flags & AH_NO_EEP_SWAP)) { - size = sizeof(struct ar5416_eeprom_def); - need_swap = true; - eepdata = (u16 *) (&ah->eeprom); - - for (addr = 0; addr < size / sizeof(u16); addr++) { - temp = swab16(*eepdata); - *eepdata = temp; - eepdata++; - } - } + u32 el; + bool need_swap; + int i, err; - ath_dbg(common, EEPROM, "need_swap = %s\n", - need_swap ? "True" : "False"); + err = ath9k_hw_nvram_swap_data(ah, &need_swap, SIZE_EEPROM_DEF); + if (err) + return err; if (need_swap) - el = swab16(ah->eeprom.def.baseEepHeader.length); + el = swab16(eep->baseEepHeader.length); else - el = ah->eeprom.def.baseEepHeader.length; + el = eep->baseEepHeader.length; - if (el > sizeof(struct ar5416_eeprom_def)) - el = sizeof(struct ar5416_eeprom_def) / sizeof(u16); - else - el = el / sizeof(u16); - - eepdata = (u16 *)(&ah->eeprom); - - for (i = 0; i < el; i++) - sum ^= *eepdata++; + el = min(el / sizeof(u16), SIZE_EEPROM_DEF); + if (!ath9k_hw_nvram_validate_checksum(ah, el)) + return -EINVAL; if (need_swap) { u32 integer, j; u16 word; - ath_dbg(common, EEPROM, - "EEPROM Endianness is not native.. 
Changing.\n"); - word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; @@ -356,12 +326,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) } } - if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || - ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { - ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", - sum, ah->eep_ops->get_eeprom_ver(ah)); + if (!ath9k_hw_nvram_check_version(ah, AR5416_EEP_VER, + AR5416_EEP_NO_BACK_VER)) return -EINVAL; - } /* Enable fixup for AR_AN_TOP2 if necessary */ if ((ah->hw_version.devid == AR9280_DEVID_PCI) && @@ -376,6 +343,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) return 0; } +#undef SIZE_EEPROM_DEF + static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index a680a970b7f7..fe1fd1a5ae15 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -834,7 +834,7 @@ void ath9k_htc_ani_work(struct work_struct *work) if (longcal || shortcal) common->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan, - ah->rxchainmask, longcal); + ah->rxchainmask, longcal) > 0; ath9k_htc_ps_restore(priv); } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 2294709ee8b0..fd85f996c554 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -414,7 +414,7 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, return; } - if (epid >= ENDPOINT_MAX) { + if (epid < 0 || epid >= ENDPOINT_MAX) { if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 41382f89abe1..257f46ed4a04 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2299,10 +2299,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, else nextTbtt = bs->bs_nexttbtt; - ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim); - ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt); - ath_dbg(common, BEACON, "beacon period %d\n", beaconintval); - ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod); + ath_dbg(common, BEACON, "next DTIM %u\n", bs->bs_nextdtim); + ath_dbg(common, BEACON, "next beacon %u\n", nextTbtt); + ath_dbg(common, BEACON, "beacon period %u\n", beaconintval); + ath_dbg(common, BEACON, "DTIM period %u\n", dtimperiod); ENABLE_REGWRITE_BUFFER(ah); @@ -2761,9 +2761,6 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) ENABLE_REGWRITE_BUFFER(ah); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) - bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER; - REG_WRITE(ah, AR_RX_FILTER, bits); phybits = 0; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 2e2b92ba96b8..ab7a1ac37849 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -828,6 +828,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); if (ath9k_ps_enable) ieee80211_hw_set(hw, SUPPORTS_PS); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index d184e682e636..c1b33fdcca08 100644 --- 
a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -739,6 +739,8 @@ static int ath9k_start(struct ieee80211_hw *hw) ath9k_ps_restore(sc); + ath9k_rng_start(sc); + return 0; } @@ -828,6 +830,8 @@ static void ath9k_stop(struct ieee80211_hw *hw) ath9k_deinit_channel_context(sc); + ath9k_rng_stop(sc); + mutex_lock(&sc->mutex); ath_cancel_work(sc); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 994daf6c6297..32160fca876a 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -424,6 +424,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc) AR_SREV_9561(sc->sc_ah)) rfilt |= ATH9K_RX_FILTER_4ADDRESS; + if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah)) + rfilt |= ATH9K_RX_FILTER_CONTROL_WRAPPER; + if (ath9k_is_chanctx_enabled() && test_bit(ATH_OP_SCANNING, &common->op_flags)) rfilt |= ATH9K_RX_FILTER_BEACON; diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c new file mode 100644 index 000000000000..c9cb2aad7b6f --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/rng.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
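Editorial aside: among the ath9k changes above, common-beacon.c drops the open-coded 64-bit TSF modulo and derives the next TBTT with div_u64_rem(), which avoids pulling 64-bit division helpers into 32-bit builds. Below is a minimal userspace sketch of the same arithmetic, assuming 1 TU = 1024 us and omitting the FUDGE/sw_beacon_response_time offset the driver adds first; all names in the sketch are illustrative and it is not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define TU_TO_USEC(tu)	((uint32_t)(tu) << 10)	/* 1 TU = 1024 us */

static uint32_t next_tbtt(uint64_t tsf_usec, unsigned int interval_tu)
{
	uint32_t divisor = TU_TO_USEC(interval_tu);
	/* the kernel code uses div_u64_rem(tsf, divisor, &offset) here */
	uint32_t offset = (uint32_t)(tsf_usec % divisor);

	/* round the TSF snapshot up to the next beacon-interval boundary */
	return (uint32_t)tsf_usec + divisor - offset;
}

int main(void)
{
	/* beacon interval of 100 TU, arbitrary TSF snapshot */
	printf("next TBTT: %u us\n", next_tbtt(0x12345678abcULL, 100));
	return 0;
}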
+ */ + +#include <linux/hw_random.h> +#include <linux/kthread.h> + +#include "ath9k.h" +#include "hw.h" +#include "ar9003_phy.h" + +#define ATH9K_RNG_BUF_SIZE 320 +#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 320) >> 10) /* quality: 320/1024 */ + +static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size) +{ + int i, j; + u32 v1, v2, rng_last = sc->rng_last; + struct ath_hw *ah = sc->sc_ah; + + ath9k_ps_wakeup(sc); + + REG_RMW_FIELD(ah, AR_PHY_TEST, AR_PHY_TEST_BBB_OBS_SEL, 1); + REG_CLR_BIT(ah, AR_PHY_TEST, AR_PHY_TEST_RX_OBS_SEL_BIT5); + REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, AR_PHY_TEST_CTL_RX_OBS_SEL, 0); + + for (i = 0, j = 0; i < buf_size; i++) { + v1 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff; + v2 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff; + + /* wait for data ready */ + if (v1 && v2 && rng_last != v1 && v1 != v2 && v1 != 0xffff && + v2 != 0xffff) + buf[j++] = (v1 << 16) | v2; + + rng_last = v2; + } + + ath9k_ps_restore(sc); + + sc->rng_last = rng_last; + + return j << 2; +} + +static int ath9k_rng_kthread(void *data) +{ + int bytes_read; + struct ath_softc *sc = data; + u32 *rng_buf; + + rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL); + if (!rng_buf) + goto out; + + while (!kthread_should_stop()) { + bytes_read = ath9k_rng_data_read(sc, rng_buf, + ATH9K_RNG_BUF_SIZE); + if (unlikely(!bytes_read)) { + msleep_interruptible(10); + continue; + } + + /* sleep until entropy bits under write_wakeup_threshold */ + add_hwgenerator_randomness((void *)rng_buf, bytes_read, + ATH9K_RNG_ENTROPY(bytes_read)); + } + + kfree(rng_buf); +out: + sc->rng_task = NULL; + + return 0; +} + +void ath9k_rng_start(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + + if (sc->rng_task) + return; + + if (!AR_SREV_9300_20_OR_LATER(ah)) + return; + + sc->rng_task = kthread_run(ath9k_rng_kthread, sc, "ath9k-hwrng"); + if (IS_ERR(sc->rng_task)) + sc->rng_task = NULL; +} + +void ath9k_rng_stop(struct ath_softc *sc) +{ + if (sc->rng_task) + kthread_stop(sc->rng_task); +} diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3e3dac3d7060..fe795fc5288c 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1473,11 +1473,14 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_atx_tid *txtid; struct ath_txq *txq; struct ath_node *an; u8 density; + ath_dbg(common, XMIT, "%s called\n", __func__); + an = (struct ath_node *)sta->drv_priv; txtid = ATH_AN_2_TID(an, tid); txq = txtid->txq; @@ -1512,10 +1515,13 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *)sta->drv_priv; struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); struct ath_txq *txq = txtid->txq; + ath_dbg(common, XMIT, "%s called\n", __func__); + ath_txq_lock(sc, txq); txtid->active = false; ath_tx_flush_tid(sc, txtid); @@ -1526,11 +1532,14 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, struct ath_node *an) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_atx_tid *tid; struct ath_txq *txq; bool buffered; int tidno; + 
ath_dbg(common, XMIT, "%s called\n", __func__); + for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { @@ -1555,10 +1564,13 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_atx_tid *tid; struct ath_txq *txq; int tidno; + ath_dbg(common, XMIT, "%s called\n", __func__); + for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { @@ -1579,10 +1591,13 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tidno) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_atx_tid *tid; struct ath_node *an; struct ath_txq *txq; + ath_dbg(common, XMIT, "%s called\n", __func__); + an = (struct ath_node *)sta->drv_priv; tid = ATH_AN_2_TID(an, tidno); txq = tid->txq; @@ -2316,6 +2331,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, queue = ieee80211_is_data_present(hdr->frame_control); + /* If chanctx, queue all null frames while NOA could be there */ + if (ath9k_is_chanctx_enabled() && + ieee80211_is_nullfunc(hdr->frame_control) && + !txctl->force_channel) + queue = true; + /* Force queueing of all frames that belong to a virtual interface on * a different channel context, to ensure that they are sent on the * correct channel. @@ -2894,7 +2915,7 @@ int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb, if (skb_headroom(skb) < padsize) { ath_dbg(common, XMIT, "tx99 padding failed\n"); - return -EINVAL; + return -EINVAL; } skb_push(skb, padsize); diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index f8dfa05b290a..8643801f31b6 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -474,36 +474,37 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_desc *dxe = ctl->desc; dma_addr_t dma_addr; struct sk_buff *skb; + int ret = 0, int_mask; + u32 value; + + if (ch->ch_type == WCN36XX_DXE_CH_RX_L) { + value = WCN36XX_DXE_CTRL_RX_L; + int_mask = WCN36XX_DXE_INT_CH1_MASK; + } else { + value = WCN36XX_DXE_CTRL_RX_H; + int_mask = WCN36XX_DXE_INT_CH3_MASK; + } while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; - wcn36xx_dxe_fill_skb(wcn->dev, ctl); - - switch (ch->ch_type) { - case WCN36XX_DXE_CH_RX_L: - dxe->ctrl = WCN36XX_DXE_CTRL_RX_L; - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, - WCN36XX_DXE_INT_CH1_MASK); - break; - case WCN36XX_DXE_CH_RX_H: - dxe->ctrl = WCN36XX_DXE_CTRL_RX_H; - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, - WCN36XX_DXE_INT_CH3_MASK); - break; - default: - wcn36xx_warn("Unknown channel\n"); - } - - dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE, - DMA_FROM_DEVICE); - wcn36xx_rx_skb(wcn, skb); + ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl); + if (0 == ret) { + /* new skb allocation ok. Use the new one and queue + * the old one to network system. 
+ */ + dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE, + DMA_FROM_DEVICE); + wcn36xx_rx_skb(wcn, skb); + } /* else keep old skb not submitted and use it for rx DMA */ + + dxe->ctrl = value; ctl = ctl->next; dxe = ctl->desc; } + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask); ch->head_blk_ctl = ctl; - return 0; } diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index a1f1127d7808..b947de0fb2e5 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -345,6 +345,8 @@ enum wcn36xx_hal_host_msg_type { WCN36XX_HAL_DHCP_START_IND = 189, WCN36XX_HAL_DHCP_STOP_IND = 190, + WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233, + WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE }; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index c9263e1c75d4..74f56a81ad9a 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -302,6 +302,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len) return 0; } +static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf, + size_t len) +{ + struct wcn36xx_fw_msg_status_rsp_v2 *rsp; + + if (len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp)) + return wcn36xx_smd_rsp_status_check(buf, len); + + rsp = buf + sizeof(struct wcn36xx_hal_msg_header); + + if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) + return rsp->status; + + return 0; +} + int wcn36xx_smd_load_nv(struct wcn36xx *wcn) { struct nv_data *nv_d; @@ -1582,7 +1598,8 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, wcn36xx_err("Sending hal_remove_bsskey failed\n"); goto out; } - ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf, + wcn->hal_rsp_len); if (ret) { wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret); goto out; @@ -1951,7 +1968,8 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index) wcn36xx_err("Sending hal_trigger_ba failed\n"); goto out; } - ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf, + wcn->hal_rsp_len); if (ret) { wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret); goto out; @@ -2128,6 +2146,8 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) complete(&wcn->hal_rsp_compl); break; + case WCN36XX_HAL_COEX_IND: + case WCN36XX_HAL_AVOID_FREQ_RANGE_IND: case WCN36XX_HAL_OTA_TX_COMPL_IND: case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: @@ -2174,6 +2194,9 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg; switch (msg_header->msg_type) { + case WCN36XX_HAL_COEX_IND: + case WCN36XX_HAL_AVOID_FREQ_RANGE_IND: + break; case WCN36XX_HAL_OTA_TX_COMPL_IND: wcn36xx_smd_tx_compl_ind(wcn, hal_ind_msg->msg, diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 008d03423dbf..8361f9e3995b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -44,6 +44,15 @@ struct wcn36xx_fw_msg_status_rsp { u32 status; } __packed; +/* wcn3620 returns this for tigger_ba */ + +struct wcn36xx_fw_msg_status_rsp_v2 { + u8 bss_id[6]; + u32 status __packed; + u16 count_following_candidates __packed; + /* candidate list follows */ +}; + struct wcn36xx_hal_ind_msg { struct list_head list; u8 *msg; diff 
--git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 97bc186f9728..a1d10b85989f 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -580,16 +580,10 @@ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, long channel; bool on; - char *kbuf = kmalloc(len + 1, GFP_KERNEL); - - if (!kbuf) - return -ENOMEM; - if (copy_from_user(kbuf, buf, len)) { - kfree(kbuf); - return -EIO; - } + char *kbuf = memdup_user_nul(buf, len); - kbuf[len] = '\0'; + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); rc = kstrtol(kbuf, 0, &channel); kfree(kbuf); if (rc) diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 50c136e843c4..4f2ffa5c6e17 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -394,9 +394,13 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil_fw_core_dump(wil); wil_notify_fw_error(wil); isr &= ~ISR_MISC_FW_ERROR; - wil_fw_error_recovery(wil); + if (wil->platform_ops.notify_crash) { + wil_err(wil, "notify platform driver about FW crash"); + wil->platform_ops.notify_crash(wil->platform_handle); + } else { + wil_fw_error_recovery(wil); + } } - if (isr & ISR_MISC_MBOX_EVT) { wil_dbg_irq(wil, "MBOX event\n"); wmi_recv_cmd(wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index bb69a5949aea..b39f0bfc591e 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -401,20 +401,26 @@ void wil_bcast_fini(struct wil6210_priv *wil) static void wil_connect_worker(struct work_struct *work) { - int rc; + int rc, cid, ringid; struct wil6210_priv *wil = container_of(work, struct wil6210_priv, connect_worker); struct net_device *ndev = wil_to_ndev(wil); - int cid = wil->pending_connect_cid; - int ringid = wil_find_free_vring(wil); + mutex_lock(&wil->mutex); + cid = wil->pending_connect_cid; if (cid < 0) { wil_err(wil, "No connection pending\n"); - return; + goto out; + } + ringid = wil_find_free_vring(wil); + if (ringid < 0) { + wil_err(wil, "No free vring found\n"); + goto out; } - wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid); + wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", + cid, ringid); rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); wil->pending_connect_cid = -1; @@ -424,6 +430,8 @@ static void wil_connect_worker(struct work_struct *work) } else { wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true); } +out: + mutex_unlock(&wil->mutex); } int wil_priv_init(struct wil6210_priv *wil) @@ -773,8 +781,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); - /* prevent NAPI from being scheduled */ + /* prevent NAPI from being scheduled and prevent wmi commands */ + mutex_lock(&wil->wmi_mutex); bitmap_zero(wil->status, wil_status_last); + mutex_unlock(&wil->wmi_mutex); if (wil->scan_request) { wil_dbg_misc(wil, "Abort scan_request 0x%p\n", @@ -977,7 +987,7 @@ int __wil_down(struct wil6210_priv *wil) } mutex_lock(&wil->mutex); - if (!iter) + if (iter < 0) wil_err(wil, "timeout waiting for idle FW/HW\n"); wil_reset(wil, false); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index e3b3c8fb4605..56aaa2d4fb0e 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ 
b/drivers/net/wireless/ath/wil6210/netdev.c @@ -183,7 +183,7 @@ void *wil_if_alloc(struct device *dev) netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, WIL6210_NAPI_BUDGET); - netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, + netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, WIL6210_NAPI_BUDGET); netif_tx_stop_all_queues(ndev); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 1a3142c332e1..e36f2a0c8cb6 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -125,11 +125,37 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil) return 0; } +static int wil_platform_rop_ramdump(void *wil_handle, void *buf, uint32_t size) +{ + struct wil6210_priv *wil = wil_handle; + + if (!wil) + return -EINVAL; + + return wil_fw_copy_crash_dump(wil, buf, size); +} + +static int wil_platform_rop_fw_recovery(void *wil_handle) +{ + struct wil6210_priv *wil = wil_handle; + + if (!wil) + return -EINVAL; + + wil_fw_error_recovery(wil); + + return 0; +} + static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct wil6210_priv *wil; struct device *dev = &pdev->dev; int rc; + const struct wil_platform_rops rops = { + .ramdump = wil_platform_rop_ramdump, + .fw_recovery = wil_platform_rop_fw_recovery, + }; /* check HW */ dev_info(&pdev->dev, WIL_NAME @@ -154,7 +180,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* rollback to if_free */ wil->platform_handle = - wil_platform_init(&pdev->dev, &wil->platform_ops); + wil_platform_init(&pdev->dev, &wil->platform_ops, &rops, wil); if (!wil->platform_handle) { rc = -ENODEV; wil_err(wil, "wil_platform_init failed\n"); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index e3d1be82f314..32031e7a11d5 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -261,9 +261,19 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, struct wil_tid_ampdu_rx *r) { + int i; + if (!r) return; - wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size); + + /* Do not pass remaining frames to the network stack - it may be + * not expecting to get any more Rx. Rx from here may lead to + * kernel OOPS since some per-socket accounting info was already + * released. 
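Editorial aside: the wil6210 debugfs conversion above is a common cleanup pattern: memdup_user_nul() replaces the kmalloc()/copy_from_user()/manual NUL-termination sequence and already returns an ERR_PTR() on failure. A minimal sketch of the pattern in a hypothetical write handler follows; the handler and variable names are illustrative, not part of the patch.

/* Hypothetical debugfs write handler -- illustrative only, not part of
 * the patch. memdup_user_nul() copies len bytes from userspace into a
 * freshly allocated, NUL-terminated buffer and returns an ERR_PTR() on
 * failure, so the handler no longer open-codes the copy.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/kernel.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *ppos)
{
	char *kbuf = memdup_user_nul(buf, len);
	long value;
	int rc;

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	rc = kstrtol(kbuf, 0, &value);
	kfree(kbuf);
	if (rc)
		return rc;

	/* ... act on 'value' ... */
	return len;
}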
+ */ + for (i = 0; i < r->buf_size; i++) + kfree_skb(r->reorder_buf[i]); + kfree(r->reorder_buf); kfree(r->reorder_time); kfree(r); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 3bc9bc0efbac..7887e6cfd817 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -160,6 +160,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, struct device *dev = wil_to_dev(wil); size_t sz = vring->size * sizeof(vring->va[0]); + lockdep_assert_held(&wil->mutex); if (tx) { int vring_index = vring - wil->vring_tx; @@ -749,6 +750,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, cmd.vring_cfg.tx_sw_ring.max_mpdu_size); + lockdep_assert_held(&wil->mutex); if (vring->va) { wil_err(wil, "Tx ring [%d] already allocated\n", id); @@ -821,6 +823,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, cmd.vring_cfg.tx_sw_ring.max_mpdu_size); + lockdep_assert_held(&wil->mutex); if (vring->va) { wil_err(wil, "Tx ring [%d] already allocated\n", id); @@ -872,7 +875,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - WARN_ON(!mutex_is_locked(&wil->mutex)); + lockdep_assert_held(&wil->mutex); if (!vring->va) return; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index ade5f3b8274b..235e205ce2bc 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -828,6 +828,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_resume(struct wil6210_priv *wil, bool is_runtime); +int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size); void wil_fw_core_dump(struct wil6210_priv *wil); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index 7e70934990ae..b57d280946e0 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -51,8 +51,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil, return 0; } -static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, - u32 size) +int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) { int i; const struct fw_map *map; diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c index 2e831bf20117..4eed05bddb60 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.c +++ b/drivers/net/wireless/ath/wil6210/wil_platform.c @@ -33,7 +33,8 @@ void wil_platform_modexit(void) * It returns a handle which is used with the rest of the API * */ -void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops) +void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops, + const struct wil_platform_rops *rops, void *wil_handle) { void *handle = ops; /* to return some non-NULL for 'void' impl. 
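Editorial aside: the new rops interface (declared in wil_platform.h just below) lets a platform driver pull a firmware ramdump and later kick off recovery after being told about a crash through the notify_crash op added above. The following is a hypothetical sketch of such a backend's callback, assuming the backend copied the rops and wil_handle it received through wil_platform_init() into its own context; all example_* names and the dump size are invented for illustration and are not part of the patch.

/* Hypothetical platform-backend callback -- illustrative only, not part
 * of the patch. In a real integration fw_recovery would typically be
 * invoked later, once the platform-side crash handling has finished.
 */
#include <linux/vmalloc.h>
#include "wil_platform.h"

struct example_platform_ctx {
	struct wil_platform_rops rops;	/* copied at init time, per the API */
	void *wil_handle;
};

#define EXAMPLE_DUMP_SIZE	(4 * 1024 * 1024)	/* invented size */

static int example_notify_crash(void *handle)
{
	struct example_platform_ctx *ctx = handle;
	void *buf;

	if (ctx->rops.ramdump) {
		buf = vmalloc(EXAMPLE_DUMP_SIZE);
		if (buf) {
			ctx->rops.ramdump(ctx->wil_handle, buf,
					  EXAMPLE_DUMP_SIZE);
			/* ... hand buf to the platform crash collector ... */
			vfree(buf);
		}
	}

	if (ctx->rops.fw_recovery)
		ctx->rops.fw_recovery(ctx->wil_handle);

	return 0;
}

Such a backend would also set .notify_crash = example_notify_crash in the wil_platform_ops structure it fills when the wil6210 driver calls its init path, so that the interrupt handler change above routes firmware errors to it instead of the driver's own recovery.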
*/ diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index d7fa19b7886d..9a949d910343 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,16 +20,48 @@ struct device; /** - * struct wil_platform_ops - wil platform module callbacks + * struct wil_platform_ops - wil platform module calls from this + * driver to platform driver */ struct wil_platform_ops { int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */); int (*suspend)(void *handle); int (*resume)(void *handle); void (*uninit)(void *handle); + int (*notify_crash)(void *handle); }; -void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops); +/** + * struct wil_platform_rops - wil platform module callbacks from + * platform driver to this driver + * @ramdump: store a ramdump from the wil firmware. The platform + * driver may add additional data to the ramdump to + * generate the final crash dump. + * @fw_recovery: start a firmware recovery process. Called as + * part of a crash recovery process which may include other + * related platform subsystems. + */ +struct wil_platform_rops { + int (*ramdump)(void *wil_handle, void *buf, uint32_t size); + int (*fw_recovery)(void *wil_handle); +}; + +/** + * wil_platform_init - initialize the platform driver + * + * @dev - pointer to the wil6210 device + * @ops - structure with platform driver operations. Platform + * driver will fill this structure with function pointers. + * @rops - structure with callbacks from platform driver to + * this driver. The platform driver copies the structure to + * its own storage. Can be NULL if this driver does not + * support crash recovery. + * @wil_handle - context for this driver that will be passed + * when platform driver invokes one of the callbacks in + * rops. May be NULL if rops is NULL. + */ +void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops, + const struct wil_platform_rops *rops, void *wil_handle); int __init wil_platform_modinit(void); void wil_platform_modexit(void); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6ed26baca0e5..e3ea74cdd4aa 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -228,6 +228,10 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { + if (!test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "WMI: cannot send command while FW not ready\n"); + return -EAGAIN; + } r->tail = wil_r(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.tail)); if (next_head != r->tail) diff --git a/drivers/net/wireless/atmel/Kconfig b/drivers/net/wireless/atmel/Kconfig new file mode 100644 index 000000000000..a43cfd163254 --- /dev/null +++ b/drivers/net/wireless/atmel/Kconfig @@ -0,0 +1,57 @@ +config WLAN_VENDOR_ATMEL + bool "Atmel devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_ATMEL + +config ATMEL + tristate "Atmel at76c50x chipset 802.11b support" + depends on CFG80211 && (PCI || PCMCIA) + select WIRELESS_EXT + select WEXT_PRIV + select FW_LOADER + select CRC32 + ---help--- + A driver 802.11b wireless cards based on the Atmel fast-vnet + chips. This driver supports standard Linux wireless extensions. + + Many cards based on this chipset do not have flash memory + and need their firmware loaded at start-up. If yours is + one of these, you will need to provide a firmware image + to be loaded into the card by the driver. The Atmel + firmware package can be downloaded from + <http://www.thekelleys.org.uk/atmel> + +config PCI_ATMEL + tristate "Atmel at76c506 PCI cards" + depends on ATMEL && PCI + ---help--- + Enable support for PCI and mini-PCI cards containing the + Atmel at76c506 chip. + +config PCMCIA_ATMEL + tristate "Atmel at76c502/at76c504 PCMCIA cards" + depends on ATMEL && PCMCIA + select WIRELESS_EXT + select FW_LOADER + select CRC32 + ---help--- + Enable support for PCMCIA cards containing the + Atmel at76c502 and at76c504 chips. + +config AT76C50X_USB + tristate "Atmel at76c503/at76c505/at76c505a USB cards" + depends on MAC80211 && USB + select FW_LOADER + ---help--- + Enable support for USB Wireless devices using Atmel at76c503, + at76c505 or at76c505a chips. + +endif # WLAN_VENDOR_ATMEL diff --git a/drivers/net/wireless/atmel/Makefile b/drivers/net/wireless/atmel/Makefile new file mode 100644 index 000000000000..e62e345f7af6 --- /dev/null +++ b/drivers/net/wireless/atmel/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_ATMEL) += atmel.o +obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o +obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o + +obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index dab25136214a..dab25136214a 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/atmel/at76c50x-usb.h index ae03271f878e..ae03271f878e 100644 --- a/drivers/net/wireless/at76c50x-usb.h +++ b/drivers/net/wireless/atmel/at76c50x-usb.h diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel/atmel.c index 6a1f03c271c1..6a1f03c271c1 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c diff --git a/drivers/net/wireless/atmel.h b/drivers/net/wireless/atmel/atmel.h index 96f7318cbb04..96f7318cbb04 100644 --- a/drivers/net/wireless/atmel.h +++ b/drivers/net/wireless/atmel/atmel.h diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c index 7afc9c5329fb..7afc9c5329fb 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel/atmel_cs.c diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel/atmel_pci.c index bcf1f274a251..bcf1f274a251 100644 --- a/drivers/net/wireless/atmel_pci.c +++ b/drivers/net/wireless/atmel/atmel_pci.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h deleted file mode 100644 index 21c7488b4732..000000000000 --- a/drivers/net/wireless/brcm80211/brcmfmac/common.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2014 Broadcom Corporation - * - * 
Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#ifndef BRCMFMAC_COMMON_H -#define BRCMFMAC_COMMON_H - -extern const u8 ALLFFMAC[ETH_ALEN]; - -/* Sets dongle media info (drv_version, mac address). */ -int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); - -#endif /* BRCMFMAC_COMMON_H */ diff --git a/drivers/net/wireless/broadcom/Kconfig b/drivers/net/wireless/broadcom/Kconfig new file mode 100644 index 000000000000..d3651ceb5046 --- /dev/null +++ b/drivers/net/wireless/broadcom/Kconfig @@ -0,0 +1,18 @@ +config WLAN_VENDOR_BROADCOM + bool "Broadcom devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_BROADCOM + +source "drivers/net/wireless/broadcom/b43/Kconfig" +source "drivers/net/wireless/broadcom/b43legacy/Kconfig" +source "drivers/net/wireless/broadcom/brcm80211/Kconfig" + +endif # WLAN_VENDOR_BROADCOM diff --git a/drivers/net/wireless/broadcom/Makefile b/drivers/net/wireless/broadcom/Makefile new file mode 100644 index 000000000000..9d5ac95710c3 --- /dev/null +++ b/drivers/net/wireless/broadcom/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_B43) += b43/ +obj-$(CONFIG_B43LEGACY) += b43legacy/ + +obj-$(CONFIG_BRCMFMAC) += brcm80211/ +obj-$(CONFIG_BRCMSMAC) += brcm80211/ diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/broadcom/b43/Kconfig index fba856032ca5..fba856032ca5 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/broadcom/b43/Kconfig diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile index ddc4df46656f..ddc4df46656f 100644 --- a/drivers/net/wireless/b43/Makefile +++ b/drivers/net/wireless/broadcom/b43/Makefile diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index 036552439816..036552439816 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/broadcom/b43/bus.c index 17d16a391fe6..17d16a391fe6 100644 --- a/drivers/net/wireless/b43/bus.c +++ b/drivers/net/wireless/broadcom/b43/bus.c diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/broadcom/b43/bus.h index 256c2c17939a..256c2c17939a 100644 --- a/drivers/net/wireless/b43/bus.h +++ b/drivers/net/wireless/broadcom/b43/bus.h diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c index b4bcd94aff6c..b4bcd94aff6c 100644 --- a/drivers/net/wireless/b43/debugfs.c +++ b/drivers/net/wireless/broadcom/b43/debugfs.c diff --git a/drivers/net/wireless/b43/debugfs.h 
b/drivers/net/wireless/broadcom/b43/debugfs.h index d05377745011..d05377745011 100644 --- a/drivers/net/wireless/b43/debugfs.h +++ b/drivers/net/wireless/broadcom/b43/debugfs.h diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 6837064908be..6837064908be 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/broadcom/b43/dma.h index df8c8cdcbdb5..df8c8cdcbdb5 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/broadcom/b43/dma.h diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c index d79ab2a227e1..d79ab2a227e1 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/broadcom/b43/leds.c diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/broadcom/b43/leds.h index 32b66d53cdac..32b66d53cdac 100644 --- a/drivers/net/wireless/b43/leds.h +++ b/drivers/net/wireless/broadcom/b43/leds.h diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/broadcom/b43/lo.c index a335f94c72ff..a335f94c72ff 100644 --- a/drivers/net/wireless/b43/lo.c +++ b/drivers/net/wireless/broadcom/b43/lo.c diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/broadcom/b43/lo.h index 7b4df3883bc2..7b4df3883bc2 100644 --- a/drivers/net/wireless/b43/lo.h +++ b/drivers/net/wireless/broadcom/b43/lo.h diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index ec013fbd6a81..ec013fbd6a81 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/broadcom/b43/main.h index c46430cc725c..c46430cc725c 100644 --- a/drivers/net/wireless/b43/main.h +++ b/drivers/net/wireless/broadcom/b43/main.h diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/broadcom/b43/phy_a.c index 99c036f5ecb7..99c036f5ecb7 100644 --- a/drivers/net/wireless/b43/phy_a.c +++ b/drivers/net/wireless/broadcom/b43/phy_a.c diff --git a/drivers/net/wireless/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h index f7d0d929a374..f7d0d929a374 100644 --- a/drivers/net/wireless/b43/phy_a.h +++ b/drivers/net/wireless/broadcom/b43/phy_a.h diff --git a/drivers/net/wireless/b43/phy_ac.c b/drivers/net/wireless/broadcom/b43/phy_ac.c index e75633d67938..e75633d67938 100644 --- a/drivers/net/wireless/b43/phy_ac.c +++ b/drivers/net/wireless/broadcom/b43/phy_ac.c diff --git a/drivers/net/wireless/b43/phy_ac.h b/drivers/net/wireless/broadcom/b43/phy_ac.h index d1ca79e0eb24..d1ca79e0eb24 100644 --- a/drivers/net/wireless/b43/phy_ac.h +++ b/drivers/net/wireless/broadcom/b43/phy_ac.h diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c index ec2b9c577b90..ec2b9c577b90 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/broadcom/b43/phy_common.c diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h index 78d86526799e..78d86526799e 100644 --- a/drivers/net/wireless/b43/phy_common.h +++ b/drivers/net/wireless/broadcom/b43/phy_common.h diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c index 462310e6e88f..462310e6e88f 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/broadcom/b43/phy_g.c diff --git a/drivers/net/wireless/b43/phy_g.h b/drivers/net/wireless/broadcom/b43/phy_g.h index 5413c906a3e7..5413c906a3e7 100644 
--- a/drivers/net/wireless/b43/phy_g.h +++ b/drivers/net/wireless/broadcom/b43/phy_g.h diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c index bd68945965d6..bd68945965d6 100644 --- a/drivers/net/wireless/b43/phy_ht.c +++ b/drivers/net/wireless/broadcom/b43/phy_ht.c diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/broadcom/b43/phy_ht.h index c086f56ce478..c086f56ce478 100644 --- a/drivers/net/wireless/b43/phy_ht.h +++ b/drivers/net/wireless/broadcom/b43/phy_ht.h diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/broadcom/b43/phy_lcn.c index 97461ccf3e1e..97461ccf3e1e 100644 --- a/drivers/net/wireless/b43/phy_lcn.c +++ b/drivers/net/wireless/broadcom/b43/phy_lcn.c diff --git a/drivers/net/wireless/b43/phy_lcn.h b/drivers/net/wireless/broadcom/b43/phy_lcn.h index 6a7092e13fff..6a7092e13fff 100644 --- a/drivers/net/wireless/b43/phy_lcn.h +++ b/drivers/net/wireless/broadcom/b43/phy_lcn.h diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 058a9f232050..058a9f232050 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/broadcom/b43/phy_lp.h index 62737f700cbc..62737f700cbc 100644 --- a/drivers/net/wireless/b43/phy_lp.h +++ b/drivers/net/wireless/broadcom/b43/phy_lp.h diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 9f0bcf3b8414..9f0bcf3b8414 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/broadcom/b43/phy_n.h index a6da2c31a99c..a6da2c31a99c 100644 --- a/drivers/net/wireless/b43/phy_n.h +++ b/drivers/net/wireless/broadcom/b43/phy_n.h diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c index a4ff5e2a42b9..a4ff5e2a42b9 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/broadcom/b43/pio.c diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/broadcom/b43/pio.h index 1e516147424f..1e516147424f 100644 --- a/drivers/net/wireless/b43/pio.h +++ b/drivers/net/wireless/broadcom/b43/pio.h diff --git a/drivers/net/wireless/b43/ppr.c b/drivers/net/wireless/broadcom/b43/ppr.c index 9a770279c415..9a770279c415 100644 --- a/drivers/net/wireless/b43/ppr.c +++ b/drivers/net/wireless/broadcom/b43/ppr.c diff --git a/drivers/net/wireless/b43/ppr.h b/drivers/net/wireless/broadcom/b43/ppr.h index 24d7447e9f01..24d7447e9f01 100644 --- a/drivers/net/wireless/b43/ppr.h +++ b/drivers/net/wireless/broadcom/b43/ppr.h diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/broadcom/b43/radio_2055.c index 5289a18ddd8c..5289a18ddd8c 100644 --- a/drivers/net/wireless/b43/radio_2055.c +++ b/drivers/net/wireless/broadcom/b43/radio_2055.c diff --git a/drivers/net/wireless/b43/radio_2055.h b/drivers/net/wireless/broadcom/b43/radio_2055.h index 67f96122f8d8..67f96122f8d8 100644 --- a/drivers/net/wireless/b43/radio_2055.h +++ b/drivers/net/wireless/broadcom/b43/radio_2055.h diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/broadcom/b43/radio_2056.c index 2ce25607c60d..2ce25607c60d 100644 --- a/drivers/net/wireless/b43/radio_2056.c +++ b/drivers/net/wireless/broadcom/b43/radio_2056.c diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/broadcom/b43/radio_2056.h index 5b86673459fa..5b86673459fa 100644 --- 
a/drivers/net/wireless/b43/radio_2056.h +++ b/drivers/net/wireless/broadcom/b43/radio_2056.h diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/broadcom/b43/radio_2057.c index ff1e026a61a1..ff1e026a61a1 100644 --- a/drivers/net/wireless/b43/radio_2057.c +++ b/drivers/net/wireless/broadcom/b43/radio_2057.c diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/broadcom/b43/radio_2057.h index 220d080238ff..220d080238ff 100644 --- a/drivers/net/wireless/b43/radio_2057.h +++ b/drivers/net/wireless/broadcom/b43/radio_2057.h diff --git a/drivers/net/wireless/b43/radio_2059.c b/drivers/net/wireless/broadcom/b43/radio_2059.c index a3cf9efd7e21..a3cf9efd7e21 100644 --- a/drivers/net/wireless/b43/radio_2059.c +++ b/drivers/net/wireless/broadcom/b43/radio_2059.c diff --git a/drivers/net/wireless/b43/radio_2059.h b/drivers/net/wireless/broadcom/b43/radio_2059.h index 9e22fb60588b..9e22fb60588b 100644 --- a/drivers/net/wireless/b43/radio_2059.h +++ b/drivers/net/wireless/broadcom/b43/radio_2059.h diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/broadcom/b43/rfkill.c index 70c2fcedd1bb..70c2fcedd1bb 100644 --- a/drivers/net/wireless/b43/rfkill.c +++ b/drivers/net/wireless/broadcom/b43/rfkill.c diff --git a/drivers/net/wireless/b43/rfkill.h b/drivers/net/wireless/broadcom/b43/rfkill.h index f046c3ca0519..f046c3ca0519 100644 --- a/drivers/net/wireless/b43/rfkill.h +++ b/drivers/net/wireless/broadcom/b43/rfkill.h diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/broadcom/b43/sdio.c index 59a521800694..59a521800694 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/broadcom/b43/sdio.c diff --git a/drivers/net/wireless/b43/sdio.h b/drivers/net/wireless/broadcom/b43/sdio.h index 1e93926f388f..1e93926f388f 100644 --- a/drivers/net/wireless/b43/sdio.h +++ b/drivers/net/wireless/broadcom/b43/sdio.h diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c index 3190493bd07f..3190493bd07f 100644 --- a/drivers/net/wireless/b43/sysfs.c +++ b/drivers/net/wireless/broadcom/b43/sysfs.c diff --git a/drivers/net/wireless/b43/sysfs.h b/drivers/net/wireless/broadcom/b43/sysfs.h index 12bda9ef1a85..12bda9ef1a85 100644 --- a/drivers/net/wireless/b43/sysfs.h +++ b/drivers/net/wireless/broadcom/b43/sysfs.h diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/broadcom/b43/tables.c index ea288df8aee9..ea288df8aee9 100644 --- a/drivers/net/wireless/b43/tables.c +++ b/drivers/net/wireless/broadcom/b43/tables.c diff --git a/drivers/net/wireless/b43/tables.h b/drivers/net/wireless/broadcom/b43/tables.h index 80e73c7cbac5..80e73c7cbac5 100644 --- a/drivers/net/wireless/b43/tables.h +++ b/drivers/net/wireless/broadcom/b43/tables.h diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c index cff187c5616d..cff187c5616d 100644 --- a/drivers/net/wireless/b43/tables_lpphy.c +++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c diff --git a/drivers/net/wireless/b43/tables_lpphy.h b/drivers/net/wireless/broadcom/b43/tables_lpphy.h index 84f1d265f657..84f1d265f657 100644 --- a/drivers/net/wireless/b43/tables_lpphy.h +++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.h diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c index b2f0d245bcf3..b2f0d245bcf3 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c diff --git 
a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/broadcom/b43/tables_nphy.h index b51f386db02f..b51f386db02f 100644 --- a/drivers/net/wireless/b43/tables_nphy.h +++ b/drivers/net/wireless/broadcom/b43/tables_nphy.h diff --git a/drivers/net/wireless/b43/tables_phy_ht.c b/drivers/net/wireless/broadcom/b43/tables_phy_ht.c index 176c49d74ef4..176c49d74ef4 100644 --- a/drivers/net/wireless/b43/tables_phy_ht.c +++ b/drivers/net/wireless/broadcom/b43/tables_phy_ht.c diff --git a/drivers/net/wireless/b43/tables_phy_ht.h b/drivers/net/wireless/broadcom/b43/tables_phy_ht.h index 1b5ef2bc770c..1b5ef2bc770c 100644 --- a/drivers/net/wireless/b43/tables_phy_ht.h +++ b/drivers/net/wireless/broadcom/b43/tables_phy_ht.h diff --git a/drivers/net/wireless/b43/tables_phy_lcn.c b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c index e347b8d80ea4..e347b8d80ea4 100644 --- a/drivers/net/wireless/b43/tables_phy_lcn.c +++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c diff --git a/drivers/net/wireless/b43/tables_phy_lcn.h b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.h index caff9db6831f..caff9db6831f 100644 --- a/drivers/net/wireless/b43/tables_phy_lcn.h +++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.h diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/broadcom/b43/wa.c index c218c08fb2f5..c218c08fb2f5 100644 --- a/drivers/net/wireless/b43/wa.c +++ b/drivers/net/wireless/broadcom/b43/wa.c diff --git a/drivers/net/wireless/b43/wa.h b/drivers/net/wireless/broadcom/b43/wa.h index e163c5e56e81..e163c5e56e81 100644 --- a/drivers/net/wireless/b43/wa.h +++ b/drivers/net/wireless/broadcom/b43/wa.h diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index 426dc13c44cd..426dc13c44cd 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/broadcom/b43/xmit.h index ba6115308068..ba6115308068 100644 --- a/drivers/net/wireless/b43/xmit.h +++ b/drivers/net/wireless/broadcom/b43/xmit.h diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/broadcom/b43legacy/Kconfig index 1ffa28835c58..1ffa28835c58 100644 --- a/drivers/net/wireless/b43legacy/Kconfig +++ b/drivers/net/wireless/broadcom/b43legacy/Kconfig diff --git a/drivers/net/wireless/b43legacy/Makefile b/drivers/net/wireless/broadcom/b43legacy/Makefile index 227a77e84362..227a77e84362 100644 --- a/drivers/net/wireless/b43legacy/Makefile +++ b/drivers/net/wireless/broadcom/b43legacy/Makefile diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h index 482476fdb1f3..482476fdb1f3 100644 --- a/drivers/net/wireless/b43legacy/b43legacy.h +++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c index 090910ea259e..090910ea259e 100644 --- a/drivers/net/wireless/b43legacy/debugfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c diff --git a/drivers/net/wireless/b43legacy/debugfs.h b/drivers/net/wireless/broadcom/b43legacy/debugfs.h index 9ee32158b947..9ee32158b947 100644 --- a/drivers/net/wireless/b43legacy/debugfs.h +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.h diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c index f9dd892b9f27..f9dd892b9f27 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/broadcom/b43legacy/dma.c diff --git 
a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/broadcom/b43legacy/dma.h index c3282f906bc7..c3282f906bc7 100644 --- a/drivers/net/wireless/b43legacy/dma.h +++ b/drivers/net/wireless/broadcom/b43legacy/dma.h diff --git a/drivers/net/wireless/b43legacy/ilt.c b/drivers/net/wireless/broadcom/b43legacy/ilt.c index ee5682e54204..ee5682e54204 100644 --- a/drivers/net/wireless/b43legacy/ilt.c +++ b/drivers/net/wireless/broadcom/b43legacy/ilt.c diff --git a/drivers/net/wireless/b43legacy/ilt.h b/drivers/net/wireless/broadcom/b43legacy/ilt.h index 48bcf37eccb8..48bcf37eccb8 100644 --- a/drivers/net/wireless/b43legacy/ilt.h +++ b/drivers/net/wireless/broadcom/b43legacy/ilt.h diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c index fd4565389c77..fd4565389c77 100644 --- a/drivers/net/wireless/b43legacy/leds.c +++ b/drivers/net/wireless/broadcom/b43legacy/leds.c diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/broadcom/b43legacy/leds.h index 9ff6750dc57f..9ff6750dc57f 100644 --- a/drivers/net/wireless/b43legacy/leds.h +++ b/drivers/net/wireless/broadcom/b43legacy/leds.h diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index afc1fb3e38df..afc1fb3e38df 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c diff --git a/drivers/net/wireless/b43legacy/main.h b/drivers/net/wireless/broadcom/b43legacy/main.h index b74a058d7bac..b74a058d7bac 100644 --- a/drivers/net/wireless/b43legacy/main.h +++ b/drivers/net/wireless/broadcom/b43legacy/main.h diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c index 995c7d0c212a..995c7d0c212a 100644 --- a/drivers/net/wireless/b43legacy/phy.c +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c diff --git a/drivers/net/wireless/b43legacy/phy.h b/drivers/net/wireless/broadcom/b43legacy/phy.h index 831a7a4760e5..831a7a4760e5 100644 --- a/drivers/net/wireless/b43legacy/phy.h +++ b/drivers/net/wireless/broadcom/b43legacy/phy.h diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/broadcom/b43legacy/pio.c index 282eedec675e..282eedec675e 100644 --- a/drivers/net/wireless/b43legacy/pio.c +++ b/drivers/net/wireless/broadcom/b43legacy/pio.c diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/broadcom/b43legacy/pio.h index 8e6773ea6e75..8e6773ea6e75 100644 --- a/drivers/net/wireless/b43legacy/pio.h +++ b/drivers/net/wireless/broadcom/b43legacy/pio.h diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c index 9501420340a9..9501420340a9 100644 --- a/drivers/net/wireless/b43legacy/radio.c +++ b/drivers/net/wireless/broadcom/b43legacy/radio.c diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/broadcom/b43legacy/radio.h index dd2976d1d561..dd2976d1d561 100644 --- a/drivers/net/wireless/b43legacy/radio.h +++ b/drivers/net/wireless/broadcom/b43legacy/radio.h diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/broadcom/b43legacy/rfkill.c index 7c1bdbc02569..7c1bdbc02569 100644 --- a/drivers/net/wireless/b43legacy/rfkill.c +++ b/drivers/net/wireless/broadcom/b43legacy/rfkill.c diff --git a/drivers/net/wireless/b43legacy/rfkill.h b/drivers/net/wireless/broadcom/b43legacy/rfkill.h index 75585571c544..75585571c544 100644 --- a/drivers/net/wireless/b43legacy/rfkill.h +++ b/drivers/net/wireless/broadcom/b43legacy/rfkill.h diff --git 
a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c index 2a1da15c913b..2a1da15c913b 100644 --- a/drivers/net/wireless/b43legacy/sysfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c diff --git a/drivers/net/wireless/b43legacy/sysfs.h b/drivers/net/wireless/broadcom/b43legacy/sysfs.h index 417d509803c7..417d509803c7 100644 --- a/drivers/net/wireless/b43legacy/sysfs.h +++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.h diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c index 34bf3f0b729f..34bf3f0b729f 100644 --- a/drivers/net/wireless/b43legacy/xmit.c +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/broadcom/b43legacy/xmit.h index 289db00a4a7b..289db00a4a7b 100644 --- a/drivers/net/wireless/b43legacy/xmit.h +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.h diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig index ab42b1fea03c..ab42b1fea03c 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig diff --git a/drivers/net/wireless/brcm80211/Makefile b/drivers/net/wireless/broadcom/brcm80211/Makefile index b987920e982e..b987920e982e 100644 --- a/drivers/net/wireless/brcm80211/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/Makefile diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index dc4c75083085..9e4b505ca593 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -16,8 +16,8 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
ccflags-y += \ - -Idrivers/net/wireless/brcm80211/brcmfmac \ - -Idrivers/net/wireless/brcm80211/include + -Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \ + -Idrivers/net/wireless/broadcom/brcm80211/include ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 288c84e7c56b..6af658e443e4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -187,7 +187,8 @@ retry: goto retry; if (id != bcdc->reqid) { brcmf_err("%s: unexpected request id %d (expected %d)\n", - brcmf_ifname(drvr, ifidx), id, bcdc->reqid); + brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id, + bcdc->reqid); ret = -EINVAL; goto done; } @@ -234,7 +235,8 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, if (id != bcdc->reqid) { brcmf_err("%s: unexpected request id %d (expected %d)\n", - brcmf_ifname(drvr, ifidx), id, bcdc->reqid); + brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id, + bcdc->reqid); ret = -EINVAL; goto done; } @@ -298,13 +300,13 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) != BCDC_PROTO_VER) { brcmf_err("%s: non-BCDC packet received, flags 0x%x\n", - brcmf_ifname(drvr, tmp_if->ifidx), h->flags); + brcmf_ifname(tmp_if), h->flags); return -EBADE; } if (h->flags & BCDC_FLAG_SUM_GOOD) { brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n", - brcmf_ifname(drvr, tmp_if->ifidx), h->flags); + brcmf_ifname(tmp_if), h->flags); pktbuf->ip_summed = CHECKSUM_UNNECESSARY; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h index 6003179c0ceb..6003179c0ceb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 410a6645d316..53637399bb99 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -47,6 +47,8 @@ #include "debug.h" #include "sdio.h" #include "of.h" +#include "core.h" +#include "common.h" #define SDIOH_API_ACCESS_RETRY_LIMIT 2 @@ -57,7 +59,6 @@ /* Maximum milliseconds to wait for F2 to come up */ #define SDIO_WAIT_F2RDY 3000 -#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */ #define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */ struct brcmf_sdiod_freezer { @@ -68,10 +69,6 @@ struct brcmf_sdiod_freezer { struct completion resumed; }; -static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE; -module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0); -MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]"); - static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id) { struct brcmf_bus *bus_if = dev_get_drvdata(dev_id); @@ -890,7 +887,8 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) if (!sdiodev->sg_support) return; - nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz); + nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, + sdiodev->bus_if->drvr->settings->sdiod_txglomsz); nents += (nents >> 4) + 1; WARN_ON(nents > sdiodev->max_segment_count); @@ -902,7 +900,7 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) sdiodev->sg_support = false; } - sdiodev->txglomsz = brcmf_sdiod_txglomsz; + 
sdiodev->txglomsz = sdiodev->bus_if->drvr->settings->sdiod_txglomsz; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c index 4e33f96b3dd1..14a70d4b4e86 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c @@ -29,7 +29,7 @@ #include "cfg80211.h" /* T1 start SCO/eSCO priority suppression */ -#define BRCMF_BTCOEX_OPPR_WIN_TIME 2000 +#define BRCMF_BTCOEX_OPPR_WIN_TIME msecs_to_jiffies(2000) /* BT registers values during DHCP */ #define BRCMF_BT_DHCP_REG50 0x8022 @@ -314,8 +314,7 @@ static void brcmf_btcoex_handler(struct work_struct *work) } else { btci->timeout -= BRCMF_BTCOEX_OPPR_WIN_TIME; mod_timer(&btci->timer, - jiffies + - msecs_to_jiffies(BRCMF_BTCOEX_OPPR_WIN_TIME)); + jiffies + BRCMF_BTCOEX_OPPR_WIN_TIME); } btci->timer_on = true; break; @@ -328,12 +327,11 @@ static void brcmf_btcoex_handler(struct work_struct *work) /* DHCP is not over yet, start lowering BT priority */ brcmf_dbg(INFO, "DHCP T1:%d expired\n", - BRCMF_BTCOEX_OPPR_WIN_TIME); + jiffies_to_msecs(BRCMF_BTCOEX_OPPR_WIN_TIME)); brcmf_btcoex_boost_wifi(btci, true); btci->bt_state = BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT; - mod_timer(&btci->timer, - jiffies + msecs_to_jiffies(btci->timeout)); + mod_timer(&btci->timer, jiffies + btci->timeout); btci->timer_on = true; break; @@ -477,7 +475,7 @@ int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif, return -EBUSY; /* Start BT timer only for SCO connection */ if (brcmf_btcoex_is_sco_active(ifp)) { - btci->timeout = duration; + btci->timeout = msecs_to_jiffies(duration); btci->vif = vif; brcmf_btcoex_dhcp_start(btci); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h index 19647c68aa9e..19647c68aa9e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 230cad788ace..36093f93bfbe 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -137,7 +137,7 @@ struct brcmf_bus { bool always_use_fws_queue; bool wowl_supported; - struct brcmf_bus_ops *ops; + const struct brcmf_bus_ops *ops; struct brcmf_bus_msgbuf *msgbuf; }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index deb5f78dcacc..7b01e4ddb315 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -91,6 +91,12 @@ #define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 #define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS 20 +#define BRCMF_SCAN_CHANNEL_TIME 40 +#define BRCMF_SCAN_UNASSOC_TIME 40 +#define BRCMF_SCAN_PASSIVE_TIME 120 + +#define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000) + #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) @@ -232,11 +238,6 @@ struct parsed_vndr_ies { struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT]; }; -static int brcmf_roamoff; -module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR); -MODULE_PARM_DESC(roamoff, "do not use internal roaming engine"); - - static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, struct cfg80211_chan_def *ch) { @@ -392,15 +393,23 @@ static int brcmf_vif_change_validate(struct 
brcmf_cfg80211_info *cfg, { int iftype_num[NUM_NL80211_IFTYPES]; struct brcmf_cfg80211_vif *pos; + bool check_combos = false; + int ret = 0; memset(&iftype_num[0], 0, sizeof(iftype_num)); list_for_each_entry(pos, &cfg->vif_list, list) - if (pos == vif) + if (pos == vif) { iftype_num[new_type]++; - else + } else { + /* concurrent interfaces so need check combinations */ + check_combos = true; iftype_num[pos->wdev.iftype]++; + } - return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num); + if (check_combos) + ret = cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num); + + return ret; } static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg, @@ -559,7 +568,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, /* wait for firmware event */ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD, - msecs_to_jiffies(1500)); + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { brcmf_err("timeout occurred\n"); @@ -776,7 +785,8 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, s32 ap = 0; s32 err = 0; - brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, type=%d\n", ifp->bsscfgidx, + type); /* WAR: There are a number of p2p interface related problems which * need to be handled initially (before doing the validate). @@ -945,7 +955,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, static s32 brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, - struct cfg80211_scan_request *request, u16 action) + struct cfg80211_scan_request *request) { s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + offsetof(struct brcmf_escan_params_le, params_le); @@ -959,7 +969,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, params_size += sizeof(u32) * ((request->n_channels + 1) / 2); /* Allocate space for populating ssids in struct */ - params_size += sizeof(struct brcmf_ssid) * request->n_ssids; + params_size += sizeof(struct brcmf_ssid_le) * request->n_ssids; } params = kzalloc(params_size, GFP_KERNEL); @@ -970,7 +980,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN); brcmf_escan_prep(cfg, &params->params_le, request); params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); - params->action = cpu_to_le16(action); + params->action = cpu_to_le16(WL_ESCAN_ACTION_START); params->sync_id = cpu_to_le16(0x1234); err = brcmf_fil_iovar_data_set(ifp, "escan", params, params_size); @@ -1012,7 +1022,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy, results->count = 0; results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE; - err = escan->run(cfg, ifp, request, WL_ESCAN_ACTION_START); + err = escan->run(cfg, ifp, request); if (err) brcmf_scan_config_mpc(ifp, 1); return err; @@ -1026,11 +1036,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, struct brcmf_if *ifp = vif->ifp; struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct cfg80211_ssid *ssids; - struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int; u32 passive_scan; bool escan_req; bool spec_scan; s32 err; + struct brcmf_ssid_le ssid_le; u32 SSID_len; brcmf_dbg(SCAN, "START ESCAN\n"); @@ -1083,13 +1093,13 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, } else { brcmf_dbg(SCAN, "ssid \"%s\", ssid_len (%d)\n", ssids->ssid, ssids->ssid_len); - memset(&sr->ssid_le, 0, sizeof(sr->ssid_le)); - 
SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len); - sr->ssid_le.SSID_len = cpu_to_le32(0); + memset(&ssid_le, 0, sizeof(ssid_le)); + SSID_len = min_t(u8, sizeof(ssid_le.SSID), ssids->ssid_len); + ssid_le.SSID_len = cpu_to_le32(0); spec_scan = false; if (SSID_len) { - memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len); - sr->ssid_le.SSID_len = cpu_to_le32(SSID_len); + memcpy(ssid_le.SSID, ssids->ssid, SSID_len); + ssid_le.SSID_len = cpu_to_le32(SSID_len); spec_scan = true; } else brcmf_dbg(SCAN, "Broadcast scan\n"); @@ -1102,12 +1112,12 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, goto scan_out; } brcmf_scan_config_mpc(ifp, 0); - err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, - &sr->ssid_le, sizeof(sr->ssid_le)); + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, &ssid_le, + sizeof(ssid_le)); if (err) { if (err == -EBUSY) brcmf_dbg(INFO, "BUSY: scan for \"%s\" canceled\n", - sr->ssid_le.SSID); + ssid_le.SSID); else brcmf_err("WLC_SCAN error (%d)\n", err); @@ -1260,17 +1270,17 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) brcmf_dbg(TRACE, "Enter\n"); - if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) { + if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) { brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); err = brcmf_fil_cmd_data_set(vif->ifp, BRCMF_C_DISASSOC, NULL, 0); if (err) { brcmf_err("WLC_DISASSOC failed (%d)\n", err); } - clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); - cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, - true, GFP_KERNEL); - + if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) || + (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) + cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, + true, GFP_KERNEL); } clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); @@ -1291,6 +1301,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, s32 wsec = 0; s32 bcnprd; u16 chanspec; + u32 ssid_len; brcmf_dbg(TRACE, "Enter\n"); if (!check_vif_up(ifp->vif)) @@ -1368,17 +1379,15 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, memset(&join_params, 0, sizeof(struct brcmf_join_params)); /* SSID */ - profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32); - memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len); - memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len); - join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); + ssid_len = min_t(u32, params->ssid_len, IEEE80211_MAX_SSID_LEN); + memcpy(join_params.ssid_le.SSID, params->ssid, ssid_len); + join_params.ssid_le.SSID_len = cpu_to_le32(ssid_len); join_params_size = sizeof(join_params.ssid_le); /* BSSID */ if (params->bssid) { memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN); - join_params_size = sizeof(join_params.ssid_le) + - BRCMF_ASSOC_PARAMS_FIXED_SIZE; + join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE; memcpy(profile->bssid, params->bssid, ETH_ALEN); } else { eth_broadcast_addr(join_params.params_le.bssid); @@ -1436,10 +1445,16 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) struct brcmf_if *ifp = netdev_priv(ndev); brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) - return -EIO; + if (!check_vif_up(ifp->vif)) { + /* When driver is being unloaded, it can end up here. If an + * error is returned then later on a debug trace in the wireless + * core module will be printed. 
To avoid this 0 is returned. + */ + return 0; + } brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING); + brcmf_net_setcarrier(ifp, false); brcmf_dbg(TRACE, "Exit\n"); @@ -1728,7 +1743,6 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; struct ieee80211_channel *chan = sme->channel; struct brcmf_join_params join_params; size_t join_params_size; @@ -1739,6 +1753,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_ext_join_params_le *ext_join_params; u16 chanspec; s32 err = 0; + u32 ssid_len; brcmf_dbg(TRACE, "Enter\n"); if (!check_vif_up(ifp->vif)) @@ -1824,15 +1839,6 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, goto done; } - profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID), - (u32)sme->ssid_len); - memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len); - if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { - profile->ssid.SSID[profile->ssid.SSID_len] = 0; - brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID, - profile->ssid.SSID_len); - } - /* Join with specific BSSID and cached SSID * If SSID is zero join based on BSSID only */ @@ -1845,9 +1851,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, err = -ENOMEM; goto done; } - ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); - memcpy(&ext_join_params->ssid_le.SSID, sme->ssid, - profile->ssid.SSID_len); + ssid_len = min_t(u32, sme->ssid_len, IEEE80211_MAX_SSID_LEN); + ext_join_params->ssid_le.SSID_len = cpu_to_le32(ssid_len); + memcpy(&ext_join_params->ssid_le.SSID, sme->ssid, ssid_len); + if (ssid_len < IEEE80211_MAX_SSID_LEN) + brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", + ext_join_params->ssid_le.SSID, ssid_len); /* Set up join scan parameters */ ext_join_params->scan_le.scan_type = -1; @@ -1895,8 +1904,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, memset(&join_params, 0, sizeof(join_params)); join_params_size = sizeof(join_params.ssid_le); - memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len); - join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); + memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid_len); + join_params.ssid_le.SSID_len = cpu_to_le32(ssid_len); if (sme->bssid) memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); @@ -2423,6 +2432,54 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) } static s32 +brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, + struct station_info *sinfo) +{ + struct brcmf_scb_val_le scbval; + struct brcmf_pktcnt_le pktcnt; + s32 err; + u32 rate; + u32 rssi; + + /* Get the current tx rate */ + err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate); + if (err < 0) { + brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err); + return err; + } + sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->txrate.legacy = rate * 5; + + memset(&scbval, 0, sizeof(scbval)); + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, &scbval, + sizeof(scbval)); + if (err) { + brcmf_err("BRCMF_C_GET_RSSI error (%d)\n", err); + return err; + } + rssi = le32_to_cpu(scbval.val); + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->signal = rssi; + + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt, + sizeof(pktcnt)); + if (err) { + brcmf_err("BRCMF_C_GET_GET_PKTCNTS error 
(%d)\n", err); + return err; + } + sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) | + BIT(NL80211_STA_INFO_RX_DROP_MISC) | + BIT(NL80211_STA_INFO_TX_PACKETS) | + BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt); + sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt); + sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt); + sinfo->tx_failed = le32_to_cpu(pktcnt.tx_bad_pkt); + + return 0; +} + +static s32 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac, struct station_info *sinfo) { @@ -2439,6 +2496,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, if (!check_vif_up(ifp->vif)) return -EIO; + if (brcmf_is_ibssmode(ifp->vif)) + return brcmf_cfg80211_get_station_ibss(ifp, sinfo); + memset(&sta_info_le, 0, sizeof(sta_info_le)); memcpy(&sta_info_le, mac, ETH_ALEN); err = brcmf_fil_iovar_data_get(ifp, "tdls_sta_info", @@ -2691,8 +2751,8 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg) return err; } -static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg, - struct net_device *ndev, const u8 *bssid) +static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, const u8 *bssid) { struct wiphy *wiphy = cfg_to_wiphy(cfg); struct ieee80211_channel *notify_channel; @@ -2737,6 +2797,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg, band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(ch.chnum, band->band); + cfg->channel = freq; notify_channel = ieee80211_get_channel(wiphy, freq); notify_capability = le16_to_cpu(bi->capability); @@ -2775,9 +2836,7 @@ CleanUp: static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp) { - struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev); struct brcmf_bss_info_le *bi; - struct brcmf_ssid *ssid; const struct brcmf_tlv *tim; u16 beacon_interval; u8 dtim_period; @@ -2789,8 +2848,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, if (brcmf_is_ibssmode(ifp->vif)) return err; - ssid = &profile->ssid; - *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX); @@ -2921,7 +2978,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, status = e->status; if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { - brcmf_err("scan not ready, bssidx=%d\n", ifp->bssidx); + brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx); return -EPERM; } @@ -3013,296 +3070,7 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg) brcmf_cfg80211_escan_timeout_worker); } -static __always_inline void brcmf_delay(u32 ms) -{ - if (ms < 1000 / HZ) { - cond_resched(); - mdelay(ms); - } else { - msleep(ms); - } -} - -static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4], - u8 *pattern, u32 patternsize, u8 *mask, - u32 packet_offset) -{ - struct brcmf_fil_wowl_pattern_le *filter; - u32 masksize; - u32 patternoffset; - u8 *buf; - u32 bufsize; - s32 ret; - - masksize = (patternsize + 7) / 8; - patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize; - - bufsize = sizeof(*filter) + patternsize + masksize; - buf = kzalloc(bufsize, GFP_KERNEL); - if (!buf) - return -ENOMEM; - filter = (struct brcmf_fil_wowl_pattern_le *)buf; - - memcpy(filter->cmd, cmd, 4); - filter->masksize = cpu_to_le32(masksize); - filter->offset = cpu_to_le32(packet_offset); - filter->patternoffset = cpu_to_le32(patternoffset); - filter->patternsize = 
cpu_to_le32(patternsize); - filter->type = cpu_to_le32(BRCMF_WOWL_PATTERN_TYPE_BITMAP); - - if ((mask) && (masksize)) - memcpy(buf + sizeof(*filter), mask, masksize); - if ((pattern) && (patternsize)) - memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize); - - ret = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize); - - kfree(buf); - return ret; -} - -static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) -{ - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct net_device *ndev = cfg_to_ndev(cfg); - struct brcmf_if *ifp = netdev_priv(ndev); - - brcmf_dbg(TRACE, "Enter\n"); - - if (cfg->wowl_enabled) { - brcmf_configure_arp_offload(ifp, true); - brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, - cfg->pre_wowl_pmmode); - brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0); - brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0); - cfg->wowl_enabled = false; - } - return 0; -} - -static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, - struct brcmf_if *ifp, - struct cfg80211_wowlan *wowl) -{ - u32 wowl_config; - u32 i; - - brcmf_dbg(TRACE, "Suspend, wowl config.\n"); - - brcmf_configure_arp_offload(ifp, false); - brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->pre_wowl_pmmode); - brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX); - - wowl_config = 0; - if (wowl->disconnect) - wowl_config = BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | BRCMF_WOWL_RETR; - if (wowl->magic_pkt) - wowl_config |= BRCMF_WOWL_MAGIC; - if ((wowl->patterns) && (wowl->n_patterns)) { - wowl_config |= BRCMF_WOWL_NET; - for (i = 0; i < wowl->n_patterns; i++) { - brcmf_config_wowl_pattern(ifp, "add", - (u8 *)wowl->patterns[i].pattern, - wowl->patterns[i].pattern_len, - (u8 *)wowl->patterns[i].mask, - wowl->patterns[i].pkt_offset); - } - } - brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config); - brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1); - brcmf_bus_wowl_config(cfg->pub->bus_if, true); - cfg->wowl_enabled = true; -} - -static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, - struct cfg80211_wowlan *wowl) -{ - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct net_device *ndev = cfg_to_ndev(cfg); - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_cfg80211_vif *vif; - - brcmf_dbg(TRACE, "Enter\n"); - - /* if the primary net_device is not READY there is nothing - * we can do but pray resume goes smoothly. 
- */ - if (!check_vif_up(ifp->vif)) - goto exit; - - /* end any scanning */ - if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) - brcmf_abort_scanning(cfg); - - if (wowl == NULL) { - brcmf_bus_wowl_config(cfg->pub->bus_if, false); - list_for_each_entry(vif, &cfg->vif_list, list) { - if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) - continue; - /* While going to suspend if associated with AP - * disassociate from AP to save power while system is - * in suspended state - */ - brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED); - /* Make sure WPA_Supplicant receives all the event - * generated due to DISASSOC call to the fw to keep - * the state fw and WPA_Supplicant state consistent - */ - brcmf_delay(500); - } - /* Configure MPC */ - brcmf_set_mpc(ifp, 1); - - } else { - /* Configure WOWL paramaters */ - brcmf_configure_wowl(cfg, ifp, wowl); - } - -exit: - brcmf_dbg(TRACE, "Exit\n"); - /* clear any scanning activity */ - cfg->scan_status = 0; - return 0; -} - -static __used s32 -brcmf_update_pmklist(struct net_device *ndev, - struct brcmf_cfg80211_pmk_list *pmk_list, s32 err) -{ - int i, j; - u32 pmkid_len; - - pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid); - - brcmf_dbg(CONN, "No of elements %d\n", pmkid_len); - for (i = 0; i < pmkid_len; i++) { - brcmf_dbg(CONN, "PMKID[%d]: %pM =\n", i, - &pmk_list->pmkids.pmkid[i].BSSID); - for (j = 0; j < WLAN_PMKID_LEN; j++) - brcmf_dbg(CONN, "%02x\n", - pmk_list->pmkids.pmkid[i].PMKID[j]); - } - - if (!err) - brcmf_fil_iovar_data_set(netdev_priv(ndev), "pmkid_info", - (char *)pmk_list, sizeof(*pmk_list)); - - return err; -} - -static s32 -brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_pmksa *pmksa) -{ - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct brcmf_if *ifp = netdev_priv(ndev); - struct pmkid_list *pmkids = &cfg->pmk_list->pmkids; - s32 err = 0; - u32 pmkid_len, i; - - brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) - return -EIO; - - pmkid_len = le32_to_cpu(pmkids->npmkid); - for (i = 0; i < pmkid_len; i++) - if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN)) - break; - if (i < WL_NUM_PMKIDS_MAX) { - memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN); - memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); - if (i == pmkid_len) { - pmkid_len++; - pmkids->npmkid = cpu_to_le32(pmkid_len); - } - } else - err = -EINVAL; - - brcmf_dbg(CONN, "set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", - pmkids->pmkid[pmkid_len].BSSID); - for (i = 0; i < WLAN_PMKID_LEN; i++) - brcmf_dbg(CONN, "%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]); - - err = brcmf_update_pmklist(ndev, cfg->pmk_list, err); - - brcmf_dbg(TRACE, "Exit\n"); - return err; -} - -static s32 -brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_pmksa *pmksa) -{ - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct brcmf_if *ifp = netdev_priv(ndev); - struct pmkid_list pmkid; - s32 err = 0; - u32 pmkid_len, i; - - brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) - return -EIO; - - memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN); - memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); - - brcmf_dbg(CONN, "del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", - &pmkid.pmkid[0].BSSID); - for (i = 0; i < WLAN_PMKID_LEN; i++) - brcmf_dbg(CONN, "%02x\n", pmkid.pmkid[0].PMKID[i]); - - pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid); - for (i = 0; i < pmkid_len; i++) - if (!memcmp - (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID, - 
ETH_ALEN)) - break; - - if ((pmkid_len > 0) - && (i < pmkid_len)) { - memset(&cfg->pmk_list->pmkids.pmkid[i], 0, - sizeof(struct pmkid)); - for (; i < (pmkid_len - 1); i++) { - memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, - &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID, - ETH_ALEN); - memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, - &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID, - WLAN_PMKID_LEN); - } - cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1); - } else - err = -EINVAL; - - err = brcmf_update_pmklist(ndev, cfg->pmk_list, err); - - brcmf_dbg(TRACE, "Exit\n"); - return err; - -} - -static s32 -brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) -{ - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct brcmf_if *ifp = netdev_priv(ndev); - s32 err = 0; - - brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) - return -EIO; - - memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list)); - err = brcmf_update_pmklist(ndev, cfg->pmk_list, err); - - brcmf_dbg(TRACE, "Exit\n"); - return err; - -} - -/* - * PFN result doesn't have all the info which are - * required by the supplicant +/* PFN result doesn't have all the info which are required by the supplicant * (For e.g IEs) Do a target Escan so that sched scan results are reported * via wl_inform_single_bss in the required format. Escan does require the * scan request in the form of cfg80211_scan_request. For timebeing, create @@ -3336,8 +3104,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, result_count = le32_to_cpu(pfn_result->count); status = le32_to_cpu(pfn_result->status); - /* - * PFN event is limited to fit 512 bytes so we may get + /* PFN event is limited to fit 512 bytes so we may get * multiple NET_FOUND events. For now place a warning here. */ WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE); @@ -3439,9 +3206,14 @@ static int brcmf_dev_pno_clean(struct net_device *ndev) return ret; } -static int brcmf_dev_pno_config(struct net_device *ndev) +static int brcmf_dev_pno_config(struct brcmf_if *ifp, + struct cfg80211_sched_scan_request *request) { struct brcmf_pno_param_le pfn_param; + struct brcmf_pno_macaddr_le pfn_mac; + s32 err; + u8 *mac_mask; + int i; memset(&pfn_param, 0, sizeof(pfn_param)); pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION); @@ -3454,8 +3226,37 @@ static int brcmf_dev_pno_config(struct net_device *ndev) /* set up pno scan fr */ pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME); - return brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfn_set", - &pfn_param, sizeof(pfn_param)); + err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param, + sizeof(pfn_param)); + if (err) { + brcmf_err("pfn_set failed, err=%d\n", err); + return err; + } + + /* Find out if mac randomization should be turned on */ + if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) + return 0; + + pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER; + pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC; + + memcpy(pfn_mac.mac, request->mac_addr, ETH_ALEN); + mac_mask = request->mac_addr_mask; + for (i = 0; i < ETH_ALEN; i++) { + pfn_mac.mac[i] &= mac_mask[i]; + pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]); + } + /* Clear multi bit */ + pfn_mac.mac[0] &= 0xFE; + /* Set locally administered */ + pfn_mac.mac[0] |= 0x02; + + err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac, + sizeof(pfn_mac)); + if (err) + brcmf_err("pfn_macaddr failed, err=%d\n", err); + + return err; } static int @@ -3493,9 +3294,8 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy, brcmf_dbg(SCAN, ">>> Active scan req 
for ssid (%s)\n", request->ssids[i].ssid); - /* - * match_set ssids is a supert set of n_ssid list, - * so we need not add these set seperately. + /* match_set ssids is a supert set of n_ssid list, + * so we need not add these set separately. */ } } @@ -3509,11 +3309,8 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy, } /* configure pno */ - ret = brcmf_dev_pno_config(ndev); - if (ret < 0) { - brcmf_err("PNO setup failed!! ret=%d\n", ret); + if (brcmf_dev_pno_config(ifp, request)) return -EINVAL; - } /* configure each match set */ for (i = 0; i < request->n_match_sets; i++) { @@ -3563,6 +3360,425 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy, return 0; } +static __always_inline void brcmf_delay(u32 ms) +{ + if (ms < 1000 / HZ) { + cond_resched(); + mdelay(ms); + } else { + msleep(ms); + } +} + +static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4], + u8 *pattern, u32 patternsize, u8 *mask, + u32 packet_offset) +{ + struct brcmf_fil_wowl_pattern_le *filter; + u32 masksize; + u32 patternoffset; + u8 *buf; + u32 bufsize; + s32 ret; + + masksize = (patternsize + 7) / 8; + patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize; + + bufsize = sizeof(*filter) + patternsize + masksize; + buf = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + filter = (struct brcmf_fil_wowl_pattern_le *)buf; + + memcpy(filter->cmd, cmd, 4); + filter->masksize = cpu_to_le32(masksize); + filter->offset = cpu_to_le32(packet_offset); + filter->patternoffset = cpu_to_le32(patternoffset); + filter->patternsize = cpu_to_le32(patternsize); + filter->type = cpu_to_le32(BRCMF_WOWL_PATTERN_TYPE_BITMAP); + + if ((mask) && (masksize)) + memcpy(buf + sizeof(*filter), mask, masksize); + if ((pattern) && (patternsize)) + memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize); + + ret = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize); + + kfree(buf); + return ret; +} + +static s32 +brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, + void *data) +{ + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_pno_scanresults_le *pfn_result; + struct brcmf_pno_net_info_le *netinfo; + + brcmf_dbg(SCAN, "Enter\n"); + + pfn_result = (struct brcmf_pno_scanresults_le *)data; + + if (e->event_code == BRCMF_E_PFN_NET_LOST) { + brcmf_dbg(SCAN, "PFN NET LOST event. Ignore\n"); + return 0; + } + + if (le32_to_cpu(pfn_result->count) < 1) { + brcmf_err("Invalid result count, expected 1 (%d)\n", + le32_to_cpu(pfn_result->count)); + return -EINVAL; + } + + data += sizeof(struct brcmf_pno_scanresults_le); + netinfo = (struct brcmf_pno_net_info_le *)data; + memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); + cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; + cfg->wowl.nd->n_channels = 1; + cfg->wowl.nd->channels[0] = + ieee80211_channel_to_frequency(netinfo->channel, + netinfo->channel <= CH_MAX_2G_CHANNEL ? 
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ); + cfg->wowl.nd_info->n_matches = 1; + cfg->wowl.nd_info->matches[0] = cfg->wowl.nd; + + /* Inform (the resume task) that the net detect information was recvd */ + cfg->wowl.nd_data_completed = true; + wake_up(&cfg->wowl.nd_data_wait); + + return 0; +} + +#ifdef CONFIG_PM + +static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_wowl_wakeind_le wake_ind_le; + struct cfg80211_wowlan_wakeup wakeup_data; + struct cfg80211_wowlan_wakeup *wakeup; + u32 wakeind; + s32 err; + int timeout; + + err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le, + sizeof(wake_ind_le)); + if (err) { + brcmf_err("Get wowl_wakeind failed, err = %d\n", err); + return; + } + + wakeind = le32_to_cpu(wake_ind_le.ucode_wakeind); + if (wakeind & (BRCMF_WOWL_MAGIC | BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | + BRCMF_WOWL_RETR | BRCMF_WOWL_NET | + BRCMF_WOWL_PFN_FOUND)) { + wakeup = &wakeup_data; + memset(&wakeup_data, 0, sizeof(wakeup_data)); + wakeup_data.pattern_idx = -1; + + if (wakeind & BRCMF_WOWL_MAGIC) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_MAGIC\n"); + wakeup_data.magic_pkt = true; + } + if (wakeind & BRCMF_WOWL_DIS) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_DIS\n"); + wakeup_data.disconnect = true; + } + if (wakeind & BRCMF_WOWL_BCN) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_BCN\n"); + wakeup_data.disconnect = true; + } + if (wakeind & BRCMF_WOWL_RETR) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_RETR\n"); + wakeup_data.disconnect = true; + } + if (wakeind & BRCMF_WOWL_NET) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_NET\n"); + /* For now always map to pattern 0, no API to get + * correct information available at the moment. 
+ */ + wakeup_data.pattern_idx = 0; + } + if (wakeind & BRCMF_WOWL_PFN_FOUND) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_PFN_FOUND\n"); + timeout = wait_event_timeout(cfg->wowl.nd_data_wait, + cfg->wowl.nd_data_completed, + BRCMF_ND_INFO_TIMEOUT); + if (!timeout) + brcmf_err("No result for wowl net detect\n"); + else + wakeup_data.net_detect = cfg->wowl.nd_info; + } + } else { + wakeup = NULL; + } + cfg80211_report_wowlan_wakeup(&ifp->vif->wdev, wakeup, GFP_KERNEL); +} + +#else + +static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) +{ +} + +#endif /* CONFIG_PM */ + +static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct brcmf_if *ifp = netdev_priv(ndev); + + brcmf_dbg(TRACE, "Enter\n"); + + if (cfg->wowl.active) { + brcmf_report_wowl_wakeind(wiphy, ifp); + brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0); + brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0); + brcmf_configure_arp_offload(ifp, true); + brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, + cfg->wowl.pre_pmmode); + cfg->wowl.active = false; + if (cfg->wowl.nd_enabled) { + brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev); + brcmf_fweh_unregister(cfg->pub, BRCMF_E_PFN_NET_FOUND); + brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND, + brcmf_notify_sched_scan_results); + cfg->wowl.nd_enabled = false; + } + } + return 0; +} + +static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, + struct brcmf_if *ifp, + struct cfg80211_wowlan *wowl) +{ + u32 wowl_config; + u32 i; + + brcmf_dbg(TRACE, "Suspend, wowl config.\n"); + + brcmf_configure_arp_offload(ifp, false); + brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->wowl.pre_pmmode); + brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX); + + wowl_config = 0; + if (wowl->disconnect) + wowl_config = BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | BRCMF_WOWL_RETR; + if (wowl->magic_pkt) + wowl_config |= BRCMF_WOWL_MAGIC; + if ((wowl->patterns) && (wowl->n_patterns)) { + wowl_config |= BRCMF_WOWL_NET; + for (i = 0; i < wowl->n_patterns; i++) { + brcmf_config_wowl_pattern(ifp, "add", + (u8 *)wowl->patterns[i].pattern, + wowl->patterns[i].pattern_len, + (u8 *)wowl->patterns[i].mask, + wowl->patterns[i].pkt_offset); + } + } + if (wowl->nd_config) { + brcmf_cfg80211_sched_scan_start(cfg->wiphy, ifp->ndev, + wowl->nd_config); + wowl_config |= BRCMF_WOWL_PFN_FOUND; + + cfg->wowl.nd_data_completed = false; + cfg->wowl.nd_enabled = true; + /* Now reroute the event for PFN to the wowl function. */ + brcmf_fweh_unregister(cfg->pub, BRCMF_E_PFN_NET_FOUND); + brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND, + brcmf_wowl_nd_results); + } + if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) + wowl_config |= BRCMF_WOWL_UNASSOC; + + brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", strlen("clear")); + brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config); + brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1); + brcmf_bus_wowl_config(cfg->pub->bus_if, true); + cfg->wowl.active = true; +} + +static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, + struct cfg80211_wowlan *wowl) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_cfg80211_vif *vif; + + brcmf_dbg(TRACE, "Enter\n"); + + /* if the primary net_device is not READY there is nothing + * we can do but pray resume goes smoothly. 
+ */ + if (!check_vif_up(ifp->vif)) + goto exit; + + /* Stop scheduled scan */ + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) + brcmf_cfg80211_sched_scan_stop(wiphy, ndev); + + /* end any scanning */ + if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) + brcmf_abort_scanning(cfg); + + if (wowl == NULL) { + brcmf_bus_wowl_config(cfg->pub->bus_if, false); + list_for_each_entry(vif, &cfg->vif_list, list) { + if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) + continue; + /* While going to suspend if associated with AP + * disassociate from AP to save power while system is + * in suspended state + */ + brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED); + /* Make sure WPA_Supplicant receives all the event + * generated due to DISASSOC call to the fw to keep + * the state fw and WPA_Supplicant state consistent + */ + brcmf_delay(500); + } + /* Configure MPC */ + brcmf_set_mpc(ifp, 1); + + } else { + /* Configure WOWL paramaters */ + brcmf_configure_wowl(cfg, ifp, wowl); + } + +exit: + brcmf_dbg(TRACE, "Exit\n"); + /* clear any scanning activity */ + cfg->scan_status = 0; + return 0; +} + +static __used s32 +brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp) +{ + struct brcmf_pmk_list_le *pmk_list; + int i; + u32 npmk; + s32 err; + + pmk_list = &cfg->pmk_list; + npmk = le32_to_cpu(pmk_list->npmk); + + brcmf_dbg(CONN, "No of elements %d\n", npmk); + for (i = 0; i < npmk; i++) + brcmf_dbg(CONN, "PMK[%d]: %pM\n", i, &pmk_list->pmk[i].bssid); + + err = brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list, + sizeof(*pmk_list)); + + return err; +} + +static s32 +brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_pmksa *pmksa) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pmksa *pmk = &cfg->pmk_list.pmk[0]; + s32 err; + u32 npmk, i; + + brcmf_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + npmk = le32_to_cpu(cfg->pmk_list.npmk); + for (i = 0; i < npmk; i++) + if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN)) + break; + if (i < BRCMF_MAXPMKID) { + memcpy(pmk[i].bssid, pmksa->bssid, ETH_ALEN); + memcpy(pmk[i].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); + if (i == npmk) { + npmk++; + cfg->pmk_list.npmk = cpu_to_le32(npmk); + } + } else { + brcmf_err("Too many PMKSA entries cached %d\n", npmk); + return -EINVAL; + } + + brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmk[npmk].bssid); + for (i = 0; i < WLAN_PMKID_LEN; i += 4) + brcmf_dbg(CONN, "%02x %02x %02x %02x\n", pmk[npmk].pmkid[i], + pmk[npmk].pmkid[i + 1], pmk[npmk].pmkid[i + 2], + pmk[npmk].pmkid[i + 3]); + + err = brcmf_update_pmklist(cfg, ifp); + + brcmf_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_pmksa *pmksa) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pmksa *pmk = &cfg->pmk_list.pmk[0]; + s32 err; + u32 npmk, i; + + brcmf_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid); + + npmk = le32_to_cpu(cfg->pmk_list.npmk); + for (i = 0; i < npmk; i++) + if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN)) + break; + + if ((npmk > 0) && (i < npmk)) { + for (; i < (npmk - 1); i++) { + memcpy(&pmk[i].bssid, &pmk[i + 1].bssid, ETH_ALEN); + memcpy(&pmk[i].pmkid, &pmk[i + 1].pmkid, + WLAN_PMKID_LEN); + } + memset(&pmk[i], 0, 
sizeof(*pmk)); + cfg->pmk_list.npmk = cpu_to_le32(npmk - 1); + } else { + brcmf_err("Cache entry not found\n"); + return -EINVAL; + } + + err = brcmf_update_pmklist(cfg, ifp); + + brcmf_dbg(TRACE, "Exit\n"); + return err; + +} + +static s32 +brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = netdev_priv(ndev); + s32 err; + + brcmf_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + memset(&cfg->pmk_list, 0, sizeof(cfg->pmk_list)); + err = brcmf_update_pmklist(cfg, ifp); + + brcmf_dbg(TRACE, "Exit\n"); + return err; + +} + static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) { s32 err; @@ -3877,7 +4093,8 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, ifp = vif->ifp; saved_ie = &vif->saved_ie; - brcmf_dbg(TRACE, "bssidx %d, pktflag : 0x%02X\n", ifp->bssidx, pktflag); + brcmf_dbg(TRACE, "bsscfgidx %d, pktflag : 0x%02X\n", ifp->bsscfgidx, + pktflag); iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); if (!iovar_ie_buf) return -ENOMEM; @@ -4183,7 +4400,9 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } } - if (dev_role == NL80211_IFTYPE_AP) { + if ((dev_role == NL80211_IFTYPE_AP) && + ((ifp->ifidx == 0) || + !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); if (err < 0) { brcmf_err("BRCMF_C_DOWN error %d\n", err); @@ -4239,7 +4458,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_err("setting ssid failed %d\n", err); goto exit; } - bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); + bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); bss_enable.enable = cpu_to_le32(1); err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable, sizeof(bss_enable)); @@ -4306,7 +4525,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) if (err < 0) brcmf_err("BRCMF_C_UP error %d\n", err); } else { - bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); + bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); bss_enable.enable = cpu_to_le32(0); err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable, sizeof(bss_enable)); @@ -4700,7 +4919,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, vif->wdev.iftype = type; vif->pm_block = pm_block; - vif->roam_off = -1; brcmf_init_prof(&vif->profile); @@ -5016,9 +5234,9 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, } else if (brcmf_is_linkup(e)) { brcmf_dbg(CONN, "Linkup\n"); if (brcmf_is_ibssmode(ifp->vif)) { + brcmf_inform_ibss(cfg, ndev, e->addr); chan = ieee80211_get_channel(cfg->wiphy, cfg->channel); memcpy(profile->bssid, e->addr, ETH_ALEN); - wl_inform_ibss(cfg, ndev, e->addr); cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL); clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); @@ -5031,12 +5249,13 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, brcmf_dbg(CONN, "Linkdown\n"); if (!brcmf_is_ibssmode(ifp->vif)) { brcmf_bss_connect_done(cfg, ndev, e, false); + brcmf_link_down(ifp->vif, + brcmf_map_fw_linkdown_reason(e)); + brcmf_init_prof(ndev_to_prof(ndev)); + if (ndev != cfg_to_ndev(cfg)) + complete(&cfg->vif_disabled); + brcmf_net_setcarrier(ifp, false); } - brcmf_link_down(ifp->vif, brcmf_map_fw_linkdown_reason(e)); - brcmf_init_prof(ndev_to_prof(ndev)); - if (ndev != cfg_to_ndev(cfg)) - complete(&cfg->vif_disabled); - brcmf_net_setcarrier(ifp, false); } else if (brcmf_is_nonetwork(cfg, e)) { if 
(brcmf_is_ibssmode(ifp->vif)) clear_bit(BRCMF_VIF_STATUS_CONNECTING, @@ -5092,9 +5311,9 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; struct brcmf_cfg80211_vif *vif; - brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n", + brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfgidx %u\n", ifevent->action, ifevent->flags, ifevent->ifidx, - ifevent->bssidx); + ifevent->bsscfgidx); mutex_lock(&event->vif_event_lock); event->action = ifevent->action; @@ -5144,7 +5363,6 @@ static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf) conf->rts_threshold = (u32)-1; conf->retry_short = (u32)-1; conf->retry_long = (u32)-1; - conf->tx_power = -1; } static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg) @@ -5191,8 +5409,10 @@ static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg) cfg->escan_ioctl_buf = NULL; kfree(cfg->extra_buf); cfg->extra_buf = NULL; - kfree(cfg->pmk_list); - cfg->pmk_list = NULL; + kfree(cfg->wowl.nd); + cfg->wowl.nd = NULL; + kfree(cfg->wowl.nd_info); + cfg->wowl.nd_info = NULL; } static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg) @@ -5206,8 +5426,13 @@ static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg) cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); if (!cfg->extra_buf) goto init_priv_mem_out; - cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL); - if (!cfg->pmk_list) + cfg->wowl.nd = kzalloc(sizeof(*cfg->wowl.nd) + sizeof(u32), GFP_KERNEL); + if (!cfg->wowl.nd) + goto init_priv_mem_out; + cfg->wowl.nd_info = kzalloc(sizeof(*cfg->wowl.nd_info) + + sizeof(struct cfg80211_wowlan_nd_match *), + GFP_KERNEL); + if (!cfg->wowl.nd_info) goto init_priv_mem_out; return 0; @@ -5250,35 +5475,34 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event) mutex_init(&event->vif_event_lock); } -static s32 -brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout) +static s32 brcmf_dongle_roam(struct brcmf_if *ifp) { - s32 err = 0; + s32 err; + u32 bcn_timeout; __le32 roamtrigger[2]; __le32 roam_delta[2]; - /* - * Setup timeout if Beacons are lost and roam is - * off to report link down - */ - if (brcmf_roamoff) { - err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout); - if (err) { - brcmf_err("bcn_timeout error (%d)\n", err); - goto dongle_rom_out; - } + /* Configure beacon timeout value based upon roaming setting */ + if (ifp->drvr->settings->roamoff) + bcn_timeout = BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_OFF; + else + bcn_timeout = BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON; + err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout); + if (err) { + brcmf_err("bcn_timeout error (%d)\n", err); + goto roam_setup_done; } - /* - * Enable/Disable built-in roaming to allow supplicant - * to take care of roaming + /* Enable/Disable built-in roaming to allow supplicant to take care of + * roaming. */ brcmf_dbg(INFO, "Internal Roaming = %s\n", - brcmf_roamoff ? "Off" : "On"); - err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff)); + ifp->drvr->settings->roamoff ? 
"Off" : "On"); + err = brcmf_fil_iovar_int_set(ifp, "roam_off", + ifp->drvr->settings->roamoff); if (err) { brcmf_err("roam_off error (%d)\n", err); - goto dongle_rom_out; + goto roam_setup_done; } roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL); @@ -5287,7 +5511,7 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout) (void *)roamtrigger, sizeof(roamtrigger)); if (err) { brcmf_err("WLC_SET_ROAM_TRIGGER error (%d)\n", err); - goto dongle_rom_out; + goto roam_setup_done; } roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA); @@ -5296,45 +5520,35 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout) (void *)roam_delta, sizeof(roam_delta)); if (err) { brcmf_err("WLC_SET_ROAM_DELTA error (%d)\n", err); - goto dongle_rom_out; + goto roam_setup_done; } -dongle_rom_out: +roam_setup_done: return err; } static s32 -brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time, - s32 scan_unassoc_time, s32 scan_passive_time) +brcmf_dongle_scantime(struct brcmf_if *ifp) { s32 err = 0; err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, - scan_assoc_time); + BRCMF_SCAN_CHANNEL_TIME); if (err) { - if (err == -EOPNOTSUPP) - brcmf_dbg(INFO, "Scan assoc time is not supported\n"); - else - brcmf_err("Scan assoc time error (%d)\n", err); + brcmf_err("Scan assoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME, - scan_unassoc_time); + BRCMF_SCAN_UNASSOC_TIME); if (err) { - if (err == -EOPNOTSUPP) - brcmf_dbg(INFO, "Scan unassoc time is not supported\n"); - else - brcmf_err("Scan unassoc time error (%d)\n", err); + brcmf_err("Scan unassoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME, - scan_passive_time); + BRCMF_SCAN_PASSIVE_TIME); if (err) { - if (err == -EOPNOTSUPP) - brcmf_dbg(INFO, "Scan passive time is not supported\n"); - else - brcmf_err("Scan passive time error (%d)\n", err); + brcmf_err("Scan passive time error (%d)\n", err); goto dongle_scantime_out; } @@ -5619,7 +5833,8 @@ static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp) } static void brcmf_update_vht_cap(struct ieee80211_supported_band *band, - u32 bw_cap[2], u32 nchain) + u32 bw_cap[2], u32 nchain, u32 txstreams, + u32 txbf_bfe_cap, u32 txbf_bfr_cap) { __le16 mcs_map; @@ -5638,6 +5853,25 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band, mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9); band->vht_cap.vht_mcs.rx_mcs_map = mcs_map; band->vht_cap.vht_mcs.tx_mcs_map = mcs_map; + + /* Beamforming support information */ + if (txbf_bfe_cap & BRCMF_TXBF_SU_BFE_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + if (txbf_bfe_cap & BRCMF_TXBF_MU_BFE_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (txbf_bfr_cap & BRCMF_TXBF_SU_BFR_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + if (txbf_bfr_cap & BRCMF_TXBF_MU_BFR_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + if ((txbf_bfe_cap || txbf_bfr_cap) && (txstreams > 1)) { + band->vht_cap.cap |= + (2 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + band->vht_cap.cap |= ((txstreams - 1) << + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT); + band->vht_cap.cap |= + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB; + } } static int brcmf_setup_wiphybands(struct wiphy *wiphy) @@ -5652,6 +5886,9 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy) int err; s32 i; struct ieee80211_supported_band 
*band; + u32 txstreams = 0; + u32 txbf_bfe_cap = 0; + u32 txbf_bfr_cap = 0; (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode); err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode); @@ -5680,6 +5917,14 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy) return err; } + if (vhtmode) { + (void)brcmf_fil_iovar_int_get(ifp, "txstreams", &txstreams); + (void)brcmf_fil_iovar_int_get(ifp, "txbf_bfe_cap", + &txbf_bfe_cap); + (void)brcmf_fil_iovar_int_get(ifp, "txbf_bfr_cap", + &txbf_bfr_cap); + } + wiphy = cfg_to_wiphy(cfg); for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { band = wiphy->bands[i]; @@ -5689,7 +5934,8 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy) if (nmode) brcmf_update_ht_cap(band, bw_cap, nchain); if (vhtmode) - brcmf_update_vht_cap(band, bw_cap, nchain); + brcmf_update_vht_cap(band, bw_cap, nchain, txstreams, + txbf_bfe_cap, txbf_bfr_cap); } return 0; @@ -5864,7 +6110,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy) } #ifdef CONFIG_PM -static const struct wiphy_wowlan_support brcmf_wowlan_support = { +static struct wiphy_wowlan_support brcmf_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, .n_patterns = BRCMF_WOWL_MAXPATTERNS, .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE, @@ -5873,10 +6119,23 @@ static const struct wiphy_wowlan_support brcmf_wowlan_support = { }; #endif -static void brcmf_wiphy_wowl_params(struct wiphy *wiphy) +static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp) { #ifdef CONFIG_PM - /* wowl settings */ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + s32 err; + u32 wowl_cap; + + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { + err = brcmf_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap); + if (!err) { + if (wowl_cap & BRCMF_WOWL_PFN_FOUND) { + brcmf_wowlan_support.flags |= + WIPHY_WOWLAN_NET_DETECT; + init_waitqueue_head(&cfg->wowl.nd_data_wait); + } + } + } wiphy->wowlan = &brcmf_wowlan_support; #endif } @@ -5893,7 +6152,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; - wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; + wiphy->max_num_pmkids = BRCMF_MAXPMKID; err = brcmf_setup_ifmodes(wiphy, ifp); if (err) @@ -5922,9 +6181,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | WIPHY_FLAG_OFFCHAN_TX | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | - WIPHY_FLAG_SUPPORTS_TDLS; - if (!brcmf_roamoff) + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) + wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + if (!ifp->drvr->settings->roamoff) wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; wiphy->mgmt_stypes = brcmf_txrx_stypes; wiphy->max_remain_on_channel_duration = 5000; @@ -5936,8 +6196,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL)) - brcmf_wiphy_wowl_params(wiphy); - + brcmf_wiphy_wowl_params(wiphy, ifp); err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist, sizeof(bandlist)); if (err) { @@ -6004,8 +6263,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) /* make sure RF is ready for work */ brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); - brcmf_dongle_scantime(ifp, WL_SCAN_CHANNEL_TIME, - WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME); + brcmf_dongle_scantime(ifp); 
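The reworked brcmf_dongle_roam() above no longer takes the beacon timeout as an argument: it derives a default from the per-device roamoff setting before writing the "bcn_timeout" iovar, and brcmf_dongle_scantime() likewise falls back to the fixed BRCMF_SCAN_* defaults. A minimal sketch of that selection, assuming the BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON/OFF values added to cfg80211.h later in this patch (the helper name is illustrative only, not part of the patch):

	/* Illustrative helper, not from the patch: mirrors the choice made
	 * inside the reworked brcmf_dongle_roam() - a longer beacon-loss
	 * timeout when firmware roaming is disabled, a shorter one when the
	 * firmware roams on its own.
	 */
	static u32 example_bcn_timeout(bool roamoff)
	{
		return roamoff ? BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_OFF :
				 BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON;
	}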
power_mode = cfg->pwr_save ? PM_FAST : PM_OFF; err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, power_mode); @@ -6014,7 +6272,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) brcmf_dbg(INFO, "power save set to %s\n", (power_mode ? "enabled" : "disabled")); - err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT); + err = brcmf_dongle_roam(ifp); if (err) goto default_conf_out; err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype, @@ -6316,13 +6574,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, goto wiphy_unreg_out; } - err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1); - if (err) { - brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err); - wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS; - } else { - brcmf_fweh_register(cfg->pub, BRCMF_E_TDLS_PEER_EVENT, - brcmf_notify_tdls_peer_event); + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) { + err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1); + if (err) { + brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err); + wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS; + } else { + brcmf_fweh_register(cfg->pub, BRCMF_E_TDLS_PEER_EVENT, + brcmf_notify_tdls_peer_event); + } } /* (re-) activate FWEH event handling */ @@ -6332,6 +6592,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, goto wiphy_unreg_out; } + /* Fill in some of the advertised nl80211 supported features */ + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_RANDOM_MAC)) { + wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR; +#ifdef CONFIG_PM + if (wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT) + wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; +#endif + } + return cfg; wiphy_unreg_out: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 6a878c8f883f..40efb539ac26 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -21,18 +21,12 @@ #include <brcmu_d11.h> #define WL_NUM_SCAN_MAX 10 -#define WL_NUM_PMKIDS_MAX MAXPMKID #define WL_TLV_INFO_MAX 1024 #define WL_BSS_INFO_MAX 2048 #define WL_ASSOC_INFO_MAX 512 /* assoc related fil max buf */ #define WL_EXTRA_BUF_MAX 2048 #define WL_ROAM_TRIGGER_LEVEL -75 #define WL_ROAM_DELTA 20 -#define WL_BEACON_TIMEOUT 3 - -#define WL_SCAN_CHANNEL_TIME 40 -#define WL_SCAN_UNASSOC_TIME 40 -#define WL_SCAN_PASSIVE_TIME 120 #define WL_ESCAN_BUF_SIZE (1024 * 64) #define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */ @@ -77,6 +71,11 @@ #define BRCMF_MAX_DEFAULT_KEYS 4 +/* beacon loss timeout defaults */ +#define BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON 2 +#define BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_OFF 4 + +#define BRCMF_VIF_EVENT_TIMEOUT msecs_to_jiffies(1500) /** * enum brcmf_scan_status - scan engine status @@ -97,19 +96,6 @@ struct brcmf_cfg80211_conf { u32 rts_threshold; u32 retry_short; u32 retry_long; - s32 tx_power; - struct ieee80211_channel channel; -}; - -/* basic structure of scan request */ -struct brcmf_cfg80211_scan_req { - struct brcmf_ssid_le ssid_le; -}; - -/* basic structure of information element */ -struct brcmf_cfg80211_ie { - u16 offset; - u8 buf[WL_TLV_INFO_MAX]; }; /* security information with currently associated ap */ @@ -124,13 +110,11 @@ struct brcmf_cfg80211_security { /** * struct brcmf_cfg80211_profile - profile information. * - * @ssid: ssid of associated/associating ap. * @bssid: bssid of joined/joining ibss. * @sec: security information. 
* @key: key information */ struct brcmf_cfg80211_profile { - struct brcmf_ssid ssid; u8 bssid[ETH_ALEN]; struct brcmf_cfg80211_security sec; struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS]; @@ -180,7 +164,6 @@ struct vif_saved_ie { * @ifp: lower layer interface pointer * @wdev: wireless device. * @profile: profile information. - * @roam_off: roaming state. * @sme_state: SME state using enum brcmf_vif_status bits. * @pm_block: power-management blocked. * @list: linked list. @@ -191,7 +174,6 @@ struct brcmf_cfg80211_vif { struct brcmf_if *ifp; struct wireless_dev wdev; struct brcmf_cfg80211_profile profile; - s32 roam_off; unsigned long sme_state; bool pm_block; struct vif_saved_ie saved_ie; @@ -215,12 +197,6 @@ struct brcmf_cfg80211_assoc_ielen_le { __le32 resp_len; }; -/* wpa2 pmk list */ -struct brcmf_cfg80211_pmk_list { - struct pmkid_list pmkids; - struct pmkid foo[MAXPMKID - 1]; -}; - /* dongle escan state */ enum wl_escan_state { WL_ESCAN_STATE_IDLE, @@ -233,88 +209,7 @@ struct escan_info { struct wiphy *wiphy; struct brcmf_if *ifp; s32 (*run)(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, - struct cfg80211_scan_request *request, u16 action); -}; - -/** - * struct brcmf_pno_param_le - PNO scan configuration parameters - * - * @version: PNO parameters version. - * @scan_freq: scan frequency. - * @lost_network_timeout: #sec. to declare discovered network as lost. - * @flags: Bit field to control features of PFN such as sort criteria auto - * enable switch and background scan. - * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort - * criteria. - * @bestn: number of best networks in each scan. - * @mscan: number of scans recorded. - * @repeat: minimum number of scan intervals before scan frequency changes - * in adaptive scan. - * @exp: exponent of 2 for maximum scan interval. - * @slow_freq: slow scan period. - */ -struct brcmf_pno_param_le { - __le32 version; - __le32 scan_freq; - __le32 lost_network_timeout; - __le16 flags; - __le16 rssi_margin; - u8 bestn; - u8 mscan; - u8 repeat; - u8 exp; - __le32 slow_freq; -}; - -/** - * struct brcmf_pno_net_param_le - scan parameters per preferred network. - * - * @ssid: ssid name and its length. - * @flags: bit2: hidden. - * @infra: BSS vs IBSS. - * @auth: Open vs Closed. - * @wpa_auth: WPA type. - * @wsec: wsec value. - */ -struct brcmf_pno_net_param_le { - struct brcmf_ssid_le ssid; - __le32 flags; - __le32 infra; - __le32 auth; - __le32 wpa_auth; - __le32 wsec; -}; - -/** - * struct brcmf_pno_net_info_le - information per found network. - * - * @bssid: BSS network identifier. - * @channel: channel number only. - * @SSID_len: length of ssid. - * @SSID: ssid characters. - * @RSSI: receive signal strength (in dBm). - * @timestamp: age in seconds. - */ -struct brcmf_pno_net_info_le { - u8 bssid[ETH_ALEN]; - u8 channel; - u8 SSID_len; - u8 SSID[32]; - __le16 RSSI; - __le16 timestamp; -}; - -/** - * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event. - * - * @version: PNO version identifier. - * @status: indicates completion status of PNO scan. - * @count: amount of brcmf_pno_net_info_le entries appended. - */ -struct brcmf_pno_scanresults_le { - __le32 version; - __le32 status; - __le32 count; + struct cfg80211_scan_request *request); }; /** @@ -334,6 +229,27 @@ struct brcmf_cfg80211_vif_event { }; /** + * struct brcmf_cfg80211_wowl - wowl related information. + * + * @active: set on suspend, cleared on resume. + * @pre_pmmode: firmware PM mode at entering suspend. + * @nd: net dectect data. 
+ * @nd_info: helper struct to pass to cfg80211. + * @nd_data_wait: wait queue to sync net detect data. + * @nd_data_completed: completion for net detect data. + * @nd_enabled: net detect enabled. + */ +struct brcmf_cfg80211_wowl { + bool active; + u32 pre_pmmode; + struct cfg80211_wowlan_nd_match *nd; + struct cfg80211_wowlan_nd_info *nd_info; + wait_queue_head_t nd_data_wait; + bool nd_data_completed; + bool nd_enabled; +}; + +/** * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface * * @wiphy: wiphy object for cfg80211 interface. @@ -343,9 +259,7 @@ struct brcmf_cfg80211_vif_event { * @scan_request: cfg80211 scan request object. * @usr_sync: mainly for dongle up/down synchronization. * @bss_list: bss_list holding scanned ap information. - * @scan_req_int: internal scan request object. * @bss_info: bss information for cfg80211 layer. - * @ie: information element object for internal purpose. * @conn_info: association info. * @pmk_list: wpa2 pmk list. * @scan_status: scan activity on the dongle. @@ -368,8 +282,7 @@ struct brcmf_cfg80211_vif_event { * @vif_list: linked list of vif instances. * @vif_cnt: number of vif instances. * @vif_event: vif event signalling. - * @wowl_enabled; set during suspend, is wowl used. - * @pre_wowl_pmmode: intermediate storage of pm mode during wowl. + * @wowl: wowl related information. */ struct brcmf_cfg80211_info { struct wiphy *wiphy; @@ -378,11 +291,9 @@ struct brcmf_cfg80211_info { struct brcmf_btcoex_info *btcoex; struct cfg80211_scan_request *scan_request; struct mutex usr_sync; - struct brcmf_cfg80211_scan_req scan_req_int; struct wl_cfg80211_bss_info *bss_info; - struct brcmf_cfg80211_ie ie; struct brcmf_cfg80211_connect_info conn_info; - struct brcmf_cfg80211_pmk_list *pmk_list; + struct brcmf_pmk_list_le pmk_list; unsigned long scan_status; struct brcmf_pub *pub; u32 channel; @@ -403,9 +314,8 @@ struct brcmf_cfg80211_info { struct brcmf_cfg80211_vif_event vif_event; struct completion vif_disabled; struct brcmu_d11inf d11inf; - bool wowl_enabled; - u32 pre_wowl_pmmode; struct brcmf_assoclist_le assoclist; + struct brcmf_cfg80211_wowl wowl; }; /** diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index f04833db2fd0..82e4382eb177 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -681,6 +681,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_43569_CHIP_ID: case BRCM_CC_43570_CHIP_ID: case BRCM_CC_4358_CHIP_ID: + case BRCM_CC_4359_CHIP_ID: case BRCM_CC_43602_CHIP_ID: case BRCM_CC_4371_CHIP_ID: return 0x180000; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index f6b5feea23d2..f6b5feea23d2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index fe54844c75e0..4265b50faa98 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -29,13 +29,53 @@ const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -#define BRCMF_DEFAULT_BCN_TIMEOUT 3 #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 /* boost value for RSSI_DELTA in preferred join selection */ #define BRCMF_JOIN_PREF_RSSI_BOOST 8 
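Wake-on-WLAN bookkeeping is now grouped in the new struct brcmf_cfg80211_wowl embedded in brcmf_cfg80211_info, replacing the loose wowl_enabled and pre_wowl_pmmode members dropped from that struct above. A hedged sketch of the resulting access pattern; the helper name is hypothetical, the members are the ones declared in the hunk above:

	/* Hypothetical helper, for illustration only: suspend-time state is
	 * recorded under cfg->wowl instead of the removed members.
	 */
	static void example_record_suspend(struct brcmf_cfg80211_info *cfg,
					   u32 pm_mode)
	{
		cfg->wowl.pre_pmmode = pm_mode;	/* was cfg->pre_wowl_pmmode */
		cfg->wowl.active = true;	/* was cfg->wowl_enabled */
	}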
+#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */ + +static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE; +module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0); +MODULE_PARM_DESC(txglomsz, "Maximum tx packet chain size [SDIO]"); + +/* Debug level configuration. See debug.h for bits, sysfs modifiable */ +int brcmf_msg_level; +module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(debug, "Level of debug output"); + +static int brcmf_p2p_enable; +module_param_named(p2pon, brcmf_p2p_enable, int, 0); +MODULE_PARM_DESC(p2pon, "Enable legacy p2p management functionality"); + +static int brcmf_feature_disable; +module_param_named(feature_disable, brcmf_feature_disable, int, 0); +MODULE_PARM_DESC(feature_disable, "Disable features"); + +static char brcmf_firmware_path[BRCMF_FW_ALTPATH_LEN]; +module_param_string(alternative_fw_path, brcmf_firmware_path, + BRCMF_FW_ALTPATH_LEN, S_IRUSR); +MODULE_PARM_DESC(alternative_fw_path, "Alternative firmware path"); + +static int brcmf_fcmode; +module_param_named(fcmode, brcmf_fcmode, int, 0); +MODULE_PARM_DESC(fcmode, "Mode of firmware signalled flow control"); + +static int brcmf_roamoff; +module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR); +MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine"); + +#ifdef DEBUG +/* always succeed brcmf_bus_start() */ +static int brcmf_ignore_probe_fail; +module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0); +MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); +#endif + +struct brcmf_mp_global_t brcmf_mp_global; + int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) { s8 eventmask[BRCMF_EVENTING_MASK_LEN]; @@ -107,26 +147,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) goto done; } - /* - * Setup timeout if Beacons are lost and roam is off to report - * link down - */ - err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", - BRCMF_DEFAULT_BCN_TIMEOUT); - if (err) { - brcmf_err("bcn_timeout error (%d)\n", err); - goto done; - } - - /* Enable/Disable build-in roaming to allowed ext supplicant to take - * of romaing - */ - err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1); - if (err) { - brcmf_err("roam_off error (%d)\n", err); - goto done; - } - /* Setup join_pref to select target by RSSI(with boost on 5GHz) */ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA; join_pref_params[0].len = 2; @@ -174,6 +194,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) goto done; } + /* Enable tx beamforming, errors can be ignored (not supported) */ + (void)brcmf_fil_iovar_int_set(ifp, "txbf", 1); + /* do bus specific preinit here */ err = brcmf_bus_preinit(ifp->drvr->bus_if); done: @@ -196,3 +219,34 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) 
va_end(args); } #endif + +void brcmf_mp_attach(void) +{ + strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path, + BRCMF_FW_ALTPATH_LEN); +} + +int brcmf_mp_device_attach(struct brcmf_pub *drvr) +{ + drvr->settings = kzalloc(sizeof(*drvr->settings), GFP_ATOMIC); + if (!drvr->settings) { + brcmf_err("Failed to alloca storage space for settings\n"); + return -ENOMEM; + } + + drvr->settings->sdiod_txglomsz = brcmf_sdiod_txglomsz; + drvr->settings->p2p_enable = !!brcmf_p2p_enable; + drvr->settings->feature_disable = brcmf_feature_disable; + drvr->settings->fcmode = brcmf_fcmode; + drvr->settings->roamoff = !!brcmf_roamoff; +#ifdef DEBUG + drvr->settings->ignore_probe_fail = !!brcmf_ignore_probe_fail; +#endif + return 0; +} + +void brcmf_mp_device_detach(struct brcmf_pub *drvr) +{ + kfree(drvr->settings); +} + diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h new file mode 100644 index 000000000000..3b0a63b98e99 --- /dev/null +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2014 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef BRCMFMAC_COMMON_H +#define BRCMFMAC_COMMON_H + +extern const u8 ALLFFMAC[ETH_ALEN]; + +#define BRCMF_FW_ALTPATH_LEN 256 + +/* Definitions for the module global and device specific settings are defined + * here. Two structs are used for them. brcmf_mp_global_t and brcmf_mp_device. + * The mp_global is instantiated once in a global struct and gets initialized + * by the common_attach function which should be called before any other + * (module) initiliazation takes place. The device specific settings is part + * of the drvr struct and should be initialized on every brcmf_attach. + */ + +/** + * struct brcmf_mp_global_t - Global module paramaters. + * + * @firmware_path: Alternative firmware path. + */ +struct brcmf_mp_global_t { + char firmware_path[BRCMF_FW_ALTPATH_LEN]; +}; + +extern struct brcmf_mp_global_t brcmf_mp_global; + +/** + * struct brcmf_mp_device - Device module paramaters. + * + * @sdiod_txglomsz: SDIO txglom size. + * @joinboost_5g_rssi: 5g rssi booost for preferred join selection. + * @p2p_enable: Legacy P2P0 enable (old wpa_supplicant). + * @feature_disable: Feature_disable bitmask. + * @fcmode: FWS flow control. + * @roamoff: Firmware roaming off? 
+ */ +struct brcmf_mp_device { + int sdiod_txglomsz; + int joinboost_5g_rssi; + bool p2p_enable; + int feature_disable; + int fcmode; + bool roamoff; + bool ignore_probe_fail; +}; + +void brcmf_mp_attach(void); +int brcmf_mp_device_attach(struct brcmf_pub *drvr); +void brcmf_mp_device_detach(struct brcmf_pub *drvr); +#ifdef DEBUG +static inline bool brcmf_ignoring_probe_fail(struct brcmf_pub *drvr) +{ + return drvr->settings->ignore_probe_fail; +} +#else +static inline bool brcmf_ignoring_probe_fail(struct brcmf_pub *drvr) +{ + return false; +} +#endif + +/* Sets dongle media info (drv_version, mac address). */ +int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); + +#endif /* BRCMFMAC_COMMON_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c index 7b0e52195a85..7b0e52195a85 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h index b85033611c8d..b85033611c8d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b5ab98ee1445..ed9998b69709 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/etherdevice.h> #include <linux/module.h> +#include <linux/inetdevice.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <brcmu_utils.h> @@ -39,7 +40,7 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); MODULE_LICENSE("Dual BSD/GPL"); -#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */ +#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(50) /* AMPDU rx reordering definitions */ #define BRCMF_RXREORDER_FLOWID_OFFSET 0 @@ -56,30 +57,13 @@ MODULE_LICENSE("Dual BSD/GPL"); #define BRCMF_BSSIDX_INVALID -1 -/* Error bits */ -int brcmf_msg_level; -module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR); -MODULE_PARM_DESC(debug, "level of debug output"); - -/* P2P0 enable */ -static int brcmf_p2p_enable; -module_param_named(p2pon, brcmf_p2p_enable, int, 0); -MODULE_PARM_DESC(p2pon, "enable legacy p2p management functionality"); - -char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) +char *brcmf_ifname(struct brcmf_if *ifp) { - if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) { - brcmf_err("ifidx %d out of range\n", ifidx); - return "<if_bad>"; - } - - if (drvr->iflist[ifidx] == NULL) { - brcmf_err("null i/f %d\n", ifidx); + if (!ifp) return "<if_null>"; - } - if (drvr->iflist[ifidx]->ndev) - return drvr->iflist[ifidx]->ndev->name; + if (ifp->ndev) + return ifp->ndev->name; return "<if_none>"; } @@ -87,7 +71,7 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx) { struct brcmf_if *ifp; - s32 bssidx; + s32 bsscfgidx; if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) { brcmf_err("ifidx %d out of range\n", ifidx); @@ -95,9 +79,9 @@ struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx) } ifp = NULL; - bssidx = drvr->if2bss[ifidx]; - if (bssidx >= 0) - ifp = drvr->iflist[bssidx]; + bsscfgidx = drvr->if2bss[ifidx]; + if (bsscfgidx >= 0) + ifp = drvr->iflist[bsscfgidx]; 
return ifp; } @@ -115,7 +99,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work) ifp = container_of(work, struct brcmf_if, multicast_work); - brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); ndev = ifp->ndev; @@ -175,7 +159,7 @@ _brcmf_set_mac_address(struct work_struct *work) ifp = container_of(work, struct brcmf_if, setmacaddr_work); - brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr, ETH_ALEN); @@ -213,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, struct brcmf_pub *drvr = ifp->drvr; struct ethhdr *eh = (struct ethhdr *)(skb->data); - brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); /* Can the device send data? */ if (drvr->bus_if->state != BRCMF_BUS_UP) { @@ -224,27 +208,19 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, goto done; } - if (!drvr->iflist[ifp->bssidx]) { - brcmf_err("bad ifidx %d\n", ifp->bssidx); - netif_stop_queue(ndev); - dev_kfree_skb(skb); - ret = -ENODEV; - goto done; - } - /* Make sure there's enough room for any header */ if (skb_headroom(skb) < drvr->hdrlen) { struct sk_buff *skb2; brcmf_dbg(INFO, "%s: insufficient headroom\n", - brcmf_ifname(drvr, ifp->bssidx)); + brcmf_ifname(ifp)); drvr->bus_if->tx_realloc++; skb2 = skb_realloc_headroom(skb, drvr->hdrlen); dev_kfree_skb(skb); skb = skb2; if (skb == NULL) { brcmf_err("%s: skb_realloc_headroom failed\n", - brcmf_ifname(drvr, ifp->bssidx)); + brcmf_ifname(ifp)); ret = -ENOMEM; goto done; } @@ -282,8 +258,8 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, if (!ifp || !ifp->ndev) return; - brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", - ifp->bssidx, ifp->netif_stop, reason, state); + brcmf_dbg(TRACE, "enter: bsscfgidx=%d stop=0x%X reason=%d state=%d\n", + ifp->bsscfgidx, ifp->netif_stop, reason, state); spin_lock_irqsave(&ifp->netif_stop_lock, flags); if (state) { @@ -602,7 +578,7 @@ static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); return &ifp->stats; } @@ -631,10 +607,12 @@ static int brcmf_netdev_stop(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); brcmf_cfg80211_down(ndev); + brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0); + brcmf_net_setcarrier(ifp, false); return 0; @@ -647,7 +625,7 @@ static int brcmf_netdev_open(struct net_device *ndev) struct brcmf_bus *bus_if = drvr->bus_if; u32 toe_ol; - brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); /* If bus is not ready, can't continue */ if (bus_if->state != BRCMF_BUS_UP) { @@ -689,7 +667,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) struct net_device *ndev; s32 err; - brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx, + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d mac=%pM\n", ifp->bsscfgidx, ifp->mac_addr); ndev = ifp->ndev; @@ -721,7 +699,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) return 0; fail: - drvr->iflist[ifp->bssidx] = NULL; + drvr->iflist[ifp->bsscfgidx] = NULL; ndev->netdev_ops = NULL; 
free_netdev(ndev); return -EBADE; @@ -739,7 +717,8 @@ void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on) { struct net_device *ndev; - brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d carrier=%d\n", ifp->bsscfgidx, + on); ndev = ifp->ndev; brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on); @@ -786,7 +765,7 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp) { struct net_device *ndev; - brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx, + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d mac=%pM\n", ifp->bsscfgidx, ifp->mac_addr); ndev = ifp->ndev; @@ -805,39 +784,40 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp) return 0; fail: - ifp->drvr->iflist[ifp->bssidx] = NULL; + ifp->drvr->iflist[ifp->bsscfgidx] = NULL; ndev->netdev_ops = NULL; free_netdev(ndev); return -EBADE; } -struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, +struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, bool is_p2pdev, char *name, u8 *mac_addr) { struct brcmf_if *ifp; struct net_device *ndev; - brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx); + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, ifidx); - ifp = drvr->iflist[bssidx]; + ifp = drvr->iflist[bsscfgidx]; /* * Delete the existing interface before overwriting it * in case we missed the BRCMF_E_IF_DEL event. */ if (ifp) { - brcmf_err("ERROR: netdev:%s already exists\n", - ifp->ndev->name); if (ifidx) { + brcmf_err("ERROR: netdev:%s already exists\n", + ifp->ndev->name); netif_stop_queue(ifp->ndev); brcmf_net_detach(ifp->ndev); - drvr->iflist[bssidx] = NULL; + drvr->iflist[bsscfgidx] = NULL; } else { - brcmf_err("ignore IF event\n"); + brcmf_dbg(INFO, "netdev:%s ignore IF event\n", + ifp->ndev->name); return ERR_PTR(-EINVAL); } } - if (!brcmf_p2p_enable && is_p2pdev) { + if (!drvr->settings->p2p_enable && is_p2pdev) { /* this is P2P_DEVICE interface */ brcmf_dbg(INFO, "allocate non-netdev interface\n"); ifp = kzalloc(sizeof(*ifp), GFP_KERNEL); @@ -854,15 +834,15 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, ndev->destructor = brcmf_cfg80211_free_netdev; ifp = netdev_priv(ndev); ifp->ndev = ndev; - /* store mapping ifidx to bssidx */ + /* store mapping ifidx to bsscfgidx */ if (drvr->if2bss[ifidx] == BRCMF_BSSIDX_INVALID) - drvr->if2bss[ifidx] = bssidx; + drvr->if2bss[ifidx] = bsscfgidx; } ifp->drvr = drvr; - drvr->iflist[bssidx] = ifp; + drvr->iflist[bsscfgidx] = ifp; ifp->ifidx = ifidx; - ifp->bssidx = bssidx; + ifp->bsscfgidx = bsscfgidx; init_waitqueue_head(&ifp->pend_8021x_wait); spin_lock_init(&ifp->netif_stop_lock); @@ -876,21 +856,22 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, return ifp; } -static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) +static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx) { struct brcmf_if *ifp; - ifp = drvr->iflist[bssidx]; - drvr->iflist[bssidx] = NULL; + ifp = drvr->iflist[bsscfgidx]; + drvr->iflist[bsscfgidx] = NULL; if (!ifp) { - brcmf_err("Null interface, idx=%d\n", bssidx); + brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx); return; } - brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx); - if (drvr->if2bss[ifp->ifidx] == bssidx) + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, + ifp->ifidx); + if (drvr->if2bss[ifp->ifidx] == bsscfgidx) drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID; if (ifp->ndev) { - if (bssidx == 0) { + if (bsscfgidx 
== 0) { if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { rtnl_lock(); brcmf_netdev_stop(ifp->ndev); @@ -920,12 +901,12 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) void brcmf_remove_interface(struct brcmf_if *ifp) { - if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bssidx] != ifp)) + if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bsscfgidx] != ifp)) return; - brcmf_dbg(TRACE, "Enter, bssidx=%d, ifidx=%d\n", ifp->bssidx, + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx, ifp->ifidx); brcmf_fws_del_interface(ifp); - brcmf_del_if(ifp->drvr, ifp->bssidx); + brcmf_del_if(ifp->drvr, ifp->bsscfgidx); } int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) @@ -940,10 +921,10 @@ int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) highest = 2; for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) { if (drvr->iflist[ifidx]) { - if (drvr->iflist[ifidx]->bssidx == bsscfgidx) + if (drvr->iflist[ifidx]->bsscfgidx == bsscfgidx) bsscfgidx = highest + 1; - else if (drvr->iflist[ifidx]->bssidx > highest) - highest = drvr->iflist[ifidx]->bssidx; + else if (drvr->iflist[ifidx]->bsscfgidx > highest) + highest = drvr->iflist[ifidx]->bsscfgidx; } else { available = true; } @@ -952,6 +933,98 @@ int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) return available ? bsscfgidx : -ENOMEM; } +#ifdef CONFIG_INET +#define ARPOL_MAX_ENTRIES 8 +static int brcmf_inetaddr_changed(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct brcmf_pub *drvr = container_of(nb, struct brcmf_pub, + inetaddr_notifier); + struct in_ifaddr *ifa = data; + struct net_device *ndev = ifa->ifa_dev->dev; + struct brcmf_if *ifp; + int idx, i, ret; + u32 val; + __be32 addr_table[ARPOL_MAX_ENTRIES] = {0}; + + /* Find out if the notification is meant for us */ + for (idx = 0; idx < BRCMF_MAX_IFS; idx++) { + ifp = drvr->iflist[idx]; + if (ifp && ifp->ndev == ndev) + break; + if (idx == BRCMF_MAX_IFS - 1) + return NOTIFY_DONE; + } + + /* check if arp offload is supported */ + ret = brcmf_fil_iovar_int_get(ifp, "arpoe", &val); + if (ret) + return NOTIFY_OK; + + /* old version only support primary index */ + ret = brcmf_fil_iovar_int_get(ifp, "arp_version", &val); + if (ret) + val = 1; + if (val == 1) + ifp = drvr->iflist[0]; + + /* retrieve the table from firmware */ + ret = brcmf_fil_iovar_data_get(ifp, "arp_hostip", addr_table, + sizeof(addr_table)); + if (ret) { + brcmf_err("fail to get arp ip table err:%d\n", ret); + return NOTIFY_OK; + } + + for (i = 0; i < ARPOL_MAX_ENTRIES; i++) + if (ifa->ifa_address == addr_table[i]) + break; + + switch (action) { + case NETDEV_UP: + if (i == ARPOL_MAX_ENTRIES) { + brcmf_dbg(TRACE, "add %pI4 to arp table\n", + &ifa->ifa_address); + /* set it directly */ + ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip", + &ifa->ifa_address, sizeof(ifa->ifa_address)); + if (ret) + brcmf_err("add arp ip err %d\n", ret); + } + break; + case NETDEV_DOWN: + if (i < ARPOL_MAX_ENTRIES) { + addr_table[i] = 0; + brcmf_dbg(TRACE, "remove %pI4 from arp table\n", + &ifa->ifa_address); + /* clear the table in firmware */ + ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", + NULL, 0); + if (ret) { + brcmf_err("fail to clear arp ip table err:%d\n", + ret); + return NOTIFY_OK; + } + for (i = 0; i < ARPOL_MAX_ENTRIES; i++) { + if (addr_table[i] != 0) { + brcmf_fil_iovar_data_set(ifp, + "arp_hostip", &addr_table[i], + sizeof(addr_table[i])); + if (ret) + brcmf_err("add arp ip err %d\n", + ret); + } + } + } + break; + default: + break; + } + + return NOTIFY_OK; +} +#endif + 
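The CONFIG_INET block above hooks an IPv4 address notifier so address changes on the driver's net devices are mirrored into the firmware ARP-offload table through the "arp_hostip" and "arp_hostip_clear" iovars; it is registered and unregistered in the brcmf_bus_start() and brcmf_detach() hunks that follow. A stripped-down sketch of that notifier pattern, using example_* names that are illustrative rather than part of the patch:

	#include <linux/inetdevice.h>
	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	/* Minimal IPv4 address notifier: the real brcmf_inetaddr_changed()
	 * additionally looks up the matching brcmf_if and rewrites the
	 * firmware "arp_hostip" table.
	 */
	static int example_inetaddr_changed(struct notifier_block *nb,
					    unsigned long action, void *data)
	{
		struct in_ifaddr *ifa = data;

		if (action == NETDEV_UP)
			pr_debug("add %pI4 to ARP offload table\n",
				 &ifa->ifa_address);
		else if (action == NETDEV_DOWN)
			pr_debug("drop %pI4 from ARP offload table\n",
				 &ifa->ifa_address);

		return NOTIFY_OK;
	}

	static struct notifier_block example_inetaddr_nb = {
		.notifier_call = example_inetaddr_changed,
	};

	/* Pair these in probe/remove, as brcmf_bus_start()/brcmf_detach() do:
	 *	register_inetaddr_notifier(&example_inetaddr_nb);
	 *	unregister_inetaddr_notifier(&example_inetaddr_nb);
	 */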
int brcmf_attach(struct device *dev) { struct brcmf_pub *drvr = NULL; @@ -975,6 +1048,10 @@ int brcmf_attach(struct device *dev) drvr->bus_if = dev_get_drvdata(dev); drvr->bus_if->drvr = drvr; + /* Initialize device specific settings */ + if (brcmf_mp_device_attach(drvr)) + goto fail; + /* attach debug facilities */ brcmf_debug_attach(drvr); @@ -1067,7 +1144,7 @@ int brcmf_bus_start(struct device *dev) brcmf_fws_add_interface(ifp); drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev, - brcmf_p2p_enable); + drvr->settings->p2p_enable); if (drvr->config == NULL) { ret = -ENOMEM; goto fail; @@ -1075,11 +1152,20 @@ int brcmf_bus_start(struct device *dev) ret = brcmf_net_attach(ifp, false); - if ((!ret) && (brcmf_p2p_enable)) { + if ((!ret) && (drvr->settings->p2p_enable)) { p2p_ifp = drvr->iflist[1]; if (p2p_ifp) ret = brcmf_net_p2p_attach(p2p_ifp); } + + if (ret) + goto fail; + +#ifdef CONFIG_INET + drvr->inetaddr_notifier.notifier_call = brcmf_inetaddr_changed; + ret = register_inetaddr_notifier(&drvr->inetaddr_notifier); +#endif + fail: if (ret < 0) { brcmf_err("failed: %d\n", ret); @@ -1095,6 +1181,10 @@ fail: brcmf_net_detach(ifp->ndev); if (p2p_ifp) brcmf_net_detach(p2p_ifp->ndev); + drvr->iflist[0] = NULL; + drvr->iflist[1] = NULL; + if (brcmf_ignoring_probe_fail(drvr)) + ret = 0; return ret; } return 0; @@ -1143,6 +1233,10 @@ void brcmf_detach(struct device *dev) if (drvr == NULL) return; +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&drvr->inetaddr_notifier); +#endif + /* stop firmware event handling */ brcmf_fweh_detach(drvr); if (drvr->config) @@ -1162,6 +1256,8 @@ void brcmf_detach(struct device *dev) brcmf_proto_detach(drvr); + brcmf_mp_device_detach(drvr); + brcmf_debug_detach(drvr); bus_if->drvr = NULL; kfree(drvr); @@ -1186,7 +1282,7 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp) err = wait_event_timeout(ifp->pend_8021x_wait, !brcmf_get_pend_8021x_cnt(ifp), - msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX)); + MAX_WAIT_FOR_8021X_TX); WARN_ON(!err); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 2f9101b2ad34..8f39435f976f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -69,8 +69,8 @@ struct brcmf_ampdu_rx_reorder { /* Forward decls for struct brcmf_pub (see below) */ struct brcmf_proto; /* device communication protocol info */ -struct brcmf_cfg80211_dev; /* cfg80211 device info */ -struct brcmf_fws_info; /* firmware signalling info */ +struct brcmf_fws_info; /* firmware signalling info */ +struct brcmf_mp_device; /* module paramateres, device specific */ /* * struct brcmf_rev_info @@ -141,6 +141,9 @@ struct brcmf_pub { #ifdef DEBUG struct dentry *dbgfs_dir; #endif + + struct notifier_block inetaddr_notifier; + struct brcmf_mp_device *settings; }; /* forward declarations */ @@ -174,7 +177,7 @@ enum brcmf_netif_stop_reason { * @multicast_work: worker object for multicast provisioning. * @fws_desc: interface specific firmware-signalling descriptor. * @ifidx: interface index in device firmware. - * @bssidx: index of bss associated with this interface. + * @bsscfgidx: index of bss associated with this interface. * @mac_addr: assigned mac address. * @netif_stop: bitmap indicates reason why netif queues are stopped. * @netif_stop_lock: spinlock for update netif_stop from multiple sources. 
@@ -190,7 +193,7 @@ struct brcmf_if { struct work_struct multicast_work; struct brcmf_fws_mac_descriptor *fws_desc; int ifidx; - s32 bssidx; + s32 bsscfgidx; u8 mac_addr[ETH_ALEN]; u8 netif_stop; spinlock_t netif_stop_lock; @@ -205,10 +208,10 @@ struct brcmf_skb_reorder_data { int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); /* Return pointer to interface name */ -char *brcmf_ifname(struct brcmf_pub *drvr, int idx); +char *brcmf_ifname(struct brcmf_if *ifp); struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx); int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); -struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, +struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, bool is_p2pdev, char *name, u8 *mac_addr); void brcmf_remove_interface(struct brcmf_if *ifp); int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 1299dccc78b4..e64557c35553 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -49,7 +49,7 @@ static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp, const struct brcmf_event_msg *evtmsg, void *data) { - brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); return brcmf_debug_create_memdump(ifp->drvr->bus_if, data, evtmsg->datalen); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index d0d9676f7f9d..6687812770cc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -17,6 +17,8 @@ #ifndef BRCMFMAC_DEBUG_H #define BRCMFMAC_DEBUG_H +#include <linux/net.h> /* net_ratelimit() */ + /* message levels */ #define BRCMF_TRACE_VAL 0x00000002 #define BRCMF_INFO_VAL 0x00000004 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 44bb30636690..1ffa95f1b8d2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -18,18 +18,16 @@ #include <linux/module.h> #include <brcm_hw_ids.h> +#include <brcmu_wifi.h> #include "core.h" #include "bus.h" #include "debug.h" #include "fwil.h" +#include "fwil_types.h" #include "feature.h" +#include "common.h" -/* Module param feature_disable (global for all devices) */ -static int brcmf_feature_disable; -module_param_named(feature_disable, brcmf_feature_disable, int, 0); -MODULE_PARM_DESC(feature_disable, "Disable features"); - /* * expand feature list to array of feature strings. */ @@ -40,6 +38,17 @@ static const char *brcmf_feat_names[] = { }; #undef BRCMF_FEAT_DEF +struct brcmf_feat_fwcap { + enum brcmf_feat_id feature; + const char * const fwcap_id; +}; + +static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = { + { BRCMF_FEAT_MBSS, "mbss" }, + { BRCMF_FEAT_MCHAN, "mchan" }, + { BRCMF_FEAT_P2P, "p2p" }, +}; + #ifdef DEBUG /* * expand quirk list to array of quirk strings. @@ -104,44 +113,53 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, } } -/** - * brcmf_feat_iovar_int_set() - determine feature through iovar set. - * - * @ifp: interface to query. - * @id: feature id. - * @name: iovar name. 
- */ -static void brcmf_feat_iovar_int_set(struct brcmf_if *ifp, - enum brcmf_feat_id id, char *name, u32 val) +static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) { - int err; - - err = brcmf_fil_iovar_int_set(ifp, name, val); - if (err == 0) { - brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); - ifp->drvr->feat_flags |= BIT(id); - } else { - brcmf_dbg(TRACE, "%s feature check failed: %d\n", - brcmf_feat_names[id], err); + char caps[256]; + enum brcmf_feat_id id; + int i; + + brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); + brcmf_dbg(INFO, "[ %s]\n", caps); + + for (i = 0; i < ARRAY_SIZE(brcmf_fwcap_map); i++) { + if (strnstr(caps, brcmf_fwcap_map[i].fwcap_id, sizeof(caps))) { + id = brcmf_fwcap_map[i].feature; + brcmf_dbg(INFO, "enabling feature: %s\n", + brcmf_feat_names[id]); + ifp->drvr->feat_flags |= BIT(id); + } } } void brcmf_feat_attach(struct brcmf_pub *drvr) { struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); + struct brcmf_pno_macaddr_le pfn_mac; + s32 err; + + brcmf_feat_firmware_capabilities(ifp); - brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); if (drvr->bus_if->wowl_supported) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); - if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID) - brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); - brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_P2P, "p2p"); - - if (brcmf_feature_disable) { + /* MBSS does not work for 43362 */ + if (drvr->bus_if->chip == BRCM_CC_43362_CHIP_ID) + ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_MBSS); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_RSDB, "rsdb_mode"); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable"); + + pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER; + err = brcmf_fil_iovar_data_get(ifp, "pfn_macaddr", &pfn_mac, + sizeof(pfn_mac)); + if (!err) + ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); + + if (drvr->settings->feature_disable) { brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", - ifp->drvr->feat_flags, brcmf_feature_disable); - ifp->drvr->feat_flags &= ~brcmf_feature_disable; + ifp->drvr->feat_flags, + drvr->settings->feature_disable); + ifp->drvr->feat_flags &= ~drvr->settings->feature_disable; } /* set chip related quirks */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index 6b381f799f22..2e2479d41337 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -24,13 +24,20 @@ * PNO: preferred network offload. * WOWL: Wake-On-WLAN. * P2P: peer-to-peer + * RSDB: Real Simultaneous Dual Band + * TDLS: Tunneled Direct Link Setup + * SCAN_RANDOM_MAC: Random MAC during (net detect) scheduled scan. 
*/ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ BRCMF_FEAT_DEF(MCHAN) \ BRCMF_FEAT_DEF(PNO) \ BRCMF_FEAT_DEF(WOWL) \ - BRCMF_FEAT_DEF(P2P) + BRCMF_FEAT_DEF(P2P) \ + BRCMF_FEAT_DEF(RSDB) \ + BRCMF_FEAT_DEF(TDLS) \ + BRCMF_FEAT_DEF(SCAN_RANDOM_MAC) + /* * Quirks: * diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 4248f3c80e78..1365c12b78fc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -23,15 +23,13 @@ #include "debug.h" #include "firmware.h" +#include "core.h" +#include "common.h" #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ #define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ -char brcmf_firmware_path[BRCMF_FW_PATH_LEN]; -module_param_string(alternative_fw_path, brcmf_firmware_path, - BRCMF_FW_PATH_LEN, 0440); - enum nvram_parser_state { IDLE, KEY, @@ -449,8 +447,7 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) if (raw_nvram) bcm47xx_nvram_release_contents(data); - if (fw) - release_firmware(fw); + release_firmware(fw); if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) goto fail; @@ -540,3 +537,45 @@ int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 0); } +int brcmf_fw_map_chip_to_name(u32 chip, u32 chiprev, + struct brcmf_firmware_mapping mapping_table[], + u32 table_size, char fw_name[BRCMF_FW_NAME_LEN], + char nvram_name[BRCMF_FW_NAME_LEN]) +{ + u32 i; + char end; + + for (i = 0; i < table_size; i++) { + if (mapping_table[i].chipid == chip && + mapping_table[i].revmask & BIT(chiprev)) + break; + } + + if (i == table_size) { + brcmf_err("Unknown chipid %d [%d]\n", chip, chiprev); + return -ENODEV; + } + + /* check if firmware path is provided by module parameter */ + if (brcmf_mp_global.firmware_path[0] != '\0') { + strlcpy(fw_name, brcmf_mp_global.firmware_path, + BRCMF_FW_NAME_LEN); + if ((nvram_name) && (mapping_table[i].nvram)) + strlcpy(nvram_name, brcmf_mp_global.firmware_path, + BRCMF_FW_NAME_LEN); + + end = brcmf_mp_global.firmware_path[ + strlen(brcmf_mp_global.firmware_path) - 1]; + if (end != '/') { + strlcat(fw_name, "/", BRCMF_FW_NAME_LEN); + if ((nvram_name) && (mapping_table[i].nvram)) + strlcat(nvram_name, "/", BRCMF_FW_NAME_LEN); + } + } + strlcat(fw_name, mapping_table[i].fw, BRCMF_FW_NAME_LEN); + if ((nvram_name) && (mapping_table[i].nvram)) + strlcat(nvram_name, mapping_table[i].nvram, BRCMF_FW_NAME_LEN); + + return 0; +} + diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 604dd48ab4e0..ef06f57a7a0e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -21,11 +21,51 @@ #define BRCMF_FW_REQ_FLAGS 0x00F0 #define BRCMF_FW_REQ_NV_OPTIONAL 0x0010 -#define BRCMF_FW_PATH_LEN 256 -#define BRCMF_FW_NAME_LEN 32 +#define BRCMF_FW_NAME_LEN 320 -extern char brcmf_firmware_path[]; +#define BRCMF_FW_DEFAULT_PATH "brcm/" +/** + * struct brcmf_firmware_mapping - Used to map chipid/revmask to firmware + * filename and nvram filename. Each bus type implementation should create + * a table of firmware mappings (using the macros defined below). + * + * @chipid: ID of chip. + * @revmask: bitmask of revisions, e.g. 0x10 means rev 4 only, 0xf means rev 0-3 + * @fw: name of the firmware file. + * @nvram: name of nvram file. 
+ */ +struct brcmf_firmware_mapping { + u32 chipid; + u32 revmask; + const char *fw; + const char *nvram; +}; + +#define BRCMF_FW_NVRAM_DEF(fw_nvram_name, fw, nvram) \ +static const char BRCM_ ## fw_nvram_name ## _FIRMWARE_NAME[] = \ + BRCMF_FW_DEFAULT_PATH fw; \ +static const char BRCM_ ## fw_nvram_name ## _NVRAM_NAME[] = \ + BRCMF_FW_DEFAULT_PATH nvram; \ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw); \ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH nvram) + +#define BRCMF_FW_DEF(fw_name, fw) \ +static const char BRCM_ ## fw_name ## _FIRMWARE_NAME[] = \ + BRCMF_FW_DEFAULT_PATH fw; \ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw) \ + +#define BRCMF_FW_NVRAM_ENTRY(chipid, mask, name) \ + { chipid, mask, \ + BRCM_ ## name ## _FIRMWARE_NAME, BRCM_ ## name ## _NVRAM_NAME } + +#define BRCMF_FW_ENTRY(chipid, mask, name) \ + { chipid, mask, BRCM_ ## name ## _FIRMWARE_NAME, NULL } + +int brcmf_fw_map_chip_to_name(u32 chip, u32 chiprev, + struct brcmf_firmware_mapping mapping_table[], + u32 table_size, char fw_name[BRCMF_FW_NAME_LEN], + char nvram_name[BRCMF_FW_NAME_LEN]); void brcmf_fw_nvram_free(void *nvram); /* * Request firmware(s) asynchronously. When the asynchronous request diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c index 2ca783fa50cf..2ca783fa50cf 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h index 95fd1c9675d1..95fd1c9675d1 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 3878b6f6cfce..7b26fb1b437c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -182,8 +182,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, bool is_p2pdev; int err = 0; - brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u role: %u\n", - ifevent->action, ifevent->ifidx, ifevent->bssidx, + brcmf_dbg(EVENT, "action: %u ifidx: %u bsscfgidx: %u flags: %u role: %u\n", + ifevent->action, ifevent->ifidx, ifevent->bsscfgidx, ifevent->flags, ifevent->role); /* The P2P Device interface event must not be ignored contrary to what @@ -204,12 +204,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; } - ifp = drvr->iflist[ifevent->bssidx]; + ifp = drvr->iflist[ifevent->bsscfgidx]; if (ifevent->action == BRCMF_E_IF_ADD) { brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname, emsg->addr); - ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx, + ifp = brcmf_add_if(drvr, ifevent->bsscfgidx, ifevent->ifidx, is_p2pdev, emsg->ifname, emsg->addr); if (IS_ERR(ifp)) return; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index d9a942842382..5e39e2a9e388 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -219,7 +219,7 @@ struct brcmf_if_event { u8 ifidx; u8 action; u8 flags; - u8 bssidx; + u8 bsscfgidx; u8 role; }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index dcfa0bb149ce..f6a2df94dba7 100644 --- 
a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -126,7 +126,8 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) brcmf_dbg(FIL, "Failed: %s (%d)\n", brcmf_fil_get_errstr((u32)(-err)), err); - return -EBADE; + + return err; } s32 @@ -293,22 +294,22 @@ brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data) } static u32 -brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf, - u32 buflen) +brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen, + char *buf, u32 buflen) { const s8 *prefix = "bsscfg:"; s8 *p; u32 prefixlen; u32 namelen; u32 iolen; - __le32 bssidx_le; + __le32 bsscfgidx_le; - if (bssidx == 0) + if (bsscfgidx == 0) return brcmf_create_iovar(name, data, datalen, buf, buflen); prefixlen = strlen(prefix); namelen = strlen(name) + 1; /* lengh of iovar name + null */ - iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen; + iolen = prefixlen + namelen + sizeof(bsscfgidx_le) + datalen; if (buflen < iolen) { brcmf_err("buffer is too short\n"); @@ -326,9 +327,9 @@ brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf, p += namelen; /* bss config index as first data */ - bssidx_le = cpu_to_le32(bssidx); - memcpy(p, &bssidx_le, sizeof(bssidx_le)); - p += sizeof(bssidx_le); + bsscfgidx_le = cpu_to_le32(bsscfgidx); + memcpy(p, &bsscfgidx_le, sizeof(bsscfgidx_le)); + p += sizeof(bsscfgidx_le); /* parameter buffer follows */ if (datalen) @@ -347,12 +348,12 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, mutex_lock(&drvr->proto_block); - brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx, - ifp->bssidx, name, len); + brcmf_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d\n", ifp->ifidx, + ifp->bsscfgidx, name, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); - buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len, + buflen = brcmf_create_bsscfg(ifp->bsscfgidx, name, data, len, drvr->proto_buf, sizeof(drvr->proto_buf)); if (buflen) { err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf, @@ -376,7 +377,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, mutex_lock(&drvr->proto_block); - buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len, + buflen = brcmf_create_bsscfg(ifp->bsscfgidx, name, data, len, drvr->proto_buf, sizeof(drvr->proto_buf)); if (buflen) { err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf, @@ -387,8 +388,8 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, err = -EPERM; brcmf_err("Creating bsscfg failed\n"); } - brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx, - ifp->bssidx, name, len); + brcmf_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d\n", ifp->ifidx, + ifp->bsscfgidx, name, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index b20fc0f82a48..6b72df17744e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -70,6 +70,7 @@ #define BRCMF_C_SET_WSEC 134 #define BRCMF_C_GET_PHY_NOISE 135 #define BRCMF_C_GET_BSS_INFO 136 +#define BRCMF_C_GET_GET_PKTCNTS 137 #define BRCMF_C_GET_BANDLIST 140 #define BRCMF_C_SET_SCB_TIMEOUT 158 #define BRCMF_C_GET_ASSOCLIST 159 diff --git 
a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index daa427b46712..1afc2ad83b6c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -110,6 +110,8 @@ #define BRCMF_WOWL_UNASSOC (1 << 24) /* Wakeup if received matched secured pattern: */ #define BRCMF_WOWL_SECURE (1 << 25) +/* Wakeup on finding preferred network */ +#define BRCMF_WOWL_PFN_FOUND (1 << 26) /* Link Down indication in WoWL mode: */ #define BRCMF_WOWL_LINKDOWN (1 << 31) @@ -121,6 +123,17 @@ #define BRCMF_MAX_ASSOCLIST 128 +#define BRCMF_TXBF_SU_BFE_CAP BIT(0) +#define BRCMF_TXBF_MU_BFE_CAP BIT(1) +#define BRCMF_TXBF_SU_BFR_CAP BIT(0) +#define BRCMF_TXBF_MU_BFR_CAP BIT(1) + +#define BRCMF_MAXPMKID 16 /* max # PMKID cache entries */ + +#define BRCMF_PFN_MACADDR_CFG_VER 1 +#define BRCMF_PFN_MAC_OUI_ONLY BIT(0) +#define BRCMF_PFN_SET_MAC_UNASSOC BIT(1) + /* join preference types for join_pref iovar */ enum brcmf_join_pref_types { BRCMF_JOIN_PREF_RSSI = 1, @@ -170,7 +183,7 @@ struct brcmf_fil_af_params_le { }; struct brcmf_fil_bss_enable_le { - __le32 bsscfg_idx; + __le32 bsscfgidx; __le32 enable; }; @@ -282,14 +295,9 @@ struct brcm_rateset_le { u8 rates[BRCMF_MAXRATES_IN_SET]; }; -struct brcmf_ssid { - u32 SSID_len; - unsigned char SSID[32]; -}; - struct brcmf_ssid_le { __le32 SSID_len; - unsigned char SSID[32]; + unsigned char SSID[IEEE80211_MAX_SSID_LEN]; }; struct brcmf_scan_params_le { @@ -634,4 +642,149 @@ struct brcmf_assoclist_le { u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN]; }; +/** + * struct brcmf_wowl_wakeind_le - Wakeup indicators + * Note: note both fields contain same information. + * + * @pci_wakeind: Whether PCI PMECSR PMEStatus bit was set. + * @ucode_wakeind: What wakeup-event indication was set by ucode + */ +struct brcmf_wowl_wakeind_le { + __le32 pci_wakeind; + __le32 ucode_wakeind; +}; + +/** + * struct brcmf_pmksa - PMK Security Association + * + * @bssid: The AP's BSSID. + * @pmkid: he PMK material itself. + */ +struct brcmf_pmksa { + u8 bssid[ETH_ALEN]; + u8 pmkid[WLAN_PMKID_LEN]; +}; + +/** + * struct brcmf_pmk_list_le - List of pmksa's. + * + * @npmk: Number of pmksa's. + * @pmk: PMK SA information. + */ +struct brcmf_pmk_list_le { + __le32 npmk; + struct brcmf_pmksa pmk[BRCMF_MAXPMKID]; +}; + +/** + * struct brcmf_pno_param_le - PNO scan configuration parameters + * + * @version: PNO parameters version. + * @scan_freq: scan frequency. + * @lost_network_timeout: #sec. to declare discovered network as lost. + * @flags: Bit field to control features of PFN such as sort criteria auto + * enable switch and background scan. + * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort + * criteria. + * @bestn: number of best networks in each scan. + * @mscan: number of scans recorded. + * @repeat: minimum number of scan intervals before scan frequency changes + * in adaptive scan. + * @exp: exponent of 2 for maximum scan interval. + * @slow_freq: slow scan period. + */ +struct brcmf_pno_param_le { + __le32 version; + __le32 scan_freq; + __le32 lost_network_timeout; + __le16 flags; + __le16 rssi_margin; + u8 bestn; + u8 mscan; + u8 repeat; + u8 exp; + __le32 slow_freq; +}; + +/** + * struct brcmf_pno_net_param_le - scan parameters per preferred network. + * + * @ssid: ssid name and its length. + * @flags: bit2: hidden. + * @infra: BSS vs IBSS. + * @auth: Open vs Closed. + * @wpa_auth: WPA type. + * @wsec: wsec value. 
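For reference, a minimal sketch of how the new PMKSA list type above might be filled and pushed to firmware. Only struct brcmf_pmksa, struct brcmf_pmk_list_le and BRCMF_MAXPMKID come from this patch; the helper name is hypothetical, and "pmkid_info" / brcmf_fil_iovar_data_set are assumed to be the existing iovar path for this data:

static int brcmf_pmk_list_push(struct brcmf_if *ifp,
			       struct brcmf_pmk_list_le *pmk_list, u32 npmk)
{
	if (npmk > BRCMF_MAXPMKID)
		return -EINVAL;

	/* the entry count travels little-endian; bssid/pmkid are raw bytes */
	pmk_list->npmk = cpu_to_le32(npmk);

	return brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
					sizeof(*pmk_list));
}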
+ */ +struct brcmf_pno_net_param_le { + struct brcmf_ssid_le ssid; + __le32 flags; + __le32 infra; + __le32 auth; + __le32 wpa_auth; + __le32 wsec; +}; + +/** + * struct brcmf_pno_net_info_le - information per found network. + * + * @bssid: BSS network identifier. + * @channel: channel number only. + * @SSID_len: length of ssid. + * @SSID: ssid characters. + * @RSSI: receive signal strength (in dBm). + * @timestamp: age in seconds. + */ +struct brcmf_pno_net_info_le { + u8 bssid[ETH_ALEN]; + u8 channel; + u8 SSID_len; + u8 SSID[32]; + __le16 RSSI; + __le16 timestamp; +}; + +/** + * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event. + * + * @version: PNO version identifier. + * @status: indicates completion status of PNO scan. + * @count: amount of brcmf_pno_net_info_le entries appended. + */ +struct brcmf_pno_scanresults_le { + __le32 version; + __le32 status; + __le32 count; +}; + +/** + * struct brcmf_pno_macaddr_le - to configure PNO macaddr randomization. + * + * @version: PNO version identifier. + * @flags: Flags defining how mac addrss should be used. + * @mac: MAC address. + */ +struct brcmf_pno_macaddr_le { + u8 version; + u8 flags; + u8 mac[ETH_ALEN]; +}; + +/** + * struct brcmf_pktcnt_le - packet counters. + * + * @rx_good_pkt: packets (MSDUs & MMPDUs) received from this station + * @rx_bad_pkt: failed rx packets + * @tx_good_pkt: packets (MSDUs & MMPDUs) transmitted to this station + * @tx_bad_pkt: failed tx packets + * @rx_ocast_good_pkt: unicast packets destined for others + */ +struct brcmf_pktcnt_le { + __le32 rx_good_pkt; + __le32 rx_bad_pkt; + __le32 tx_good_pkt; + __le32 tx_bad_pkt; + __le32 rx_ocast_good_pkt; +}; + #endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 086cac3f86d6..f82c9ab5480b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -36,6 +36,7 @@ #include "p2p.h" #include "cfg80211.h" #include "proto.h" +#include "common.h" /** * DOC: Firmware Signalling @@ -521,10 +522,6 @@ static const int brcmf_fws_prio2fifo[] = { BRCMF_FWS_FIFO_AC_VO }; -static int fcmode; -module_param(fcmode, int, S_IRUSR); -MODULE_PARM_DESC(fcmode, "mode of firmware signalled flow control"); - #define BRCMF_FWS_TLV_DEF(name, id, len) \ case BRCMF_FWS_TYPE_ ## name: \ return len; @@ -719,7 +716,7 @@ static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc, desc->state = BRCMF_FWS_STATE_OPEN; desc->requested_credit = 0; desc->requested_packet = 0; - /* depending on use may need ifp->bssidx instead */ + /* depending on use may need ifp->bsscfgidx instead */ desc->interface_id = ifidx; desc->ac_bitmap = 0xff; /* update this when handling APSD */ if (addr) @@ -1609,10 +1606,11 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp, { struct brcmf_fws_info *fws = ifp->drvr->fws; - brcmf_fws_lock(fws); - if (fws) + if (fws) { + brcmf_fws_lock(fws); fws->bcmc_credit_check = true; - brcmf_fws_unlock(fws); + brcmf_fws_unlock(fws); + } return 0; } @@ -1938,7 +1936,7 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp) { struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; - brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); + brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); if (!entry) return; @@ -2133,10 +2131,10 @@ int brcmf_fws_init(struct brcmf_pub *drvr) /* set linkage back */ fws->drvr = drvr; - fws->fcmode = fcmode; + fws->fcmode = 
drvr->settings->fcmode; if ((drvr->bus_if->always_use_fws_queue == false) && - (fcmode == BRCMF_FWS_FCMODE_NONE)) { + (fws->fcmode == BRCMF_FWS_FCMODE_NONE)) { fws->avoid_queueing = true; brcmf_dbg(INFO, "FWS queueing will be avoided\n"); return 0; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index a36bac17eafd..a36bac17eafd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 44e618f9d890..c2bdb91746cf 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -34,7 +34,7 @@ #include "tracepoint.h" -#define MSGBUF_IOCTL_RESP_TIMEOUT 2000 +#define MSGBUF_IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) #define MSGBUF_TYPE_GEN_STATUS 0x1 #define MSGBUF_TYPE_RING_STATUS 0x2 @@ -466,15 +466,14 @@ static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf) { return wait_event_timeout(msgbuf->ioctl_resp_wait, msgbuf->ctl_completed, - msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT)); + MSGBUF_IOCTL_RESP_TIMEOUT); } static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) { msgbuf->ctl_completed = true; - if (waitqueue_active(&msgbuf->ioctl_resp_wait)) - wake_up(&msgbuf->ioctl_resp_wait); + wake_up(&msgbuf->ioctl_resp_wait); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h index 3d513e407e3d..3d513e407e3d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 03f35e0c52ca..03f35e0c52ca 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h index 5f7c3550deda..5f7c3550deda 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index d224b3dd72ed..821b6494f9d1 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -71,10 +71,10 @@ #define P2P_AF_MED_DWELL_TIME 400 #define P2P_AF_LONG_DWELL_TIME 1000 #define P2P_AF_TX_MAX_RETRY 1 -#define P2P_AF_MAX_WAIT_TIME 2000 +#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000) #define P2P_INVALID_CHANNEL -1 #define P2P_CHANNEL_SYNC_RETRY 5 -#define P2P_AF_FRM_SCAN_MAX_WAIT 1500 +#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500) #define P2P_DEFAULT_SLEEP_TIME_VSDB 200 /* WiFi P2P Public Action Frame OUI Subtypes */ @@ -102,6 +102,7 @@ #define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comback Request AF */ #define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comback Response AF */ +#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500) /** * struct brcmf_p2p_disc_st_le - set discovery state in firmware. * @@ -625,11 +626,10 @@ exit: * @num_chans: number of channels to scan. * @chanspecs: channel parameters for @num_chans channels. * @search_state: P2P discover state to use. - * @action: scan action to pass to firmware. 
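The conversion above repeats across this series: timeout constants are expressed in jiffies at their definition, wait sites pass them straight to wait_event_timeout(), and the waitqueue_active() guard before wake_up() is dropped, since waking an empty queue is harmless and the guard is racy without extra barriers. A generic sketch of the resulting shape, using placeholder names (RESP_TIMEOUT, resp_wait, resp_done) rather than driver code:

#define RESP_TIMEOUT	msecs_to_jiffies(2000)

static DECLARE_WAIT_QUEUE_HEAD(resp_wait);
static bool resp_done;

static long wait_for_resp(void)
{
	/* 0 on timeout, otherwise the jiffies remaining when resp_done was set */
	return wait_event_timeout(resp_wait, resp_done, RESP_TIMEOUT);
}

static void resp_wake(void)
{
	resp_done = true;
	wake_up(&resp_wait);	/* safe even when nobody is waiting */
}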
* @bss_type: type of P2P bss. */ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, - u16 chanspecs[], s32 search_state, u16 action, + u16 chanspecs[], s32 search_state, enum p2p_bss_type bss_type) { s32 ret = 0; @@ -642,7 +642,6 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, struct brcmf_cfg80211_vif *vif; struct brcmf_p2p_scan_le *p2p_params; struct brcmf_scan_params_le *sparams; - struct brcmf_ssid ssid; memsize += num_chans * sizeof(__le16); memblk = kzalloc(memsize, GFP_KERNEL); @@ -655,16 +654,16 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, ret = -EINVAL; goto exit; } + p2p_params = (struct brcmf_p2p_scan_le *)memblk; + sparams = &p2p_params->eparams.params_le; switch (search_state) { case WL_P2P_DISC_ST_SEARCH: /* * If we in SEARCH STATE, we don't need to set SSID explictly - * because dongle use P2P WILDCARD internally by default + * because dongle use P2P WILDCARD internally by default, use + * null ssid, which it is already due to kzalloc. */ - /* use null ssid */ - ssid.SSID_len = 0; - memset(ssid.SSID, 0, sizeof(ssid.SSID)); break; case WL_P2P_DISC_ST_SCAN: /* @@ -673,8 +672,10 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, * P2P WILDCARD because we just do broadcast scan unless * setting SSID. */ - ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN; - memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len); + sparams->ssid_le.SSID_len = + cpu_to_le32(BRCMF_P2P_WILDCARD_SSID_LEN); + memcpy(sparams->ssid_le.SSID, BRCMF_P2P_WILDCARD_SSID, + BRCMF_P2P_WILDCARD_SSID_LEN); break; default: brcmf_err(" invalid search state %d\n", search_state); @@ -687,11 +688,9 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, /* * set p2p scan parameters. 
*/ - p2p_params = (struct brcmf_p2p_scan_le *)memblk; p2p_params->type = 'E'; /* determine the scan engine parameters */ - sparams = &p2p_params->eparams.params_le; sparams->bss_type = DOT11_BSSTYPE_ANY; if (p2p->cfg->active_scan) sparams->scan_type = 0; @@ -699,9 +698,6 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, sparams->scan_type = 1; eth_broadcast_addr(sparams->bssid); - if (ssid.SSID_len) - memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len); - sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS); /* @@ -742,7 +738,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, /* set the escan specific parameters */ p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); - p2p_params->eparams.action = cpu_to_le16(action); + p2p_params->eparams.action = cpu_to_le16(WL_ESCAN_ACTION_START); p2p_params->eparams.sync_id = cpu_to_le16(0x1234); /* perform p2p scan on primary device */ ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize); @@ -766,8 +762,7 @@ exit: */ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, - struct cfg80211_scan_request *request, - u16 action) + struct cfg80211_scan_request *request) { struct brcmf_p2p_info *p2p = &cfg->p2p; s32 err = 0; @@ -827,7 +822,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, num_nodfs++; } err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state, - action, P2PAPI_BSSCFG_DEVICE); + P2PAPI_BSSCFG_DEVICE); kfree(chanspecs); } exit: @@ -1096,8 +1091,7 @@ static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel) default_chan_list[2] = ch.chspec; } err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list, - WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START, - P2PAPI_BSSCFG_DEVICE); + WL_P2P_DISC_ST_SEARCH, P2PAPI_BSSCFG_DEVICE); kfree(default_chan_list); exit: return err; @@ -1521,7 +1515,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, p2p->af_tx_sent_jiffies = jiffies; timeout = wait_for_completion_timeout(&p2p->send_af_done, - msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME)); + P2P_AF_MAX_WAIT_TIME); if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) { brcmf_dbg(TRACE, "TX action frame operation is success\n"); @@ -1995,7 +1989,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, return err; } err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE, - msecs_to_jiffies(1500)); + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { brcmf_err("No BRCMF_E_IF_CHANGE event received\n"); @@ -2067,7 +2061,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, struct brcmf_if *p2p_ifp; struct brcmf_if *pri_ifp; int err; - u32 bssidx; + u32 bsscfgidx; if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif) return ERR_PTR(-ENOSPC); @@ -2097,7 +2091,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, /* wait for firmware event */ err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD, - msecs_to_jiffies(1500)); + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL); brcmf_fweh_p2pdev_setup(pri_ifp, false); if (!err) { @@ -2113,13 +2107,13 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, memcpy(&p2p_vif->wdev.address, p2p->dev_addr, sizeof(p2p->dev_addr)); /* verify bsscfg index for P2P discovery */ - err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx); + 
err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bsscfgidx); if (err < 0) { brcmf_err("retrieving discover bsscfg index failed\n"); goto fail; } - WARN_ON(p2p_ifp->bssidx != bssidx); + WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx); init_completion(&p2p->send_af_done); INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); @@ -2187,7 +2181,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, /* wait for firmware event */ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD, - msecs_to_jiffies(1500)); + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { brcmf_err("timeout occurred\n"); @@ -2237,7 +2231,6 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); struct brcmf_p2p_info *p2p = &cfg->p2p; struct brcmf_cfg80211_vif *vif; - unsigned long jiffie_timeout = msecs_to_jiffies(1500); bool wait_for_disable = false; int err; @@ -2270,7 +2263,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) if (wait_for_disable) wait_for_completion_timeout(&cfg->vif_disabled, - msecs_to_jiffies(500)); + BRCMF_P2P_DISABLE_TIMEOUT); err = 0; if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) { @@ -2280,7 +2273,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) if (!err) { /* wait for firmware event */ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL, - jiffie_timeout); + BRCMF_VIF_EVENT_TIMEOUT); if (!err) err = -EIO; else diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index 5d49059021a9..a3bd18c2360b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -112,7 +112,6 @@ struct afx_hdl { * @int_addr: P2P interface address. * @bss_idx: informate for P2P bss types. * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state. - * @ssid: ssid for P2P GO. * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state. * @remain_on_channel: contains copy of struct used by cfg80211. 
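The msecs_to_jiffies(1500) literals above are all replaced by BRCMF_VIF_EVENT_TIMEOUT, so interface add/change/delete now share one arm/fire/wait shape. A condensed sketch of that shape; the helper name is hypothetical and the firmware command is elided, only the arm/wait calls and the 0-means-timeout convention come from the code above:

static int brcmf_wait_if_add(struct brcmf_cfg80211_info *cfg,
			     struct brcmf_cfg80211_vif *vif)
{
	int err;

	brcmf_cfg80211_arm_vif_event(cfg, vif);

	/* ... issue the firmware command that creates the interface ... */

	/* a return of 0 means BRCMF_E_IF_ADD never arrived in time */
	err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
						    BRCMF_VIF_EVENT_TIMEOUT);
	brcmf_cfg80211_arm_vif_event(cfg, NULL);

	return err ? 0 : -EIO;
}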
* @remain_on_channel_cookie: cookie counter for remain on channel cmd @@ -133,7 +132,6 @@ struct brcmf_p2p_info { u8 int_addr[ETH_ALEN]; struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX]; struct timer_list listen_timer; - struct brcmf_ssid ssid; u8 listen_channel; struct ieee80211_channel remain_on_channel; u32 remain_on_channel_cookie; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 83d804221715..0480b70e3eb8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -44,23 +44,31 @@ enum brcmf_pcie_state { BRCMFMAC_PCIE_STATE_UP }; - -#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin" -#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt" -#define BRCMF_PCIE_4350_FW_NAME "brcm/brcmfmac4350-pcie.bin" -#define BRCMF_PCIE_4350_NVRAM_NAME "brcm/brcmfmac4350-pcie.txt" -#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin" -#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt" -#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin" -#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt" -#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin" -#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt" -#define BRCMF_PCIE_4365_FW_NAME "brcm/brcmfmac4365b-pcie.bin" -#define BRCMF_PCIE_4365_NVRAM_NAME "brcm/brcmfmac4365b-pcie.txt" -#define BRCMF_PCIE_4366_FW_NAME "brcm/brcmfmac4366b-pcie.bin" -#define BRCMF_PCIE_4366_NVRAM_NAME "brcm/brcmfmac4366b-pcie.txt" -#define BRCMF_PCIE_4371_FW_NAME "brcm/brcmfmac4371-pcie.bin" -#define BRCMF_PCIE_4371_NVRAM_NAME "brcm/brcmfmac4371-pcie.txt" +BRCMF_FW_NVRAM_DEF(43602, "brcmfmac43602-pcie.bin", "brcmfmac43602-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4350, "brcmfmac4350-pcie.bin", "brcmfmac4350-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4350C, "brcmfmac4350c2-pcie.bin", "brcmfmac4350c2-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-pcie.bin", "brcmfmac4356-pcie.txt"); +BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt"); + +static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFFF, 4366B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), +}; #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */ @@ -183,7 +191,7 @@ enum brcmf_pcie_state { #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010 -#define 
BRCMF_PCIE_MBDATA_TIMEOUT 2000 +#define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000) #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C @@ -200,24 +208,6 @@ enum brcmf_pcie_state { #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3 -MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4350_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4350_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME); -MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME); - - struct brcmf_pcie_console { u32 base_addr; u32 buf_addr; @@ -253,10 +243,9 @@ struct brcmf_pcie_core_info { struct brcmf_pciedev_info { enum brcmf_pcie_state state; bool in_irq; - bool irq_requested; struct pci_dev *pdev; - char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; - char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; + char fw_name[BRCMF_FW_NAME_LEN]; + char nvram_name[BRCMF_FW_NAME_LEN]; void __iomem *regs; void __iomem *tcm; u32 tcm_size; @@ -622,7 +611,9 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, brcmf_chip_resetcore(core, 0, 0, 0); } - return !brcmf_chip_set_active(devinfo->ci, resetintr); + if (!brcmf_chip_set_active(devinfo->ci, resetintr)) + return -EINVAL; + return 0; } @@ -885,7 +876,6 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) brcmf_dbg(PCIE, "Enter\n"); /* is it a v1 or v2 implementation */ - devinfo->irq_requested = false; pci_enable_msi(pdev); if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { if (request_threaded_irq(pdev->irq, @@ -908,7 +898,6 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) return -EIO; } } - devinfo->irq_requested = true; devinfo->irq_allocated = true; return 0; } @@ -926,9 +915,6 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) pdev = devinfo->pdev; brcmf_pcie_intr_disable(devinfo); - if (!devinfo->irq_requested) - return; - devinfo->irq_requested = false; free_irq(pdev->irq, devinfo); pci_disable_msi(pdev); @@ -1390,10 +1376,6 @@ static void brcmf_pcie_wowl_config(struct device *dev, bool enabled) brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); devinfo->wowl_enabled = enabled; - if (enabled) - device_set_wakeup_enable(&devinfo->pdev->dev, true); - else - device_set_wakeup_enable(&devinfo->pdev->dev, false); } @@ -1419,7 +1401,7 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len) } -static struct brcmf_bus_ops brcmf_pcie_bus_ops = { +static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, .txctl = brcmf_pcie_tx_ctlpkt, @@ -1484,80 +1466,6 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, } -static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo) -{ - char *fw_name; - char *nvram_name; - uint fw_len, nv_len; - char end; - - brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip, - devinfo->ci->chiprev); - - switch (devinfo->ci->chip) { - case BRCM_CC_43602_CHIP_ID: - fw_name = BRCMF_PCIE_43602_FW_NAME; - nvram_name = 
BRCMF_PCIE_43602_NVRAM_NAME; - break; - case BRCM_CC_4350_CHIP_ID: - fw_name = BRCMF_PCIE_4350_FW_NAME; - nvram_name = BRCMF_PCIE_4350_NVRAM_NAME; - break; - case BRCM_CC_4356_CHIP_ID: - fw_name = BRCMF_PCIE_4356_FW_NAME; - nvram_name = BRCMF_PCIE_4356_NVRAM_NAME; - break; - case BRCM_CC_43567_CHIP_ID: - case BRCM_CC_43569_CHIP_ID: - case BRCM_CC_43570_CHIP_ID: - fw_name = BRCMF_PCIE_43570_FW_NAME; - nvram_name = BRCMF_PCIE_43570_NVRAM_NAME; - break; - case BRCM_CC_4358_CHIP_ID: - fw_name = BRCMF_PCIE_4358_FW_NAME; - nvram_name = BRCMF_PCIE_4358_NVRAM_NAME; - break; - case BRCM_CC_4365_CHIP_ID: - fw_name = BRCMF_PCIE_4365_FW_NAME; - nvram_name = BRCMF_PCIE_4365_NVRAM_NAME; - break; - case BRCM_CC_4366_CHIP_ID: - fw_name = BRCMF_PCIE_4366_FW_NAME; - nvram_name = BRCMF_PCIE_4366_NVRAM_NAME; - break; - case BRCM_CC_4371_CHIP_ID: - fw_name = BRCMF_PCIE_4371_FW_NAME; - nvram_name = BRCMF_PCIE_4371_NVRAM_NAME; - break; - default: - brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip); - return -ENODEV; - } - - fw_len = sizeof(devinfo->fw_name) - 1; - nv_len = sizeof(devinfo->nvram_name) - 1; - /* check if firmware path is provided by module parameter */ - if (brcmf_firmware_path[0] != '\0') { - strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len); - strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len); - fw_len -= strlen(devinfo->fw_name); - nv_len -= strlen(devinfo->nvram_name); - - end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; - if (end != '/') { - strncat(devinfo->fw_name, "/", fw_len); - strncat(devinfo->nvram_name, "/", nv_len); - fw_len--; - nv_len--; - } - } - strncat(devinfo->fw_name, fw_name, fw_len); - strncat(devinfo->nvram_name, nvram_name, nv_len); - - return 0; -} - - static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, const struct firmware *fw, void *nvram, u32 nvram_len) @@ -1893,7 +1801,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); dev_set_drvdata(&pdev->dev, bus); - ret = brcmf_pcie_get_fwnames(devinfo); + ret = brcmf_fw_map_chip_to_name(devinfo->ci->chip, devinfo->ci->chiprev, + brcmf_pcie_fwnames, + ARRAY_SIZE(brcmf_pcie_fwnames), + devinfo->fw_name, devinfo->nvram_name); if (ret) goto fail_bus; @@ -1959,15 +1870,14 @@ brcmf_pcie_remove(struct pci_dev *pdev) #ifdef CONFIG_PM -static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state) +static int brcmf_pcie_pm_enter_D3(struct device *dev) { struct brcmf_pciedev_info *devinfo; struct brcmf_bus *bus; - int err; - brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev); + brcmf_dbg(PCIE, "Enter\n"); - bus = dev_get_drvdata(&pdev->dev); + bus = dev_get_drvdata(dev); devinfo = bus->bus_priv.pcie->devinfo; brcmf_bus_change_state(bus, BRCMF_BUS_DOWN); @@ -1975,69 +1885,51 @@ static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state) devinfo->mbdata_completed = false; brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM); - wait_event_timeout(devinfo->mbdata_resp_wait, - devinfo->mbdata_completed, - msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT)); + wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed, + BRCMF_PCIE_MBDATA_TIMEOUT); if (!devinfo->mbdata_completed) { brcmf_err("Timeout on response for entering D3 substate\n"); return -EIO; } - brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE); - err = pci_save_state(pdev); - if (err) - brcmf_err("pci_save_state failed, err=%d\n", err); - if ((err) || (!devinfo->wowl_enabled)) { - 
brcmf_chip_detach(devinfo->ci); - devinfo->ci = NULL; - brcmf_pcie_remove(pdev); - return 0; - } + devinfo->state = BRCMFMAC_PCIE_STATE_DOWN; - return pci_prepare_to_sleep(pdev); + return 0; } -static int brcmf_pcie_resume(struct pci_dev *pdev) + +static int brcmf_pcie_pm_leave_D3(struct device *dev) { struct brcmf_pciedev_info *devinfo; struct brcmf_bus *bus; + struct pci_dev *pdev; int err; - bus = dev_get_drvdata(&pdev->dev); - brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus); + brcmf_dbg(PCIE, "Enter\n"); - err = pci_set_power_state(pdev, PCI_D0); - if (err) { - brcmf_err("pci_set_power_state failed, err=%d\n", err); - goto cleanup; - } - pci_restore_state(pdev); - pci_enable_wake(pdev, PCI_D3hot, false); - pci_enable_wake(pdev, PCI_D3cold, false); + bus = dev_get_drvdata(dev); + devinfo = bus->bus_priv.pcie->devinfo; + brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus); /* Check if device is still up and running, if so we are ready */ - if (bus) { - devinfo = bus->bus_priv.pcie->devinfo; - if (brcmf_pcie_read_reg32(devinfo, - BRCMF_PCIE_PCIE2REG_INTMASK) != 0) { - if (brcmf_pcie_send_mb_data(devinfo, - BRCMF_H2D_HOST_D0_INFORM)) - goto cleanup; - brcmf_dbg(PCIE, "Hot resume, continue....\n"); - brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); - brcmf_bus_change_state(bus, BRCMF_BUS_UP); - brcmf_pcie_intr_enable(devinfo); - return 0; - } + if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) { + brcmf_dbg(PCIE, "Try to wakeup device....\n"); + if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM)) + goto cleanup; + brcmf_dbg(PCIE, "Hot resume, continue....\n"); + devinfo->state = BRCMFMAC_PCIE_STATE_UP; + brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); + brcmf_bus_change_state(bus, BRCMF_BUS_UP); + brcmf_pcie_intr_enable(devinfo); + return 0; } cleanup: - if (bus) { - devinfo = bus->bus_priv.pcie->devinfo; - brcmf_chip_detach(devinfo->ci); - devinfo->ci = NULL; - brcmf_pcie_remove(pdev); - } + brcmf_chip_detach(devinfo->ci); + devinfo->ci = NULL; + pdev = devinfo->pdev; + brcmf_pcie_remove(pdev); + err = brcmf_pcie_probe(pdev, NULL); if (err) brcmf_err("probe after resume failed, err=%d\n", err); @@ -2046,6 +1938,14 @@ cleanup: } +static const struct dev_pm_ops brcmf_pciedrvr_pm = { + .suspend = brcmf_pcie_pm_enter_D3, + .resume = brcmf_pcie_pm_leave_D3, + .freeze = brcmf_pcie_pm_enter_D3, + .restore = brcmf_pcie_pm_leave_D3, +}; + + #endif /* CONFIG_PM */ @@ -2058,6 +1958,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID), @@ -2083,9 +1984,8 @@ static struct pci_driver brcmf_pciedrvr = { .probe = brcmf_pcie_probe, .remove = brcmf_pcie_remove, #ifdef CONFIG_PM - .suspend = brcmf_pcie_suspend, - .resume = brcmf_pcie_resume -#endif /* CONFIG_PM */ + .driver.pm = &brcmf_pciedrvr_pm, +#endif }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h index 6edaaf8ef5ce..6edaaf8ef5ce 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index 26b68c367f57..26b68c367f57 100644 
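Taken together with the BRCMF_FW_NVRAM_DEF/BRCMF_FW_NVRAM_ENTRY macros and brcmf_fw_map_chip_to_name() added in firmware.h, each bus driver's per-chip firmware bookkeeping reduces to a declarative table plus one lookup in probe, as the pcie.c hunk above shows. A minimal sketch of that pattern with a single entry; the table and function names here are illustrative only:

BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-pcie.bin", "brcmfmac4356-pcie.txt");

static struct brcmf_firmware_mapping example_fwnames[] = {
	/* chip id, chip-revision bitmask, name used in BRCMF_FW_NVRAM_DEF */
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
};

static int example_get_fwnames(u32 chip, u32 chiprev,
			       char fw[BRCMF_FW_NAME_LEN],
			       char nv[BRCMF_FW_NAME_LEN])
{
	/* fills fw/nv for this chip/rev, or fails when no entry matches */
	return brcmf_fw_map_chip_to_name(chip, chiprev, example_fwnames,
					 ARRAY_SIZE(example_fwnames), fw, nv);
}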
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index d55119d36755..d55119d36755 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 7e74ac3ad815..dd6614332836 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -45,8 +45,8 @@ #include "chip.h" #include "firmware.h" -#define DCMD_RESP_TIMEOUT 2000 /* In milli second */ -#define CTL_DONE_TIMEOUT 2000 /* In milli second */ +#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2000) +#define CTL_DONE_TIMEOUT msecs_to_jiffies(2000) #ifdef DEBUG @@ -460,7 +460,6 @@ struct brcmf_sdio { struct sk_buff *glomd; /* Packet containing glomming descriptor */ struct sk_buff_head glom; /* Packet list for glommed superframe */ - uint glomerr; /* Glom packet read errors */ u8 *rxbuf; /* Buffer for receiving control packets */ uint rxblen; /* Allocated length of rxbuf */ @@ -504,8 +503,7 @@ struct brcmf_sdio { struct timer_list timer; struct completion watchdog_wait; struct task_struct *watchdog_tsk; - bool wd_timer_valid; - uint save_ms; + bool wd_active; struct workqueue_struct *brcmf_wq; struct work_struct datawork; @@ -597,136 +595,41 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = { {4, 0x1} }; -#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin" -#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt" -#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin" -#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt" -#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin" -#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt" -#define BCM43241B5_FIRMWARE_NAME "brcm/brcmfmac43241b5-sdio.bin" -#define BCM43241B5_NVRAM_NAME "brcm/brcmfmac43241b5-sdio.txt" -#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin" -#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt" -#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin" -#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt" -#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin" -#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt" -#define BCM43340_FIRMWARE_NAME "brcm/brcmfmac43340-sdio.bin" -#define BCM43340_NVRAM_NAME "brcm/brcmfmac43340-sdio.txt" -#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin" -#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt" -#define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin" -#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt" -#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin" -#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt" -#define BCM43430_FIRMWARE_NAME "brcm/brcmfmac43430-sdio.bin" -#define BCM43430_NVRAM_NAME "brcm/brcmfmac43430-sdio.txt" -#define BCM43455_FIRMWARE_NAME "brcm/brcmfmac43455-sdio.bin" -#define BCM43455_NVRAM_NAME "brcm/brcmfmac43455-sdio.txt" -#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin" -#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt" - -MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43143_NVRAM_NAME); -MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME); -MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME); 
-MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME); -MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME); -MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM4329_NVRAM_NAME); -MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM4330_NVRAM_NAME); -MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM4334_NVRAM_NAME); -MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43340_NVRAM_NAME); -MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM4335_NVRAM_NAME); -MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43362_NVRAM_NAME); -MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM4339_NVRAM_NAME); -MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43430_NVRAM_NAME); -MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM43455_NVRAM_NAME); -MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME); -MODULE_FIRMWARE(BCM4354_NVRAM_NAME); - -struct brcmf_firmware_names { - u32 chipid; - u32 revmsk; - const char *bin; - const char *nv; +BRCMF_FW_NVRAM_DEF(43143, "brcmfmac43143-sdio.bin", "brcmfmac43143-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43241B0, "brcmfmac43241b0-sdio.bin", + "brcmfmac43241b0-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43241B4, "brcmfmac43241b4-sdio.bin", + "brcmfmac43241b4-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43241B5, "brcmfmac43241b5-sdio.bin", + "brcmfmac43241b5-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4329, "brcmfmac4329-sdio.bin", "brcmfmac4329-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4330, "brcmfmac4330-sdio.bin", "brcmfmac4330-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4334, "brcmfmac4334-sdio.bin", "brcmfmac4334-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43340, "brcmfmac43340-sdio.bin", "brcmfmac43340-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4335, "brcmfmac4335-sdio.bin", "brcmfmac4335-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43362, "brcmfmac43362-sdio.bin", "brcmfmac43362-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4339, "brcmfmac4339-sdio.bin", "brcmfmac4339-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43430, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt"); +BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt"); + +static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, 43241B5), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, 4329), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, 43430), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354) }; -enum brcmf_firmware_type { - BRCMF_FIRMWARE_BIN, - BRCMF_FIRMWARE_NVRAM -}; - -#define BRCMF_FIRMWARE_NVRAM(name) \ - name ## _FIRMWARE_NAME, name ## _NVRAM_NAME - -static const struct brcmf_firmware_names brcmf_fwname_data[] = { - { BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) }, - { BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) }, - { BRCM_CC_43241_CHIP_ID, 0x00000020, 
BRCMF_FIRMWARE_NVRAM(BCM43241B4) }, - { BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) }, - { BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) }, - { BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) }, - { BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) }, - { BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) }, - { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }, - { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) }, - { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }, - { BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) }, - { BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) }, - { BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) } -}; - -static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci, - struct brcmf_sdio_dev *sdiodev) -{ - int i; - char end; - - for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) { - if (brcmf_fwname_data[i].chipid == ci->chip && - brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) - break; - } - - if (i == ARRAY_SIZE(brcmf_fwname_data)) { - brcmf_err("Unknown chipid %d [%d]\n", ci->chip, ci->chiprev); - return -ENODEV; - } - - /* check if firmware path is provided by module parameter */ - if (brcmf_firmware_path[0] != '\0') { - strlcpy(sdiodev->fw_name, brcmf_firmware_path, - sizeof(sdiodev->fw_name)); - strlcpy(sdiodev->nvram_name, brcmf_firmware_path, - sizeof(sdiodev->nvram_name)); - - end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; - if (end != '/') { - strlcat(sdiodev->fw_name, "/", - sizeof(sdiodev->fw_name)); - strlcat(sdiodev->nvram_name, "/", - sizeof(sdiodev->nvram_name)); - } - } - strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin, - sizeof(sdiodev->fw_name)); - strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, - sizeof(sdiodev->nvram_name)); - - return 0; -} - static void pkt_align(struct sk_buff *p, int len, int align) { uint datalign; @@ -1057,7 +960,7 @@ end: brcmf_sdio_clkctl(bus, CLK_NONE, pendok); } else { brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); - brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); + brcmf_sdio_wd_timer(bus, true); } bus->sleeping = sleep; brcmf_dbg(SDIO, "new state %s\n", @@ -1654,20 +1557,15 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) sdio_release_host(bus->sdiodev->func[1]); bus->sdcnt.f2rxdata++; - /* On failure, kill the superframe, allow a couple retries */ + /* On failure, kill the superframe */ if (errcode < 0) { brcmf_err("glom read of %d bytes failed: %d\n", dlen, errcode); sdio_claim_host(bus->sdiodev->func[1]); - if (bus->glomerr++ < 3) { - brcmf_sdio_rxfail(bus, true, true); - } else { - bus->glomerr = 0; - brcmf_sdio_rxfail(bus, true, false); - bus->sdcnt.rxglomfail++; - brcmf_sdio_free_glom(bus); - } + brcmf_sdio_rxfail(bus, true, false); + bus->sdcnt.rxglomfail++; + brcmf_sdio_free_glom(bus); sdio_release_host(bus->sdiodev->func[1]); return 0; } @@ -1708,19 +1606,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) } if (errcode) { - /* Terminate frame on error, request - a couple retries */ + /* Terminate frame on error */ sdio_claim_host(bus->sdiodev->func[1]); - if (bus->glomerr++ < 3) { - /* Restore superframe header space */ - skb_push(pfirst, sfdoff); - brcmf_sdio_rxfail(bus, true, true); - } else { - bus->glomerr = 0; - brcmf_sdio_rxfail(bus, true, false); - bus->sdcnt.rxglomfail++; - brcmf_sdio_free_glom(bus); - } + brcmf_sdio_rxfail(bus, true, false); + 
bus->sdcnt.rxglomfail++; + brcmf_sdio_free_glom(bus); sdio_release_host(bus->sdiodev->func[1]); bus->cur_read.len = 0; return 0; @@ -1767,7 +1657,7 @@ static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition, bool *pending) { DECLARE_WAITQUEUE(wait, current); - int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT); + int timeout = DCMD_RESP_TIMEOUT; /* Wait until control frame is available */ add_wait_queue(&bus->dcmd_resp_wait, &wait); @@ -1787,8 +1677,7 @@ static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition, static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus) { - if (waitqueue_active(&bus->dcmd_resp_wait)) - wake_up_interruptible(&bus->dcmd_resp_wait); + wake_up_interruptible(&bus->dcmd_resp_wait); return 0; } @@ -2112,8 +2001,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) static void brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus) { - if (waitqueue_active(&bus->ctrl_wait)) - wake_up_interruptible(&bus->ctrl_wait); + wake_up_interruptible(&bus->ctrl_wait); return; } @@ -2954,7 +2842,7 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) brcmf_sdio_trigger_dpc(bus); wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat, - msecs_to_jiffies(CTL_DONE_TIMEOUT)); + CTL_DONE_TIMEOUT); ret = 0; if (bus->ctrl_frame_stat) { sdio_claim_host(bus->sdiodev->func[1]); @@ -3664,7 +3552,7 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) /* Poll for console output periodically */ if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() && bus->console_interval != 0) { - bus->console.count += BRCMF_WD_POLL_MS; + bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL); if (bus->console.count >= bus->console_interval) { bus->console.count -= bus->console_interval; sdio_claim_host(bus->sdiodev->func[1]); @@ -3687,7 +3575,7 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); sdio_claim_host(bus->sdiodev->func[1]); - brcmf_sdio_wd_timer(bus, 0); + brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); sdio_release_host(bus->sdiodev->func[1]); @@ -4019,13 +3907,13 @@ brcmf_sdio_watchdog(unsigned long data) if (bus->watchdog_tsk) { complete(&bus->watchdog_wait); /* Reschedule the watchdog */ - if (bus->wd_timer_valid) + if (bus->wd_active) mod_timer(&bus->timer, - jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS)); + jiffies + BRCMF_WD_POLL); } } -static struct brcmf_bus_ops brcmf_sdio_bus_ops = { +static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .stop = brcmf_sdio_bus_stop, .preinit = brcmf_sdio_bus_preinit, .txdata = brcmf_sdio_bus_txdata, @@ -4061,7 +3949,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, /* Start the watchdog timer */ bus->sdcnt.tickcnt = 0; - brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); + brcmf_sdio_wd_timer(bus, true); sdio_claim_host(sdiodev->func[1]); @@ -4266,7 +4154,10 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) brcmf_sdio_debugfs_create(bus); brcmf_dbg(INFO, "completed!!\n"); - ret = brcmf_sdio_get_fwnames(bus->ci, sdiodev); + ret = brcmf_fw_map_chip_to_name(bus->ci->chip, bus->ci->chiprev, + brcmf_sdio_fwnames, + ARRAY_SIZE(brcmf_sdio_fwnames), + sdiodev->fw_name, sdiodev->nvram_name); if (ret) goto fail; @@ -4303,7 +4194,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) if (bus->ci) { if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { sdio_claim_host(bus->sdiodev->func[1]); - 
brcmf_sdio_wd_timer(bus, 0); + brcmf_sdio_wd_timer(bus, false); brcmf_sdio_clkctl(bus, CLK_AVAIL, false); /* Leave the device in state where it is * 'passive'. This is done by resetting all @@ -4325,13 +4216,12 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Disconnected\n"); } -void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick) +void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active) { /* Totally stop the timer */ - if (!wdtick && bus->wd_timer_valid) { + if (!active && bus->wd_active) { del_timer_sync(&bus->timer); - bus->wd_timer_valid = false; - bus->save_ms = wdtick; + bus->wd_active = false; return; } @@ -4339,27 +4229,18 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick) if (bus->sdiodev->state != BRCMF_SDIOD_DATA) return; - if (wdtick) { - if (bus->save_ms != BRCMF_WD_POLL_MS) { - if (bus->wd_timer_valid) - /* Stop timer and restart at new value */ - del_timer_sync(&bus->timer); - + if (active) { + if (!bus->wd_active) { /* Create timer again when watchdog period is dynamically changed or in the first instance */ - bus->timer.expires = - jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS); + bus->timer.expires = jiffies + BRCMF_WD_POLL; add_timer(&bus->timer); - + bus->wd_active = true; } else { /* Re arm the timer, at last watchdog period */ - mod_timer(&bus->timer, - jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS)); + mod_timer(&bus->timer, jiffies + BRCMF_WD_POLL); } - - bus->wd_timer_valid = true; - bus->save_ms = wdtick; } } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 7328478b2d7b..5ec7a6d87672 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -152,8 +152,8 @@ /* Packet alignment for most efficient SDIO (can change based on platform) */ #define BRCMF_SDALIGN (1 << 6) -/* watchdog polling interval in ms */ -#define BRCMF_WD_POLL_MS 10 +/* watchdog polling interval */ +#define BRCMF_WD_POLL msecs_to_jiffies(10) /** * enum brcmf_sdiod_state - the state of the bus. 
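With the poll period fixed at BRCMF_WD_POLL (already in jiffies) and the old wdtick/save_ms pair collapsed into a single wd_active flag, callers of the SDIO watchdog only ever ask for on or off. A condensed restatement of the caller-side contract, lifted from the hunks above:

	/* firmware is up: start polling every BRCMF_WD_POLL */
	brcmf_sdio_wd_timer(bus, true);

	/* bus idle or being torn down: stop the watchdog */
	brcmf_sdio_wd_timer(bus, false);

	/* console book-keeping converts the jiffies period back to ms */
	bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL);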
@@ -195,8 +195,8 @@ struct brcmf_sdio_dev { uint max_segment_size; uint txglomsz; struct sg_table sgtable; - char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; - char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; + char fw_name[BRCMF_FW_NAME_LEN]; + char nvram_name[BRCMF_FW_NAME_LEN]; bool wowl_enabled; enum brcmf_sdiod_state state; struct brcmf_sdiod_freezer *freezer; @@ -369,7 +369,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); void brcmf_sdio_remove(struct brcmf_sdio *bus); void brcmf_sdio_isr(struct brcmf_sdio *bus); -void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick); +void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active); void brcmf_sdio_wowl_config(struct device *dev, bool enabled); int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep); void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c index a10f35c5eb3d..a10f35c5eb3d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h index 4d7d51f95716..4d7d51f95716 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 689e64d004bc..c72b7b352a77 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -29,7 +29,7 @@ #include "usb.h" -#define IOCTL_RESP_TIMEOUT 2000 +#define IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) #define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */ #define BRCMF_USB_RESET_GETVER_LOOP_CNT 10 @@ -43,10 +43,20 @@ #define BRCMF_USB_CBCTL_READ 1 #define BRCMF_USB_MAX_PKT_SIZE 1600 -#define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin" -#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin" -#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin" -#define BRCMF_USB_43569_FW_NAME "brcm/brcmfmac43569.bin" +BRCMF_FW_DEF(43143, "brcmfmac43143.bin"); +BRCMF_FW_DEF(43236B, "brcmfmac43236b.bin"); +BRCMF_FW_DEF(43242A, "brcmfmac43242a.bin"); +BRCMF_FW_DEF(43569, "brcmfmac43569.bin"); + +static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { + BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), + BRCMF_FW_ENTRY(BRCM_CC_43235_CHIP_ID, 0x00000008, 43236B), + BRCMF_FW_ENTRY(BRCM_CC_43236_CHIP_ID, 0x00000008, 43236B), + BRCMF_FW_ENTRY(BRCM_CC_43238_CHIP_ID, 0x00000008, 43236B), + BRCMF_FW_ENTRY(BRCM_CC_43242_CHIP_ID, 0xFFFFFFFF, 43242A), + BRCMF_FW_ENTRY(BRCM_CC_43566_CHIP_ID, 0xFFFFFFFF, 43569), + BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43569) +}; #define TRX_MAGIC 0x30524448 /* "HDR0" */ #define TRX_MAX_OFFSET 3 /* Max number of file offsets */ @@ -139,6 +149,7 @@ struct brcmf_usbdev_info { struct brcmf_usbreq *tx_reqs; struct brcmf_usbreq *rx_reqs; + char fw_name[BRCMF_FW_NAME_LEN]; const u8 *image; /* buffer for combine fw and nvram */ int image_len; @@ -179,14 +190,12 @@ static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev) static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo) { return wait_event_timeout(devinfo->ioctl_resp_wait, - devinfo->ctl_completed, - msecs_to_jiffies(IOCTL_RESP_TIMEOUT)); + 
devinfo->ctl_completed, IOCTL_RESP_TIMEOUT); } static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo) { - if (waitqueue_active(&devinfo->ioctl_resp_wait)) - wake_up(&devinfo->ioctl_resp_wait); + wake_up(&devinfo->ioctl_resp_wait); } static void @@ -983,45 +992,15 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo) return 0; } -static bool brcmf_usb_chip_support(int chipid, int chiprev) -{ - switch(chipid) { - case BRCM_CC_43143_CHIP_ID: - return true; - case BRCM_CC_43235_CHIP_ID: - case BRCM_CC_43236_CHIP_ID: - case BRCM_CC_43238_CHIP_ID: - return (chiprev == 3); - case BRCM_CC_43242_CHIP_ID: - return true; - case BRCM_CC_43566_CHIP_ID: - case BRCM_CC_43569_CHIP_ID: - return true; - default: - break; - } - return false; -} - static int brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo) { - int devid, chiprev; int err; brcmf_dbg(USB, "Enter\n"); if (devinfo == NULL) return -ENODEV; - devid = devinfo->bus_pub.devid; - chiprev = devinfo->bus_pub.chiprev; - - if (!brcmf_usb_chip_support(devid, chiprev)) { - brcmf_err("unsupported chip %d rev %d\n", - devid, chiprev); - return -EINVAL; - } - if (!devinfo->image) { brcmf_err("No firmware!\n"); return -ENOENT; @@ -1071,25 +1050,6 @@ static int check_file(const u8 *headers) return -1; } -static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo) -{ - switch (devinfo->bus_pub.devid) { - case BRCM_CC_43143_CHIP_ID: - return BRCMF_USB_43143_FW_NAME; - case BRCM_CC_43235_CHIP_ID: - case BRCM_CC_43236_CHIP_ID: - case BRCM_CC_43238_CHIP_ID: - return BRCMF_USB_43236_FW_NAME; - case BRCM_CC_43242_CHIP_ID: - return BRCMF_USB_43242_FW_NAME; - case BRCM_CC_43566_CHIP_ID: - case BRCM_CC_43569_CHIP_ID: - return BRCMF_USB_43569_FW_NAME; - default: - return NULL; - } -} - static struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo, @@ -1163,7 +1123,7 @@ static void brcmf_usb_wowl_config(struct device *dev, bool enabled) device_set_wakeup_enable(devinfo->dev, false); } -static struct brcmf_bus_ops brcmf_usb_bus_ops = { +static const struct brcmf_bus_ops brcmf_usb_bus_ops = { .txdata = brcmf_usb_tx, .stop = brcmf_usb_down, .txctl = brcmf_usb_tx_ctlpkt, @@ -1274,9 +1234,16 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->chip = bus_pub->devid; bus->chiprev = bus_pub->chiprev; + ret = brcmf_fw_map_chip_to_name(bus_pub->devid, bus_pub->chiprev, + brcmf_usb_fwnames, + ARRAY_SIZE(brcmf_usb_fwnames), + devinfo->fw_name, NULL); + if (ret) + goto fail; + /* request firmware here */ - ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), - NULL, brcmf_usb_probe_phase2); + ret = brcmf_fw_get_firmwares(dev, 0, devinfo->fw_name, NULL, + brcmf_usb_probe_phase2); if (ret) { brcmf_err("firmware request failed: %d\n", ret); goto fail; @@ -1472,8 +1439,7 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf) brcmf_dbg(USB, "Enter\n"); - return brcmf_fw_get_firmwares(&usb->dev, 0, - brcmf_usb_get_fwname(devinfo), NULL, + return brcmf_fw_get_firmwares(&usb->dev, 0, devinfo->fw_name, NULL, brcmf_usb_probe_phase2); } @@ -1485,16 +1451,13 @@ static struct usb_device_id brcmf_usb_devid_table[] = { BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID), + { USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) }, /* special entry for device with firmware loaded and running */ BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID), { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(usb, 
brcmf_usb_devid_table); -MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME); -MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); -MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME); -MODULE_FIRMWARE(BRCMF_USB_43569_FW_NAME); static struct usb_driver brcmf_usbdrvr = { .name = KBUILD_MODNAME, @@ -1504,7 +1467,6 @@ static struct usb_driver brcmf_usbdrvr = { .suspend = brcmf_usb_suspend, .resume = brcmf_usb_resume, .reset_resume = brcmf_usb_reset_resume, - .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h index f483a8c9945b..f483a8c9945b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c index 8eff2753abad..8eff2753abad 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h index 061b7bfa2e1c..061b7bfa2e1c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile index 32464acccd90..960e6b86bbcb 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile @@ -17,9 +17,9 @@ ccflags-y := \ -D__CHECK_ENDIAN__ \ - -Idrivers/net/wireless/brcm80211/brcmsmac \ - -Idrivers/net/wireless/brcm80211/brcmsmac/phy \ - -Idrivers/net/wireless/brcm80211/include + -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \ + -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \ + -Idrivers/net/wireless/broadcom/brcm80211/include brcmsmac-y := \ mac80211_if.o \ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c index 53365977bfd6..53365977bfd6 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h index 2d08c155c23b..2d08c155c23b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c index fa391e4eb098..fa391e4eb098 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.h index 03bdcf29bd50..03bdcf29bd50 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c index 54c616919590..54c616919590 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.h index a3d487ab1964..a3d487ab1964 
100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h index a0da3248b942..a0da3248b942 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h index 0e8a69ab909f..0e8a69ab909f 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h index cf2cc070f1e5..cf2cc070f1e5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_events.c index 52fc9eeb5fa5..52fc9eeb5fa5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_events.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_events.h index cbf2f06436fc..cbf2f06436fc 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_events.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c index 635ae034c7e5..38bd5890bd53 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c @@ -177,8 +177,8 @@ static bool brcms_c_country_valid(const char *ccode) * only allow ascii alpha uppercase for the first 2 * chars. 
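The brcms_c_country_valid() hunk that follows only swaps the operand order of the mask test; the rule stated in the comment above is unchanged. A rough self-contained sketch of that rule in plain C, where ccode_valid() is an illustrative name rather than a brcmsmac function:

#include <stdbool.h>
#include <stdio.h>

/* Accept exactly two uppercase ASCII letters; reject any byte with the
 * high bit set, mirroring the (ccode[x] & 0x80) == 0 test in
 * brcms_c_country_valid(). */
static bool ccode_valid(const char *ccode)
{
	for (int i = 0; i < 2; i++) {
		unsigned char c = (unsigned char)ccode[i];

		if (c & 0x80)
			return false;
		if (c < 'A' || c > 'Z')
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d %d %d\n", ccode_valid("US"), ccode_valid("us"), ccode_valid("X\xC3"));
	return 0;
}
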
*/ - if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A && - (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A)) + if (!((ccode[0] & 0x80) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A && + (ccode[1] & 0x80) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A)) return false; /* diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.h index 39dd3a5b2979..39dd3a5b2979 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h index 9035cc4d6ff3..9035cc4d6ff3 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c index 7a1fbb2e3a71..7a1fbb2e3a71 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h index 822781cf15d4..822781cf15d4 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c index 796f5f9d5d5a..796f5f9d5d5a 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h index ff5b80b09046..ff5b80b09046 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c index 74b17cecb189..74b17cecb189 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/led.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h index 17a0b1f5dbcf..17a0b1f5dbcf 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/led.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index bec2dc1ca2e4..bec2dc1ca2e4 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h index 198053dfc310..198053dfc310 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index 218cbc8bf3a7..218cbc8bf3a7 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h index c4d135cff04a..c4d135cff04a 100644 --- 
a/drivers/net/wireless/brcm80211/brcmsmac/main.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c index 1c4e9dd57960..1c4e9dd57960 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h index 4d3734f48d9c..4d3734f48d9c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h index 4960f7d26804..4960f7d26804 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index 93d4cde0eb31..93d4cde0eb31 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h index f4a8ab09da43..f4a8ab09da43 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 99dac9b8a082..99dac9b8a082 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c index faf1ebe76068..faf1ebe76068 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h index 20e3783f921b..20e3783f921b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_radio.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h index c3a675455ff5..c3a675455ff5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_radio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phyreg_n.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h index a97c3a799479..a97c3a799479 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phyreg_n.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c index d7fa312214f3..d7fa312214f3 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h 
b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h index 489422a36085..489422a36085 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c index dbf50ef6cd75..dbf50ef6cd75 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h index dc8a84e85117..dc8a84e85117 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c index a0de5db0cd64..a0de5db0cd64 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h index dd8774717ade..dd8774717ade 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c index 71b80381f3ad..71b80381f3ad 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.h index a014bbc4f935..a014bbc4f935 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h index 4da38cb4f318..4da38cb4f318 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/rate.c index 0a0c0ad4f96f..0a0c0ad4f96f 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/rate.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/rate.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/rate.h index 5bb88b78ed64..5bb88b78ed64 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/rate.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/scb.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h index 3a3d73699f83..3a3d73699f83 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/scb.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c index dd9162722495..dd9162722495 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h index ba9493009a33..ba9493009a33 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/stf.h +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h index ae1f3ad40d45..ae1f3ad40d45 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ucode_loader.c index 80e3ccf865e3..80e3ccf865e3 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ucode_loader.c diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ucode_loader.h index c87dd89bcb78..c87dd89bcb78 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ucode_loader.h diff --git a/drivers/net/wireless/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile index 8a928184016a..256c91f9ac4b 100644 --- a/drivers/net/wireless/brcm80211/brcmutil/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile @@ -16,8 +16,8 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ccflags-y := \ - -Idrivers/net/wireless/brcm80211/brcmutil \ - -Idrivers/net/wireless/brcm80211/include + -Idrivers/net/wireless/broadcom/brcm80211/brcmutil \ + -Idrivers/net/wireless/broadcom/brcm80211/include obj-$(CONFIG_BRCMUTIL) += brcmutil.o brcmutil-objs = utils.o d11.o diff --git a/drivers/net/wireless/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index 2b2522bdd8eb..2b2522bdd8eb 100644 --- a/drivers/net/wireless/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c index 0543607002fd..0543607002fd 100644 --- a/drivers/net/wireless/brcm80211/brcmutil/utils.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index aa06ea231db3..699f2c2782ee 100644 --- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -21,6 +21,7 @@ #include <linux/mmc/sdio_ids.h> #define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c +#define BRCM_USB_VENDOR_ID_LG 0x043e #define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM /* Chipcommon Core Chip IDs */ @@ -47,6 +48,7 @@ #define BRCM_CC_43569_CHIP_ID 43569 #define BRCM_CC_43570_CHIP_ID 43570 #define BRCM_CC_4358_CHIP_ID 0x4358 +#define BRCM_CC_4359_CHIP_ID 0x4359 #define BRCM_CC_43602_CHIP_ID 43602 #define BRCM_CC_4365_CHIP_ID 0x4365 #define BRCM_CC_4366_CHIP_ID 0x4366 @@ -56,6 +58,7 @@ #define BRCM_USB_43143_DEVICE_ID 0xbd1e #define BRCM_USB_43236_DEVICE_ID 0xbd17 #define BRCM_USB_43242_DEVICE_ID 0xbd1f +#define BRCM_USB_43242_LG_DEVICE_ID 0x3101 #define BRCM_USB_43569_DEVICE_ID 0xbd27 #define BRCM_USB_BCMFW_DEVICE_ID 0x0bdc @@ -66,6 +69,7 @@ #define BRCM_PCIE_43567_DEVICE_ID 0x43d3 #define BRCM_PCIE_43570_DEVICE_ID 0x43d9 #define BRCM_PCIE_4358_DEVICE_ID 0x43e9 +#define BRCM_PCIE_4359_DEVICE_ID 0x43ef #define BRCM_PCIE_43602_DEVICE_ID 0x43ba #define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb #define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h 
b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h index f9745ea8b3e0..f9745ea8b3e0 100644 --- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h index 41969527b459..41969527b459 100644 --- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h index 76b5d3a86294..3f68dd5ecd11 100644 --- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h @@ -237,9 +237,6 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec) #define WPA2_AUTH_RESERVED4 0x0400 #define WPA2_AUTH_RESERVED5 0x0800 -/* pmkid */ -#define MAXPMKID 16 - #define DOT11_DEFAULT_RTS_LEN 2347 #define DOT11_DEFAULT_FRAG_LEN 2346 @@ -251,24 +248,4 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec) #define HT_CAP_RX_STBC_NO 0x0 #define HT_CAP_RX_STBC_ONE_STREAM 0x1 -struct pmkid { - u8 BSSID[ETH_ALEN]; - u8 PMKID[WLAN_PMKID_LEN]; -}; - -struct pmkid_list { - __le32 npmkid; - struct pmkid pmkid[1]; -}; - -struct pmkid_cand { - u8 BSSID[ETH_ALEN]; - u8 preauth; -}; - -struct pmkid_cand_list { - u32 npmkid_cand; - struct pmkid_cand pmkid_cand[1]; -}; - #endif /* _BRCMU_WIFI_H_ */ diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h index e1fd499930a0..e1fd499930a0 100644 --- a/drivers/net/wireless/brcm80211/include/chipcommon.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/broadcom/brcm80211/include/defs.h index 8d1e85e0ed51..8d1e85e0ed51 100644 --- a/drivers/net/wireless/brcm80211/include/defs.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/defs.h diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/broadcom/brcm80211/include/soc.h index 123cfa854a0d..123cfa854a0d 100644 --- a/drivers/net/wireless/brcm80211/include/soc.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/soc.h diff --git a/drivers/net/wireless/cisco/Kconfig b/drivers/net/wireless/cisco/Kconfig new file mode 100644 index 000000000000..b22567dff893 --- /dev/null +++ b/drivers/net/wireless/cisco/Kconfig @@ -0,0 +1,56 @@ +config WLAN_VENDOR_CISCO + bool "Cisco devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_CISCO + +config AIRO + tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" + depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN) + select WIRELESS_EXT + select CRYPTO + select WEXT_SPY + select WEXT_PRIV + ---help--- + This is the standard Linux driver to support Cisco/Aironet ISA and + PCI 802.11 wireless cards. + It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X + - with or without encryption) as well as card before the Cisco + acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). 
+ + This driver support both the standard Linux Wireless Extensions + and Cisco proprietary API, so both the Linux Wireless Tools and the + Cisco Linux utilities can be used to configure the card. + + The driver can be compiled as a module and will be named "airo". + +config AIRO_CS + tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" + depends on CFG80211 && PCMCIA && (BROKEN || !M32R) + select WIRELESS_EXT + select WEXT_SPY + select WEXT_PRIV + select CRYPTO + select CRYPTO_AES + ---help--- + This is the standard Linux driver to support Cisco/Aironet PCMCIA + 802.11 wireless cards. This driver is the same as the Aironet + driver part of the Linux Pcmcia package. + It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X + - with or without encryption) as well as card before the Cisco + acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also + supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom + 802.11b cards. + + This driver support both the standard Linux Wireless Extensions + and Cisco proprietary API, so both the Linux Wireless Tools and the + Cisco Linux utilities can be used to configure the card. + +endif # WLAN_VENDOR_CISCO diff --git a/drivers/net/wireless/cisco/Makefile b/drivers/net/wireless/cisco/Makefile new file mode 100644 index 000000000000..d4110b19d6ef --- /dev/null +++ b/drivers/net/wireless/cisco/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_AIRO) += airo.o +obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/cisco/airo.c index 17c40f06f13e..d2353f6e5214 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5137,21 +5137,9 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) { memset(APList_rid, 0, sizeof(*APList_rid)); APList_rid->len = cpu_to_le16(sizeof(*APList_rid)); - for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) { - int j; - for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) { - switch(j%3) { - case 0: - APList_rid->ap[i][j/3]= - hex_to_bin(data->wbuffer[j+i*6*3])<<4; - break; - case 1: - APList_rid->ap[i][j/3]|= - hex_to_bin(data->wbuffer[j+i*6*3]); - break; - } - } - } + for (i = 0; i < 4 && data->writelen >= (i + 1) * 6 * 3; i++) + mac_pton(data->wbuffer + i * 6 * 3, APList_rid->ap[i]); + disable_MAC(ai, 1); writeAPListRid(ai, APList_rid, 1); enable_MAC(ai, 1); diff --git a/drivers/net/wireless/airo.h b/drivers/net/wireless/cisco/airo.h index e480adf86be6..e480adf86be6 100644 --- a/drivers/net/wireless/airo.h +++ b/drivers/net/wireless/cisco/airo.h diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c index d9ed22b4cc6b..d9ed22b4cc6b 100644 --- a/drivers/net/wireless/airo_cs.c +++ b/drivers/net/wireless/cisco/airo_cs.c diff --git a/drivers/net/wireless/intel/Kconfig b/drivers/net/wireless/intel/Kconfig new file mode 100644 index 000000000000..5b14f2f64a8a --- /dev/null +++ b/drivers/net/wireless/intel/Kconfig @@ -0,0 +1,18 @@ +config WLAN_VENDOR_INTEL + bool "Intel devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. 
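The airo.c hunk above drops the open-coded nibble-by-nibble parser and lets mac_pton() turn each "xx:xx:xx:xx:xx:xx" slot of the proc write buffer into the six bytes of APList_rid->ap[i]; mac_pton() is an existing kernel helper for exactly this format, so the hunk is a pure simplification. A minimal userspace approximation of that parsing, assuming colon-separated hex pairs (parse_mac() and hex_val() are illustrative stand-ins, not the kernel implementation):

#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int hex_val(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	c = tolower((unsigned char)c);
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}

/* Parse "xx:xx:xx:xx:xx:xx" (three characters per byte, as in the
 * proc_APList_on_close() write buffer) into mac[6]. */
static bool parse_mac(const char *s, uint8_t mac[6])
{
	for (int i = 0; i < 6; i++) {
		int hi = hex_val(s[i * 3]);
		int lo = hex_val(s[i * 3 + 1]);

		if (hi < 0 || lo < 0)
			return false;
		if (i < 5 && s[i * 3 + 2] != ':')
			return false;
		mac[i] = (uint8_t)((hi << 4) | lo);
	}
	return true;
}

int main(void)
{
	uint8_t mac[6];

	if (parse_mac("00:11:22:aa:bb:cc", mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
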
+ +if WLAN_VENDOR_INTEL + +source "drivers/net/wireless/intel/ipw2x00/Kconfig" +source "drivers/net/wireless/intel/iwlegacy/Kconfig" +source "drivers/net/wireless/intel/iwlwifi/Kconfig" + +endif # WLAN_VENDOR_INTEL diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile new file mode 100644 index 000000000000..c9cbcc85b569 --- /dev/null +++ b/drivers/net/wireless/intel/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_IPW2100) += ipw2x00/ +obj-$(CONFIG_IPW2200) += ipw2x00/ + +obj-$(CONFIG_IWLEGACY) += iwlegacy/ + +obj-$(CONFIG_IWLWIFI) += iwlwifi/ diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig index d6ec44d7a391..d6ec44d7a391 100644 --- a/drivers/net/wireless/ipw2x00/Kconfig +++ b/drivers/net/wireless/intel/ipw2x00/Kconfig diff --git a/drivers/net/wireless/ipw2x00/Makefile b/drivers/net/wireless/intel/ipw2x00/Makefile index aecd2cff462b..aecd2cff462b 100644 --- a/drivers/net/wireless/ipw2x00/Makefile +++ b/drivers/net/wireless/intel/ipw2x00/Makefile diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/intel/ipw2x00/ipw.h index 4007bf5ed6f3..4007bf5ed6f3 100644 --- a/drivers/net/wireless/ipw2x00/ipw.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw.h diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 36818c7f30b9..f93a7f71c047 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -2311,8 +2311,10 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv, packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data, sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); - /* NOTE: pci_map_single does not return an error code, and 0 is a valid - * dma_addr */ + if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) { + dev_kfree_skb(packet->skb); + return -ENOMEM; + } return 0; } @@ -3183,6 +3185,11 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) LIBIPW_3ADDR_LEN, tbd->buf_length, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pci_dev, + tbd->host_addr)) { + IPW_DEBUG_TX("dma mapping error\n"); + break; + } IPW_DEBUG_TX("data frag tbd TX%d P=%08x L=%d\n", txq->next, tbd->host_addr, diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h index 193947865efd..193947865efd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ed0adaf1eec4..ed0adaf1eec4 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index aa301d1eee3c..aa301d1eee3c 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h index b0571618c2ed..b0571618c2ed 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/intel/ipw2x00/libipw.h diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/intel/ipw2x00/libipw_geo.c index 218f2a32de21..218f2a32de21 100644 --- a/drivers/net/wireless/ipw2x00/libipw_geo.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_geo.c diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c index 60f28740f6af..60f28740f6af 
100644 --- a/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c index cef7f7d79cd9..cef7f7d79cd9 100644 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c index e8c039879b05..e8c039879b05 100644 --- a/drivers/net/wireless/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index dd29f46d086b..dd29f46d086b 100644 --- a/drivers/net/wireless/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c index c1b4441fb8b2..c1b4441fb8b2 100644 --- a/drivers/net/wireless/iwlegacy/3945-debug.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index af1b3e6839fa..af1b3e6839fa 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index 76b0729ade17..76b0729ade17 100644 --- a/drivers/net/wireless/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 93bdf684babe..93bdf684babe 100644 --- a/drivers/net/wireless/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h index 00030d43a194..00030d43a194 100644 --- a/drivers/net/wireless/iwlegacy/3945.h +++ b/drivers/net/wireless/intel/iwlegacy/3945.h diff --git a/drivers/net/wireless/iwlegacy/4965-calib.c b/drivers/net/wireless/intel/iwlegacy/4965-calib.c index e78bdefb8952..e78bdefb8952 100644 --- a/drivers/net/wireless/iwlegacy/4965-calib.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-calib.c diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/intel/iwlegacy/4965-debug.c index e0597bfdddb8..e0597bfdddb8 100644 --- a/drivers/net/wireless/iwlegacy/4965-debug.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-debug.c diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 6656215a13a9..fd38aa0763e4 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6416,7 +6416,7 @@ il4965_hw_detect(struct il_priv *il) D_INFO("HW Revision ID = 0x%X\n", il->rev_id); } -static struct il_sensitivity_ranges il4965_sensitivity = { +static const struct il_sensitivity_ranges il4965_sensitivity = { .min_nrg_cck = 97, .max_nrg_cck = 0, /* not used, set to 0 */ diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index bac60b2bc3f0..bac60b2bc3f0 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c index fe47db9c20cd..fe47db9c20cd 100644 --- a/drivers/net/wireless/iwlegacy/4965.c +++ b/drivers/net/wireless/intel/iwlegacy/4965.c diff --git 
a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h index 8ab8706f9422..8ab8706f9422 100644 --- a/drivers/net/wireless/iwlegacy/4965.h +++ b/drivers/net/wireless/intel/iwlegacy/4965.h diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig index fb919727b8bb..fb919727b8bb 100644 --- a/drivers/net/wireless/iwlegacy/Kconfig +++ b/drivers/net/wireless/intel/iwlegacy/Kconfig diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile index c985a01a0731..c985a01a0731 100644 --- a/drivers/net/wireless/iwlegacy/Makefile +++ b/drivers/net/wireless/intel/iwlegacy/Makefile diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h index dd744135c956..dd744135c956 100644 --- a/drivers/net/wireless/iwlegacy/commands.h +++ b/drivers/net/wireless/intel/iwlegacy/commands.h diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 887114582583..eb5cb603bc52 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -1865,14 +1865,14 @@ il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) cmd.len = il->ops->build_addsta_hcmd(sta, data); ret = il_send_cmd(il, &cmd); - - if (ret || (flags & CMD_ASYNC)) + if (ret) return ret; + if (flags & CMD_ASYNC) + return 0; + + pkt = (struct il_rx_pkt *)cmd.reply_page; + ret = il_process_add_sta_resp(il, sta, pkt, true); - if (ret == 0) { - pkt = (struct il_rx_pkt *)cmd.reply_page; - ret = il_process_add_sta_resp(il, sta, pkt, true); - } il_free_pages(il, cmd.reply_page); return ret; @@ -3602,7 +3602,7 @@ il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap) } EXPORT_SYMBOL(il_is_ht40_tx_allowed); -static u16 +static u16 noinline il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) { u16 new_val; diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index ce52cf114fde..ce52cf114fde 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h diff --git a/drivers/net/wireless/iwlegacy/csr.h b/drivers/net/wireless/intel/iwlegacy/csr.h index 9138e15004fa..9138e15004fa 100644 --- a/drivers/net/wireless/iwlegacy/csr.h +++ b/drivers/net/wireless/intel/iwlegacy/csr.h diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c index 908b9f4fef6f..908b9f4fef6f 100644 --- a/drivers/net/wireless/iwlegacy/debug.c +++ b/drivers/net/wireless/intel/iwlegacy/debug.c diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h index 85fe48e520f9..85fe48e520f9 100644 --- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h +++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h diff --git a/drivers/net/wireless/iwlegacy/prph.h b/drivers/net/wireless/intel/iwlegacy/prph.h index ffec4b4a248a..ffec4b4a248a 100644 --- a/drivers/net/wireless/iwlegacy/prph.h +++ b/drivers/net/wireless/intel/iwlegacy/prph.h diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 6e949df399d6..866067789330 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -53,7 +53,7 @@ config IWLWIFI_LEDS config IWLDVM tristate "Intel Wireless WiFi DVM Firmware support" - default IWLWIFI + depends on m help This is the driver that supports the DVM firmware. 
The list of the devices that use this firmware is available here: diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index dbfc5b18bcb7..05828c61d1ab 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o -iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o +iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile index 4d19685f31c3..4d19685f31c3 100644 --- a/drivers/net/wireless/iwlwifi/dvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index 991def878881..9de277c6c420 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -473,13 +473,4 @@ do { \ } while (0) #endif /* CONFIG_IWLWIFI_DEBUG */ -extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1]; - -static inline const char *iwl_dvm_get_cmd_string(u8 cmd) -{ - const char *s = iwl_dvm_cmd_strings[cmd]; - if (s) - return s; - return "UNKNOWN"; -} #endif /* __iwl_agn_h__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c index 20e6aa910700..07a4c644fb9b 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -311,7 +311,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv, /* If previous beacon had too many false alarms, * give it some extra margin by reducing sensitivity again * (but don't go below measured energy of desired Rx) */ - if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + if (data->nrg_prev_state == IWL_FA_TOO_MANY) { IWL_DEBUG_CALIB(priv, "... increasing margin\n"); if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) data->nrg_th_cck -= NRG_MARGIN; diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h index aeae4e80ea40..099e3ce80ffc 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 7a34e4d158d1..2ab2773655a8 100644 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -25,7 +25,7 @@ * in the file called COPYING. 
* * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index b15e44f8d1bd..74c51615244e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -22,7 +22,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ @@ -32,7 +32,9 @@ #include <linux/debugfs.h> #include <linux/ieee80211.h> #include <net/mac80211.h> + #include "iwl-debug.h" +#include "iwl-trans.h" #include "iwl-io.h" #include "dev.h" #include "agn.h" @@ -438,7 +440,7 @@ static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file, if (priv->rx_handlers_stats[cnt] > 0) pos += scnprintf(buf + pos, bufsz - pos, "\tRx handler[%36s]:\t\t %u\n", - iwl_dvm_get_cmd_string(cnt), + iwl_get_cmd_string(priv->trans, (u32)cnt), priv->rx_handlers_stats[cnt]); } diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 0ba3e56d6015..1a7ead753eee 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index 34b41e5f7cfc..cc13c04063a5 100644 --- a/drivers/net/wireless/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index ca4d6692cc4e..1aabb5ec096f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -134,8 +134,6 @@ static int iwl_led_cmd(struct iwl_priv *priv, on = IWL_LED_SOLID; } - IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", - priv->cfg->base_params->led_compensation); led_cmd.on = iwl_blink_compensation(priv, on, priv->cfg->base_params->led_compensation); led_cmd.off = iwl_blink_compensation(priv, off, diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h index 1c6b2252d0f2..75f74edd018f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/led.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index e18629a16fb0..4841be2aa499 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -22,7 +22,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan) priv->ucode_loaded = false; iwl_trans_stop_device(priv->trans); + ret = iwl_trans_start_hw(priv->trans); + if (ret) + goto out; priv->wowlan = true; @@ -1262,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) if (test_bit(STATUS_FW_ERROR, &priv->status)) { IWL_ERR(priv, "Command %s failed: FW Error\n", - iwl_dvm_get_cmd_string(cmd->id)); + iwl_get_cmd_string(priv->trans, cmd->id)); return -EIO; } diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index b3ad34e8bf5a..29ea1c6705b4 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -22,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -115,6 +115,9 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + if (priv->trans->max_skb_frags) + hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; + hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; @@ -1411,13 +1414,7 @@ static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); - if (WARN_ON(ctx->vif != vif)) { - struct iwl_rxon_context *tmp; - IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); - for_each_context(priv, tmp) - IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", - tmp->ctxid, tmp, tmp->vif); - } + WARN_ON(ctx->vif != vif); ctx->vif = NULL; iwl_teardown_interface(priv, vif, false); diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index e7616f0ee6e8..f62c2d727ddb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -22,7 +23,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -69,6 +70,93 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search. + * A warning will be triggered on violation. 
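The comment above states the convention for the iwl_dvm_cmd_names table that follows: entries stay sorted by command id so that lookups such as iwl_get_cmd_string(), used by the debugfs, lib.c and rx.c hunks elsewhere in this patch, can binary-search the array instead of indexing the sparse REPLY_MAX-sized string table being removed. A self-contained sketch of that scheme, with simplified types and made-up example ids (struct hcmd_name, get_cmd_string() and the enum values are illustrative, not the iwlwifi definitions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hcmd_name {
	uint8_t cmd_id;
	const char *name;
};

/* Stringify the command id, in the spirit of the driver's HCMD_NAME(). */
#define HCMD_NAME(x) { .cmd_id = x, .name = #x }

enum { REPLY_ALIVE = 0x01, REPLY_ERROR = 0x02, REPLY_ADD_STA = 0x18 };

/* Must stay sorted by cmd_id, as the comment in the patch demands. */
static const struct hcmd_name cmd_names[] = {
	HCMD_NAME(REPLY_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(REPLY_ADD_STA),
};

static int cmp_hcmd(const void *key, const void *elt)
{
	uint8_t id = *(const uint8_t *)key;
	const struct hcmd_name *e = elt;

	return (int)id - (int)e->cmd_id;
}

static const char *get_cmd_string(uint8_t id)
{
	const struct hcmd_name *e = bsearch(&id, cmd_names,
					    sizeof(cmd_names) / sizeof(cmd_names[0]),
					    sizeof(cmd_names[0]), cmp_hcmd);

	return e ? e->name : "UNKNOWN";
}

int main(void)
{
	printf("%s %s\n", get_cmd_string(REPLY_ADD_STA), get_cmd_string(0x7f));
	return 0;
}
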
+ */ +static const struct iwl_hcmd_names iwl_dvm_cmd_names[] = { + HCMD_NAME(REPLY_ALIVE), + HCMD_NAME(REPLY_ERROR), + HCMD_NAME(REPLY_ECHO), + HCMD_NAME(REPLY_RXON), + HCMD_NAME(REPLY_RXON_ASSOC), + HCMD_NAME(REPLY_QOS_PARAM), + HCMD_NAME(REPLY_RXON_TIMING), + HCMD_NAME(REPLY_ADD_STA), + HCMD_NAME(REPLY_REMOVE_STA), + HCMD_NAME(REPLY_REMOVE_ALL_STA), + HCMD_NAME(REPLY_TX), + HCMD_NAME(REPLY_TXFIFO_FLUSH), + HCMD_NAME(REPLY_WEPKEY), + HCMD_NAME(REPLY_LEDS_CMD), + HCMD_NAME(REPLY_TX_LINK_QUALITY_CMD), + HCMD_NAME(COEX_PRIORITY_TABLE_CMD), + HCMD_NAME(COEX_MEDIUM_NOTIFICATION), + HCMD_NAME(COEX_EVENT_CMD), + HCMD_NAME(TEMPERATURE_NOTIFICATION), + HCMD_NAME(CALIBRATION_CFG_CMD), + HCMD_NAME(CALIBRATION_RES_NOTIFICATION), + HCMD_NAME(CALIBRATION_COMPLETE_NOTIFICATION), + HCMD_NAME(REPLY_QUIET_CMD), + HCMD_NAME(REPLY_CHANNEL_SWITCH), + HCMD_NAME(CHANNEL_SWITCH_NOTIFICATION), + HCMD_NAME(REPLY_SPECTRUM_MEASUREMENT_CMD), + HCMD_NAME(SPECTRUM_MEASURE_NOTIFICATION), + HCMD_NAME(POWER_TABLE_CMD), + HCMD_NAME(PM_SLEEP_NOTIFICATION), + HCMD_NAME(PM_DEBUG_STATISTIC_NOTIFIC), + HCMD_NAME(REPLY_SCAN_CMD), + HCMD_NAME(REPLY_SCAN_ABORT_CMD), + HCMD_NAME(SCAN_START_NOTIFICATION), + HCMD_NAME(SCAN_RESULTS_NOTIFICATION), + HCMD_NAME(SCAN_COMPLETE_NOTIFICATION), + HCMD_NAME(BEACON_NOTIFICATION), + HCMD_NAME(REPLY_TX_BEACON), + HCMD_NAME(WHO_IS_AWAKE_NOTIFICATION), + HCMD_NAME(REPLY_TX_POWER_DBM_CMD), + HCMD_NAME(QUIET_NOTIFICATION), + HCMD_NAME(REPLY_TX_PWR_TABLE_CMD), + HCMD_NAME(REPLY_TX_POWER_DBM_CMD_V1), + HCMD_NAME(TX_ANT_CONFIGURATION_CMD), + HCMD_NAME(MEASURE_ABORT_NOTIFICATION), + HCMD_NAME(REPLY_BT_CONFIG), + HCMD_NAME(REPLY_STATISTICS_CMD), + HCMD_NAME(STATISTICS_NOTIFICATION), + HCMD_NAME(REPLY_CARD_STATE_CMD), + HCMD_NAME(CARD_STATE_NOTIFICATION), + HCMD_NAME(MISSED_BEACONS_NOTIFICATION), + HCMD_NAME(REPLY_CT_KILL_CONFIG_CMD), + HCMD_NAME(SENSITIVITY_CMD), + HCMD_NAME(REPLY_PHY_CALIBRATION_CMD), + HCMD_NAME(REPLY_WIPAN_PARAMS), + HCMD_NAME(REPLY_WIPAN_RXON), + HCMD_NAME(REPLY_WIPAN_RXON_TIMING), + HCMD_NAME(REPLY_WIPAN_RXON_ASSOC), + HCMD_NAME(REPLY_WIPAN_QOS_PARAM), + HCMD_NAME(REPLY_WIPAN_WEPKEY), + HCMD_NAME(REPLY_WIPAN_P2P_CHANNEL_SWITCH), + HCMD_NAME(REPLY_WIPAN_NOA_NOTIFICATION), + HCMD_NAME(REPLY_WIPAN_DEACTIVATION_COMPLETE), + HCMD_NAME(REPLY_RX_PHY_CMD), + HCMD_NAME(REPLY_RX_MPDU_CMD), + HCMD_NAME(REPLY_RX), + HCMD_NAME(REPLY_COMPRESSED_BA), + HCMD_NAME(REPLY_BT_COEX_PRIO_TABLE), + HCMD_NAME(REPLY_BT_COEX_PROT_ENV), + HCMD_NAME(REPLY_BT_COEX_PROFILE_NOTIF), + HCMD_NAME(REPLY_D3_CONFIG), + HCMD_NAME(REPLY_WOWLAN_PATTERNS), + HCMD_NAME(REPLY_WOWLAN_WAKEUP_FILTER), + HCMD_NAME(REPLY_WOWLAN_TSC_RSC_PARAMS), + HCMD_NAME(REPLY_WOWLAN_TKIP_PARAMS), + HCMD_NAME(REPLY_WOWLAN_KEK_KCK_MATERIAL), + HCMD_NAME(REPLY_WOWLAN_GET_STATUS), +}; + +static const struct iwl_hcmd_arr iwl_dvm_groups[] = { + [0x0] = HCMD_ARR(iwl_dvm_cmd_names), +}; + static const struct iwl_op_mode_ops iwl_dvm_ops; void iwl_update_chain_flags(struct iwl_priv *priv) @@ -341,7 +429,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); /* Make sure device is powered up for SRAM reads */ - if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags)) + if (!iwl_trans_grab_nic_access(priv->trans, &reg_flags)) return; /* Set starting address; reads will auto-increment */ @@ -1227,10 +1315,26 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + + switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_4K: + trans_cfg.rx_buf_size = IWL_AMSDU_4K; + break; + case IWL_AMSDU_8K: + trans_cfg.rx_buf_size = IWL_AMSDU_8K; + break; + case IWL_AMSDU_12K: + default: + trans_cfg.rx_buf_size = IWL_AMSDU_4K; + pr_err("Unsupported amsdu_size: %d\n", + iwlwifi_mod_params.amsdu_size); + } + trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED; - trans_cfg.command_names = iwl_dvm_cmd_strings; + trans_cfg.command_groups = iwl_dvm_groups; + trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups); + trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM; WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE < @@ -1251,6 +1355,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); + trans->command_groups = trans_cfg.command_groups; + trans->command_groups_size = trans_cfg.command_groups_size; /* At this point both hw and priv are allocated. */ @@ -1625,7 +1731,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, ptr = base + EVENT_START_OFFSET + (start_idx * event_size); /* Make sure device is powered up for SRAM reads */ - if (!iwl_trans_grab_nic_access(trans, false, &reg_flags)) + if (!iwl_trans_grab_nic_access(trans, &reg_flags)) return pos; /* Set starting address; reads will auto-increment */ diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c index 1513dbc79c14..0ad557c89514 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c @@ -22,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h index 570d3a5e4670..2fd9b43adafd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h @@ -22,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #ifndef __iwl_power_setting_h__ diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index cef921c1a623..ee7505537c96 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index f6bd25cad203..c5fe44584613 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -19,7 +19,7 @@ * file called LICENSE.
* * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 4a45b0b594c7..52ab1e012e8f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portionhelp of the ieee80211 subsystem header files. @@ -22,7 +23,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -32,91 +33,13 @@ #include <linux/sched.h> #include <net/mac80211.h> #include <asm/unaligned.h> + +#include "iwl-trans.h" #include "iwl-io.h" #include "dev.h" #include "calib.h" #include "agn.h" -#define IWL_CMD_ENTRY(x) [x] = #x - -const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = { - IWL_CMD_ENTRY(REPLY_ALIVE), - IWL_CMD_ENTRY(REPLY_ERROR), - IWL_CMD_ENTRY(REPLY_ECHO), - IWL_CMD_ENTRY(REPLY_RXON), - IWL_CMD_ENTRY(REPLY_RXON_ASSOC), - IWL_CMD_ENTRY(REPLY_QOS_PARAM), - IWL_CMD_ENTRY(REPLY_RXON_TIMING), - IWL_CMD_ENTRY(REPLY_ADD_STA), - IWL_CMD_ENTRY(REPLY_REMOVE_STA), - IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA), - IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH), - IWL_CMD_ENTRY(REPLY_WEPKEY), - IWL_CMD_ENTRY(REPLY_TX), - IWL_CMD_ENTRY(REPLY_LEDS_CMD), - IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD), - IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD), - IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION), - IWL_CMD_ENTRY(COEX_EVENT_CMD), - IWL_CMD_ENTRY(REPLY_QUIET_CMD), - IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH), - IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD), - IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION), - IWL_CMD_ENTRY(POWER_TABLE_CMD), - IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION), - IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC), - IWL_CMD_ENTRY(REPLY_SCAN_CMD), - IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD), - IWL_CMD_ENTRY(SCAN_START_NOTIFICATION), - IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION), - IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION), - IWL_CMD_ENTRY(BEACON_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_TX_BEACON), - IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION), - IWL_CMD_ENTRY(QUIET_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD), - IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_BT_CONFIG), - IWL_CMD_ENTRY(REPLY_STATISTICS_CMD), - IWL_CMD_ENTRY(STATISTICS_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD), - IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION), - IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD), - IWL_CMD_ENTRY(SENSITIVITY_CMD), - IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD), - IWL_CMD_ENTRY(REPLY_RX_PHY_CMD), - IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD), - IWL_CMD_ENTRY(REPLY_COMPRESSED_BA), - IWL_CMD_ENTRY(CALIBRATION_CFG_CMD), - IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION), - IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD), - IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION), - 
IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD), - IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF), - IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE), - IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV), - IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS), - IWL_CMD_ENTRY(REPLY_WIPAN_RXON), - IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING), - IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC), - IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM), - IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY), - IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH), - IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE), - IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS), - IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER), - IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS), - IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS), - IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL), - IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS), - IWL_CMD_ENTRY(REPLY_D3_CONFIG), -}; -#undef IWL_CMD_ENTRY - /****************************************************************************** * * Generic RX handler implementations @@ -1095,7 +1018,9 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, } else { /* No handling needed */ IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n", - iwl_dvm_get_cmd_string(pkt->hdr.cmd), + iwl_get_cmd_string(priv->trans, + iwl_cmd_id(pkt->hdr.cmd, + 0, 0)), pkt->hdr.cmd); } } diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index 85ceceb34fcc..2d47cb24c48b 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -20,7 +20,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index 648159495bbc..81a2ddbe9569 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -22,7 +22,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include <linux/slab.h> diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index 0fa67d3b7235..8e9768a553e4 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -22,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c index c4736c8834c5..5b73492e7ff7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -22,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ @@ -184,7 +184,7 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data) priv->thermal_throttle.ct_kill_toggle = true; } iwl_read32(priv->trans, CSR_UCODE_DRV_GP1); - if (iwl_trans_grab_nic_access(priv->trans, false, &flags)) + if (iwl_trans_grab_nic_access(priv->trans, &flags)) iwl_trans_release_nic_access(priv->trans, &flags); /* Reschedule the ct_kill timer to occur in diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 507726534b84..d324e9be9cbf 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -22,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #ifndef __iwl_tt_setting_h__ diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index bddd19769035..59e2001c39f8 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -22,7 +22,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -383,6 +383,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc); memset(&info->status, 0, sizeof(info->status)); + memset(info->driver_data, 0, sizeof(info->driver_data)); info->driver_data[0] = ctx; info->driver_data[1] = dev_cmd; diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c index 931a8e4269ef..b662cf35b033 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -23,7 +23,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c index 06f6cc08f451..a90dbab6bbbe 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c index 890b95f497d6..a6da9594c4a5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c index 724194e23414..8b5afdef2d83 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c index 21b2630763dc..0b4ba781b631 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index bf88ec3a65fa..e60cf141ed79 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -69,13 +71,19 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 19 +#define IWL7260_UCODE_API_MAX 17 +#define IWL7265_UCODE_API_MAX 17 +#define IWL7265D_UCODE_API_MAX 20 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 +#define IWL7265_UCODE_API_OK 13 +#define IWL7265D_UCODE_API_OK 13 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 13 +#define IWL7265_UCODE_API_MIN 13 +#define IWL7265D_UCODE_API_MIN 13 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d @@ -149,10 +157,7 @@ static const struct iwl_ht_params iwl7000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; -#define IWL_DEVICE_7000 \ - .ucode_api_max = IWL7260_UCODE_API_MAX, \ - .ucode_api_ok = IWL7260_UCODE_API_OK, \ - .ucode_api_min = IWL7260_UCODE_API_MIN, \ +#define IWL_DEVICE_7000_COMMON \ .device_family = IWL_DEVICE_FAMILY_7000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ @@ -163,6 +168,24 @@ static const struct iwl_ht_params iwl7000_ht_params = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .dccm_offset = IWL7000_DCCM_OFFSET +#define IWL_DEVICE_7000 \ + IWL_DEVICE_7000_COMMON, \ + .ucode_api_max = IWL7260_UCODE_API_MAX, \ + .ucode_api_ok = IWL7260_UCODE_API_OK, \ + .ucode_api_min = IWL7260_UCODE_API_MIN + +#define IWL_DEVICE_7005 \ + IWL_DEVICE_7000_COMMON, \ + .ucode_api_max = IWL7265_UCODE_API_MAX, \ + .ucode_api_ok = IWL7265_UCODE_API_OK, \ + .ucode_api_min = IWL7265_UCODE_API_MIN + +#define IWL_DEVICE_7005D \ + IWL_DEVICE_7000_COMMON, \ + .ucode_api_max = IWL7265D_UCODE_API_MAX, \ + .ucode_api_ok = IWL7265D_UCODE_API_OK, \ + .ucode_api_min = IWL7265D_UCODE_API_MIN + const struct iwl_cfg iwl7260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, @@ -266,6 +289,17 @@ static const struct iwl_ht_params iwl7265_ht_params = { const struct iwl_cfg iwl3165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3165", .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7005D, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3165_NVM_VERSION, + .nvm_calib_ver = IWL3165_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl3168_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3168", + .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3165_NVM_VERSION, @@ -277,7 +311,7 @@ const struct iwl_cfg iwl3165_2ac_cfg = { const struct iwl_cfg iwl7265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7000, + IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, @@ -288,7 +322,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = { const struct iwl_cfg iwl7265_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7000, + IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, @@ -299,7 +333,7 @@ const struct iwl_cfg iwl7265_2n_cfg = { const struct iwl_cfg iwl7265_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7000, + IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, @@ -310,7 +344,7 @@ const 
struct iwl_cfg iwl7265_n_cfg = { const struct iwl_cfg iwl7265d_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, + IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, @@ -321,7 +355,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = { const struct iwl_cfg iwl7265d_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, + IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, @@ -332,7 +366,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = { const struct iwl_cfg iwl7265d_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, + IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, @@ -342,5 +376,5 @@ const struct iwl_cfg iwl7265d_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 9bcc0bf937d8..c84a0299d43e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 19 +#define IWL8000_UCODE_API_MAX 20 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 13 @@ -154,7 +154,6 @@ static const struct iwl_tt_params iwl8000_tt_params = { .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ - .d0i3 = true, \ .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ @@ -187,6 +186,16 @@ const struct iwl_cfg iwl8260_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwl8265_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 8265", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + const struct iwl_cfg iwl4165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 4165", .fw_name_pre = IWL8000_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c new file mode 100644 index 000000000000..ecbf4822cd69 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -0,0 +1,163 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
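Note on the iwl-7000.c hunk above: it splits the old IWL_DEVICE_7000 macro into IWL_DEVICE_7000_COMMON plus thin per-family wrappers so that 7260, 7265 and 7265D parts can each cap their firmware API range independently. The sketch below illustrates the underlying idiom (one macro carrying the shared designated initializers, composed into several const configs); the struct and values are invented for the example, not the driver's iwl_cfg.

/*
 * Sketch of the shared-fields macro idiom behind IWL_DEVICE_7000_COMMON:
 * one macro carries the common designated initializers, and thin
 * per-family wrappers add only what differs (here, the ucode API range).
 * The struct and values are invented; this is not the driver's iwl_cfg.
 */
#include <stdio.h>

struct cfg {
	const char *name;
	int ucode_api_max;
	int ucode_api_min;
	int max_data_size;
};

#define DEVICE_COMMON	.max_data_size = 4096	/* shared by all family members */

#define DEVICE_A	DEVICE_COMMON, .ucode_api_max = 17, .ucode_api_min = 13
#define DEVICE_B	DEVICE_COMMON, .ucode_api_max = 20, .ucode_api_min = 13

static const struct cfg cfg_a = { .name = "family A", DEVICE_A };
static const struct cfg cfg_b = { .name = "family B", DEVICE_B };

int main(void)
{
	printf("%s: ucode API %d..%d\n", cfg_a.name,
	       cfg_a.ucode_api_min, cfg_a.ucode_api_max);
	printf("%s: ucode API %d..%d\n", cfg_b.name,
	       cfg_b.ucode_api_min, cfg_b.ucode_api_max);
	return 0;
}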
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-agn-hw.h" + +/* Highest firmware API version supported */ +#define IWL9000_UCODE_API_MAX 20 + +/* Oldest version we won't warn about */ +#define IWL9000_UCODE_API_OK 13 + +/* Lowest firmware API version supported */ +#define IWL9000_UCODE_API_MIN 13 + +/* NVM versions */ +#define IWL9000_NVM_VERSION 0x0a1d +#define IWL9000_TX_POWER_VERSION 0xffff /* meaningless */ + +/* Memory offsets and lengths */ +#define IWL9000_DCCM_OFFSET 0x800000 +#define IWL9000_DCCM_LEN 0x18000 +#define IWL9000_DCCM2_OFFSET 0x880000 +#define IWL9000_DCCM2_LEN 0x8000 +#define IWL9000_SMEM_OFFSET 0x400000 +#define IWL9000_SMEM_LEN 0x68000 + +#define IWL9000_FW_PRE "iwlwifi-9000-" +#define IWL9000_MODULE_FIRMWARE(api) \ + IWL9000_FW_PRE "-" __stringify(api) ".ucode" + +#define NVM_HW_SECTION_NUM_FAMILY_9000 10 + +static const struct iwl_base_params iwl9000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000, + .num_of_queues = 31, + .pll_cfg_val = 0, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +static const struct iwl_ht_params iwl9000_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +static const struct iwl_tt_params iwl9000_tt_params = { + .ct_kill_entry = 115, + .ct_kill_exit = 93, + .ct_kill_duration = 5, + .dynamic_smps_entry = 111, + .dynamic_smps_exit = 107, + .tx_protection_entry = 112, + .tx_protection_exit = 105, + .tx_backoff = { + {.temperature = 110, .backoff = 200}, + {.temperature = 111, .backoff = 600}, + {.temperature = 112, .backoff = 1200}, + {.temperature = 113, .backoff = 2000}, + {.temperature = 114, .backoff = 4000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +#define IWL_DEVICE_9000 \ + .ucode_api_max = IWL9000_UCODE_API_MAX, \ + .ucode_api_ok = IWL9000_UCODE_API_OK, \ + .ucode_api_min = IWL9000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_8000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl9000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \ + .non_shared_ant = ANT_A, \ + .dccm_offset = IWL9000_DCCM_OFFSET, \ + .dccm_len = IWL9000_DCCM_LEN, \ + .dccm2_offset = IWL9000_DCCM2_OFFSET, \ + .dccm2_len = IWL9000_DCCM2_LEN, \ + .smem_offset = IWL9000_SMEM_OFFSET, \ + .smem_len = IWL9000_SMEM_LEN, \ + .thermal_params = &iwl9000_tt_params, \ + .apmg_not_supported = true + +const struct iwl_cfg iwl9260_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 9260", + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl5165_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 5165", + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK)); diff --git 
a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h index 04a483d38659..ee9347a54cdc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 910970858f98..f99048135fb9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -254,6 +254,7 @@ struct iwl_tt_params { #define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ #define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */ #define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */ +#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000 struct iwl_eeprom_params { const u8 regulatory_bands[7]; @@ -295,7 +296,6 @@ struct iwl_pwr_tx_backoff { * @high_temp: Is this NIC is designated to be in high temperature. * @host_interrupt_operation_mode: device needs host interrupt operation * mode set - * @d0i3: device uses d0i3 instead of d3 * @nvm_hw_section_num: the ID of the HW NVM section * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs @@ -342,7 +342,6 @@ struct iwl_cfg { const bool internal_wimax_coex; const bool host_interrupt_operation_mode; bool high_temp; - bool d0i3; u8 nvm_hw_section_num; bool lp_xtal_workaround; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; @@ -421,6 +420,7 @@ extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; extern const struct iwl_cfg iwl3165_2ac_cfg; +extern const struct iwl_cfg iwl3168_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; @@ -429,9 +429,12 @@ extern const struct iwl_cfg iwl7265d_2n_cfg; extern const struct iwl_cfg iwl7265d_n_cfg; extern const struct iwl_cfg iwl8260_2n_cfg; extern const struct iwl_cfg iwl8260_2ac_cfg; +extern const struct iwl_cfg iwl8265_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; +extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl5165_2ac_cfg; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 543abeaffcf0..163b21bc20cb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c index 09feff4fa226..b1c3b0d0fcc6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index 9bb36d79c2bd..110333208450 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -21,7 +21,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -163,7 +163,6 @@ do { \ #define IWL_DL_FW 0x00010000 #define IWL_DL_RF_KILL 0x00020000 #define IWL_DL_FW_ERRORS 0x00040000 -#define IWL_DL_LED 0x00080000 /* 0x00F00000 - 0x00100000 */ #define IWL_DL_RATE 0x00100000 #define IWL_DL_CALIB 0x00200000 @@ -189,7 +188,6 @@ do { \ #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) #define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) #define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) -#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) #define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h index 71a78cede9b0..d80312b46f16 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -19,7 +20,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -51,6 +52,22 @@ TRACE_EVENT(iwlwifi_dev_tx_data, TP_printk("[%s] TX frame data", __get_str(dev)) ); +TRACE_EVENT(iwlwifi_dev_tx_tso_chunk, + TP_PROTO(const struct device *dev, + u8 *data_src, size_t data_len), + TP_ARGS(dev, data_src, data_len), + TP_STRUCT__entry( + DEV_ENTRY + + __dynamic_array(u8, data, data_len) + ), + TP_fast_assign( + DEV_ASSIGN; + memcpy(__get_dynamic_array(data), data_src, data_len); + ), + TP_printk("[%s] TX frame data", __get_str(dev)) +); + TRACE_EVENT(iwlwifi_dev_rx_data, TP_PROTO(const struct device *dev, const struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h index f62c54485852..27914eedc146 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index eb4b99a1c8cd..22786d7dc00a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -20,7 +20,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h index a3b3c2465f89..5dfc9295a7e0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h index 10839fae9cd9..e9b8673dd245 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c index 90987d6f348e..1d9dd153ef1c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index b87acd6a229b..f4d3cd010087 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -19,7 +19,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 463cadfbfccb..7acb49075683 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -451,7 +451,9 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { - IWL_ERR(drv, "api_index larger than supported by driver\n"); + IWL_ERR(drv, + "api flags index %d larger than supported by driver\n", + api_index); /* don't return an error so we can load FW that has more bits */ return 0; } @@ -473,7 +475,9 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { - IWL_ERR(drv, "api_index larger than supported by driver\n"); + IWL_ERR(drv, + "capa flags index %d larger than supported by driver\n", + api_index); /* don't return an error so we can load FW that has more bits */ return 0; } @@ -590,7 +594,8 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, - struct iwl_ucode_capabilities *capa) + struct iwl_ucode_capabilities *capa, + bool *usniffer_images) { struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data; struct iwl_ucode_tlv *tlv; @@ -603,7 +608,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, char buildstr[25]; u32 build, paging_mem_size; int num_of_cpus; - bool usniffer_images = false; bool usniffer_req = false; bool gscan_capa = false; @@ -976,7 +980,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } case IWL_UCODE_TLV_SEC_RT_USNIFFER: - usniffer_images = true; + *usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR_USNIFFER, tlv_len); @@ -1027,7 +1031,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, } } - if (usniffer_req && !usniffer_images) { + if (usniffer_req && !*usniffer_images) { IWL_ERR(drv, "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); return -EINVAL; @@ -1188,6 +1192,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) u32 api_ver; int i; bool load_module = false; + bool usniffer_images = false; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = @@ -1225,7 +1230,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) err = 
iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); else err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, - &fw->ucode_capa); + &fw->ucode_capa, &usniffer_images); if (err) goto try_again; @@ -1323,6 +1328,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) sizeof(struct iwl_fw_dbg_trigger_time_event); trigger_tlv_sz[FW_DBG_TRIGGER_BA] = sizeof(struct iwl_fw_dbg_trigger_ba); + trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = + sizeof(struct iwl_fw_dbg_trigger_tdls); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { @@ -1539,6 +1546,7 @@ struct iwl_mod_params iwlwifi_mod_params = { .bt_coex_active = true, .power_level = IWL_POWER_INDEX_1, .d0i3_disable = true, + .d0i3_entry_delay = 1000, #ifndef CONFIG_IWLWIFI_UAPSD .uapsd_disable = true, #endif /* CONFIG_IWLWIFI_UAPSD */ @@ -1637,9 +1645,9 @@ MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); -module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, +module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); +MODULE_PARM_DESC(amsdu_size, "amsdu size 0:4K 1:8K 2:12K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); @@ -1704,3 +1712,7 @@ MODULE_PARM_DESC(power_level, module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, S_IRUGO); MODULE_PARM_DESC(fw_monitor, "firmware monitor - to debug FW (default: false - needs lots of memory)"); + +module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay, + uint, S_IRUGO); +MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index cda746b33db1..f6eacfdbc265 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -69,7 +69,7 @@ /* for all modules */ #define DRV_NAME "iwlwifi" #define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation" -#define DRV_AUTHOR "<ilw@linux.intel.com>" +#define DRV_AUTHOR "<linuxwifi@intel.com>" /* radio config bits (actual values from NVM definition) */ #define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index acc3d186c5c1..c15f5be85197 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -454,11 +454,11 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev, TXP_CHECK_AND_PRINT(COMMON_TYPE), txp->flags); IWL_DEBUG_EEPROM(dev, - "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n", + "\t\t chain_A: %d chain_B: %d chain_C: %d\n", txp->chain_a_max, txp->chain_b_max, txp->chain_c_max); IWL_DEBUG_EEPROM(dev, - "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n", + "\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n", txp->mimo2_max, txp->mimo3_max, ((txp->delta_20_in_40 & 0xf0) >> 4), (txp->delta_20_in_40 & 0x0f)); @@ -766,7 +766,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, if (cfg->ht_params->ldpc) ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; - if (iwlwifi_mod_params.amsdu_size_8K) + if (iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent; diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 750c8c9ee70d..ad2b834668ff 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index 219ca8acca62..f2cea1c7befc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h index a6d3bdf82cdd..1ed78be06c23 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index d56064861a9c..5cc6be927eab 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index 9dbe19cbb4dd..a5aaf6853704 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -88,6 +88,7 @@ * &struct iwl_fw_error_dump_rb * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were * paged to the DRAM. + * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers. */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -103,6 +104,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_ERROR_INFO = 10, IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_PAGING = 12, + IWL_FW_ERROR_DUMP_RADIO_REG = 13, IWL_FW_ERROR_DUMP_MAX, }; @@ -288,6 +290,9 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related * events. * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. + * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a + * threshold. + * @FW_DBG_TDLS: trigger log collection upon TDLS related events. */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -302,6 +307,8 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_TXQ_TIMERS, FW_DBG_TRIGGER_TIME_EVENT, FW_DBG_TRIGGER_BA, + FW_DBG_TRIGGER_TX_LATENCY, + FW_DBG_TRIGGER_TDLS, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 08303db0000f..84f8aeb926c8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -308,6 +310,10 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts + * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT + * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what + * antenna the beacon should be transmitted + * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -334,6 +340,9 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, + IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, + IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ @@ -548,6 +557,8 @@ enum iwl_fw_dbg_trigger_vif_type { * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what * configuration should be applied when the triggers kicks in. * @occurrences: number of occurrences. 0 means the trigger will never fire. + * @trig_dis_ms: the time, in milliseconds, after an occurrence of this + * trigger in which another occurrence should be ignored. */ struct iwl_fw_dbg_trigger_tlv { __le32 id; @@ -557,7 +568,8 @@ struct iwl_fw_dbg_trigger_tlv { u8 mode; u8 start_conf_id; __le16 occurrences; - __le32 reserved[2]; + __le16 trig_dis_ms; + __le16 reserved[3]; u8 data[0]; } __packed; @@ -723,6 +735,19 @@ struct iwl_fw_dbg_trigger_ba { } __packed; /** + * struct iwl_fw_dbg_trigger_tdls - configures trigger for TDLS events. + * @action_bitmap: the TDLS action to trigger the collection upon + * @peer_mode: trigger on specific peer or all + * @peer: the TDLS peer to trigger the collection on + */ +struct iwl_fw_dbg_trigger_tdls { + u8 action_bitmap; + u8 peer_mode; + u8 peer[ETH_ALEN]; + u8 reserved[4]; +} __packed; + +/** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id * @usniffer: should the uSniffer image be used diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 84ec0cefb62a..85d6d6d55e2f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
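Note on the iwl_fw_dbg_trigger_tlv hunk above: the new 16-bit trig_dis_ms field is carved out of the existing reserved words, so the tail of the packed TLV keeps the same overall size. A user-space sketch of that layout equivalence, with uint16_t/uint32_t standing in for the kernel's __le16/__le32:

/*
 * Sketch of the reserved-space carve-out in iwl_fw_dbg_trigger_tlv: the
 * new 16-bit trig_dis_ms field replaces part of the old reserved words,
 * so the tail of the packed TLV keeps the same size.  uint16_t/uint32_t
 * stand in for the kernel's __le16/__le32 here.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct trigger_tail_old {
	uint32_t reserved[2];
} __attribute__((packed));

struct trigger_tail_new {
	uint16_t trig_dis_ms;		/* quiet period between occurrences, in ms */
	uint16_t reserved[3];
} __attribute__((packed));

int main(void)
{
	/* both layouts occupy the same 8 bytes at the end of the TLV */
	static_assert(sizeof(struct trigger_tail_old) ==
		      sizeof(struct trigger_tail_new), "TLV tail must not grow");
	printf("tail size: %zu bytes\n", sizeof(struct trigger_tail_new));
	return 0;
}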
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -305,18 +305,4 @@ iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) return conf_tlv->usniffer; } -#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ - void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ - unlikely(__dbg_trigger); \ -}) - -static inline struct iwl_fw_dbg_trigger_tlv* -iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id) -{ - if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv))) - return NULL; - - return fw->dbg_trigger_tlv[id]; -} - #endif /* __iwl_fw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 0bd9d4aad0c0..32c8f84ae519 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project. * @@ -21,7 +22,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -81,7 +82,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) { u32 value = 0x5a5a5a5a; unsigned long flags; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { + if (iwl_trans_grab_nic_access(trans, &flags)) { value = iwl_read32(trans, reg); iwl_trans_release_nic_access(trans, &flags); } @@ -94,7 +95,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) { unsigned long flags; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { + if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, reg, value); iwl_trans_release_nic_access(trans, &flags); } @@ -117,26 +118,28 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, } IWL_EXPORT_SYMBOL(iwl_poll_direct_bit); -u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) +u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs) { u32 val = iwl_trans_read_prph(trans, ofs); trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); return val; } +IWL_EXPORT_SYMBOL(iwl_read_prph_no_grab); -void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) { trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); iwl_trans_write_prph(trans, ofs, val); } +IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) { unsigned long flags; u32 val = 0x5a5a5a5a; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - val = __iwl_read_prph(trans, ofs); + if (iwl_trans_grab_nic_access(trans, &flags)) { + val = iwl_read_prph_no_grab(trans, ofs); iwl_trans_release_nic_access(trans, &flags); } return val; @@ -147,8 +150,8 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) { unsigned long flags; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - __iwl_write_prph(trans, ofs, val); + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write_prph_no_grab(trans, ofs, val); iwl_trans_release_nic_access(trans, &flags); } } @@ -173,9 +176,10 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) { unsigned long flags; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - __iwl_write_prph(trans, ofs, - 
__iwl_read_prph(trans, ofs) | mask); + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write_prph_no_grab(trans, ofs, + iwl_read_prph_no_grab(trans, ofs) | + mask); iwl_trans_release_nic_access(trans, &flags); } } @@ -186,9 +190,10 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, { unsigned long flags; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - __iwl_write_prph(trans, ofs, - (__iwl_read_prph(trans, ofs) & mask) | bits); + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write_prph_no_grab(trans, ofs, + (iwl_read_prph_no_grab(trans, ofs) & + mask) | bits); iwl_trans_release_nic_access(trans, &flags); } } @@ -199,9 +204,9 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) unsigned long flags; u32 val; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - val = __iwl_read_prph(trans, ofs); - __iwl_write_prph(trans, ofs, (val & ~mask)); + if (iwl_trans_grab_nic_access(trans, &flags)) { + val = iwl_read_prph_no_grab(trans, ofs); + iwl_write_prph_no_grab(trans, ofs, (val & ~mask)); iwl_trans_release_nic_access(trans, &flags); } } diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 501d0560c061..a9bcc788cae1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -21,7 +21,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -55,9 +55,9 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); -u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs); +u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); -void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); +void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val); void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout); diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index ac2b90df8413..fd42f63f5e84 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -86,6 +86,12 @@ enum iwl_disable_11n { IWL_ENABLE_HT_TXAGG = BIT(3), }; +enum iwl_amsdu_size { + IWL_AMSDU_4K = 0, + IWL_AMSDU_8K = 1, + IWL_AMSDU_12K = 2, +}; + /** * struct iwl_mod_params * @@ -94,7 +100,7 @@ enum iwl_disable_11n { * @sw_crypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, * use IWL_[DIS,EN]ABLE_HT_* constants - * @amsdu_size_8K: enable 8K amsdu size, default = 0 + * @amsdu_size: enable 8K amsdu size, default = 4K. enum iwl_amsdu_size. 
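Note on the iwl-io.c hunks above: __iwl_read_prph/__iwl_write_prph are renamed to iwl_read_prph_no_grab/iwl_write_prph_no_grab, and iwl_trans_grab_nic_access() loses its 'silent' argument. The naming convention is that a _no_grab accessor assumes the caller already holds NIC access, while the plain accessor wraps the raw access in grab/release. A toy illustration of that split follows; a mutex and an array stand in for NIC access and the periphery registers, and none of it is driver code.

/*
 * Toy illustration of the accessor split behind the
 * __iwl_read_prph -> iwl_read_prph_no_grab rename: the _no_grab variant
 * assumes the caller already holds NIC access, while the plain variant
 * wraps the raw access in grab/release.  A mutex and an array stand in
 * for NIC access and the periphery registers; none of this is driver code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nic_access = PTHREAD_MUTEX_INITIALIZER;
static unsigned int prph_regs[16];

static unsigned int read_prph_no_grab(unsigned int ofs)
{
	return prph_regs[ofs];		/* caller must already hold NIC access */
}

static unsigned int read_prph(unsigned int ofs)
{
	unsigned int val;

	pthread_mutex_lock(&nic_access);	/* grab_nic_access */
	val = read_prph_no_grab(ofs);
	pthread_mutex_unlock(&nic_access);	/* release_nic_access */
	return val;
}

int main(void)
{
	prph_regs[3] = 0xabcd;
	printf("prph[3] = 0x%x\n", read_prph(3));
	return 0;
}

Batched sequences such as iwl_set_bits_prph() grab access once and then use the _no_grab accessors inside, which is exactly what the read-modify-write hunks above do.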
* @restart_fw: restart firmware, default = 1 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 @@ -103,13 +109,15 @@ enum iwl_disable_11n { * @debug_level: levels are IWL_DL_* * @ant_coupling: antenna coupling in dB, default = 0 * @d0i3_disable: disable d0i3, default = 1, + * @d0i3_entry_delay: time to wait after no refs are taken before + * entering D0i3 (in msecs) * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor */ struct iwl_mod_params { int sw_crypto; unsigned int disable_11n; - int amsdu_size_8K; + int amsdu_size; bool restart_fw; bool bt_coex_active; int led_mode; @@ -122,6 +130,7 @@ struct iwl_mod_params { char *nvm_file; bool uapsd_disable; bool d0i3_disable; + unsigned int d0i3_entry_delay; bool lar_disable; bool fw_monitor; }; diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c index 6caf2affbbb5..8aa1f2b7fdfc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h index dbe8234521de..0f9995ed71cd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index d82984912e04..7b89bfc8c8ac 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -379,8 +379,19 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, else vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; - if (iwlwifi_mod_params.amsdu_size_8K) + switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_4K: + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; + break; + case IWL_AMSDU_8K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; + break; + case IWL_AMSDU_12K: + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + break; + default: + break; + } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | @@ -580,15 +591,13 @@ static void iwl_set_hw_address_family_8000(struct device *dev, IWL_ERR_DEV(dev, "mac address is not found\n"); } -#define IWL_4165_DEVICE_ID 0x5501 - struct iwl_nvm_data * iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1, u32 hw_id) + u32 mac_addr0, u32 mac_addr1) { struct iwl_nvm_data *data; u32 sku; @@ -627,17 +636,6 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, (sku & NVM_SKU_CAP_11AC_ENABLE); data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; - /* - * OTP 0x52 bug work around - * define antenna 1x1 according to MIMO disabled - */ - if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) { - data->valid_tx_ant = ANT_B; - data->valid_rx_ant = ANT_B; - tx_chains = ANT_B; - rx_chains = ANT_B; - } - data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 9f44d8188c5c..92466ee72806 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1, u32 hw_id); + u32 mac_addr0, u32 mac_addr1); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index 2a58d6833224..b49eda8150bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -27,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -123,6 +123,8 @@ struct iwl_cfg; * received on the RSS queue(s). The queue parameter indicates which of the * RSS queues received this frame; it will always be non-zero. * This method must not sleep. + * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set + * completes. Must be atomic. * @queue_full: notifies that a HW queue is full. 
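Note on the iwl-nvm-parse.c hunk above: the boolean amsdu_size_8K check becomes a switch over the new three-value amsdu_size module parameter, which selects the advertised VHT maximum MPDU length. A stand-alone sketch of that mapping; the MPDU_* constants are placeholders, not the IEEE80211_VHT_CAP_* definitions from mac80211.

/*
 * Sketch of the new amsdu_size handling in iwl_init_vht_hw_capab(): the
 * old 8K boolean becomes a three-value enum that selects the advertised
 * VHT maximum MPDU length.  The MPDU_* constants are placeholders, not
 * the IEEE80211_VHT_CAP_* definitions from mac80211.
 */
#include <stdio.h>

enum amsdu_size { AMSDU_4K = 0, AMSDU_8K = 1, AMSDU_12K = 2 };

enum { MPDU_LEN_3895 = 3895, MPDU_LEN_7991 = 7991, MPDU_LEN_11454 = 11454 };

static int vht_max_mpdu_len(enum amsdu_size sz)
{
	switch (sz) {
	case AMSDU_4K:
		return MPDU_LEN_3895;
	case AMSDU_8K:
		return MPDU_LEN_7991;
	case AMSDU_12K:
		return MPDU_LEN_11454;
	default:
		return MPDU_LEN_3895;	/* conservative fallback */
	}
}

int main(void)
{
	printf("amsdu_size=%d -> max MPDU %d\n", AMSDU_8K,
	       vht_max_mpdu_len(AMSDU_8K));
	return 0;
}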
* Must be atomic and called with BH disabled. * @queue_not_full: notifies that a HW queue is not full any more. @@ -155,6 +157,8 @@ struct iwl_op_mode_ops { struct iwl_rx_cmd_buffer *rxb); void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue); + void (*async_cb)(struct iwl_op_mode *op_mode, + const struct iwl_device_cmd *cmd); void (*queue_full)(struct iwl_op_mode *op_mode, int queue); void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); @@ -203,6 +207,13 @@ static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode, op_mode->ops->rx_rss(op_mode, napi, rxb, queue); } +static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode, + const struct iwl_device_cmd *cmd) +{ + if (op_mode->ops->async_cb) + op_mode->ops->async_cb(op_mode, cmd); +} + static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, int queue) { diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c index a105455b6a24..4a4dea08751c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h index 9ee18d0d2d01..24103877eab0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 3ab777f79e4f..5bde23a472b4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -345,6 +345,12 @@ enum secure_load_status_reg { #define TXF_READ_MODIFY_DATA (0xa00448) #define TXF_READ_MODIFY_ADDR (0xa0044c) +/* Radio registers access */ +#define RSP_RADIO_CMD (0xa02804) +#define RSP_RADIO_RDDAT (0xa02814) +#define RADIO_RSP_ADDR_POS (6) +#define RADIO_RSP_RD_CMD (3) + /* FW monitor */ #define MON_BUFF_SAMPLE_CTL (0xa03c00) #define MON_BUFF_BASE_ADDR (0xa03c3c) diff --git a/drivers/net/wireless/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h index f2353ebf2666..99b43da32adf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scd.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 71610968c365..6069a9ff53fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -61,7 +61,10 @@ * *****************************************************************************/ #include <linux/kernel.h> +#include <linux/bsearch.h> + #include "iwl-trans.h" +#include "iwl-drv.h" struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, @@ -112,3 +115,91 @@ void iwl_trans_free(struct iwl_trans *trans) kmem_cache_destroy(trans->dev_cmd_pool); kfree(trans); } + +int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) +{ + int ret; + + if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status))) + return -ERFKILL; + + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return -EIO; + } + + if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) && + !(cmd->flags & CMD_ASYNC))) + return -EINVAL; + + if (!(cmd->flags & CMD_ASYNC)) + lock_map_acquire_read(&trans->sync_cmd_lockdep_map); + + ret = trans->ops->send_cmd(trans, cmd); + + if (!(cmd->flags & CMD_ASYNC)) + lock_map_release(&trans->sync_cmd_lockdep_map); + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_trans_send_cmd); + +/* Comparator for struct iwl_hcmd_names. + * Used in the binary search over a list of host commands. + * + * @key: command_id that we're looking for. + * @elt: struct iwl_hcmd_names candidate for match. + * + * @return 0 iff equal. + */ +static int iwl_hcmd_names_cmp(const void *key, const void *elt) +{ + const struct iwl_hcmd_names *name = elt; + u8 cmd1 = *(u8 *)key; + u8 cmd2 = name->cmd_id; + + return (cmd1 - cmd2); +} + +const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id) +{ + u8 grp, cmd; + struct iwl_hcmd_names *ret; + const struct iwl_hcmd_arr *arr; + size_t size = sizeof(struct iwl_hcmd_names); + + grp = iwl_cmd_groupid(id); + cmd = iwl_cmd_opcode(id); + + if (!trans->command_groups || grp >= trans->command_groups_size || + !trans->command_groups[grp].arr) + return "UNKNOWN"; + + arr = &trans->command_groups[grp]; + ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp); + if (!ret) + return "UNKNOWN"; + return ret->cmd_name; +} +IWL_EXPORT_SYMBOL(iwl_get_cmd_string); + +int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans) +{ + int i, j; + const struct iwl_hcmd_arr *arr; + + for (i = 0; i < trans->command_groups_size; i++) { + arr = &trans->command_groups[i]; + if (!arr->arr) + continue; + for (j = 0; j < arr->size - 1; j++) + if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id) + return -1; + } + return 0; +} +IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted); diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 6f76525088f0..82fb3a97a46d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -26,7 +26,7 @@ * in the file called COPYING. 
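Besides moving iwl_trans_send_cmd() out of line, the patch adds a WARN_ON that rejects CMD_WANT_ASYNC_CALLBACK unless CMD_ASYNC is also set. From an op_mode's point of view the intended usage is roughly the sketch below; REPLY_EXAMPLE_CMD and struct example_payload are placeholders rather than real API, and only iwl_host_cmd, the CMD_* flags and the new async_cb hook from this patch are assumed.

/*
 * Sketch: fire-and-forget command whose completion is reported through
 * the op_mode's .async_cb hook rather than by sleeping on the response.
 */
static int example_send_async(struct iwl_trans *trans)
{
	struct example_payload payload = { 0 };
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE_CMD,
		.flags = CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
		.data = { &payload, },
		.len = { sizeof(payload), },
	};

	/* dropping CMD_ASYNC here would now trip the WARN_ON above */
	return iwl_trans_send_cmd(trans, &cmd);
}

/*
 * Called by the transport when the command completes; the new kerneldoc
 * requires this callback to be atomic.
 */
static void example_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	/* e.g. release state that was pinned for the request */
}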
* * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -68,6 +68,7 @@ #include <linux/ieee80211.h> #include <linux/mm.h> /* for page_address */ #include <linux/lockdep.h> +#include <linux/kernel.h> #include "iwl-debug.h" #include "iwl-config.h" @@ -248,6 +249,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle. * @CMD_WAKE_UP_TRANS: The command response should wake up the trans * (i.e. mark it as non-idle). + * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be + * called after this command completes. Valid only with CMD_ASYNC. * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to * check that we leave enough room for the TBs bitmap which needs 20 bits. */ @@ -259,6 +262,7 @@ enum CMD_MODE { CMD_SEND_IN_IDLE = BIT(4), CMD_MAKE_TRANS_IDLE = BIT(5), CMD_WAKE_UP_TRANS = BIT(6), + CMD_WANT_ASYNC_CALLBACK = BIT(7), CMD_TB_BITMAP_POS = 11, }; @@ -377,6 +381,11 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define MAX_NO_RECLAIM_CMDS 6 +/* + * The first entry in driver_data array in ieee80211_tx_info + * that can be used by the transport. + */ +#define IWL_TRANS_FIRST_DRIVER_DATA 2 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) /* @@ -423,6 +432,38 @@ enum iwl_trans_status { STATUS_TRANS_DEAD, }; +static inline int +iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) +{ + switch (rb_size) { + case IWL_AMSDU_4K: + return get_order(4 * 1024); + case IWL_AMSDU_8K: + return get_order(8 * 1024); + case IWL_AMSDU_12K: + return get_order(12 * 1024); + default: + WARN_ON(1); + return -1; + } +} + +struct iwl_hcmd_names { + u8 cmd_id; + const char *const cmd_name; +}; + +#define HCMD_NAME(x) \ + { .cmd_id = x, .cmd_name = #x } + +struct iwl_hcmd_arr { + const struct iwl_hcmd_names *arr; + int size; +}; + +#define HCMD_ARR(x) \ + { .arr = x, .size = ARRAY_SIZE(x) } + /** * struct iwl_trans_config - transport configuration * @@ -436,14 +477,16 @@ enum iwl_trans_status { * list of such notifications to filter. Max length is * %MAX_NO_RECLAIM_CMDS. 
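HCMD_NAME() and HCMD_ARR() exist to feed iwl_get_cmd_string(), which looks the opcode up with bsearch(); each per-group array therefore has to be sorted by ascending cmd_id, and iwl_cmd_groups_verify_sorted() (wired into iwl_trans_configure() further down) warns when it is not. A sketch of such a table follows; the command IDs and the LEGACY_GROUP index are used purely as placeholders for whatever the op_mode actually registers.

/* Sketch: per-group name tables, each sorted by ascending opcode. */
static const struct iwl_hcmd_names example_legacy_names[] = {
	HCMD_NAME(ALIVE),	/* 0x01 */
	HCMD_NAME(ECHO_CMD),	/* 0x03 */
	HCMD_NAME(TX_CMD),	/* 0x1c */
};

static const struct iwl_hcmd_arr example_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(example_legacy_names),
};

/* later, e.g. when logging a command:
 *	IWL_DEBUG_HC(trans, "Sending command %s\n",
 *		     iwl_get_cmd_string(trans, cmd->id));
 */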
* @n_no_reclaim_cmds: # of commands in list - * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs, + * @rx_buf_size: RX buffer size needed for A-MSDUs * if unset 4k will be the RX buffer size * @bc_table_dword: set to true if the BC table expects the byte count to be * in DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue * @wide_cmd_header: firmware supports wide host command header - * @command_names: array of command names, must be 256 entries - * (one for each command); for debugging only + * @sw_csum_tx: transport should compute the TCP checksum + * @command_groups: array of command groups, each member is an array of the + * commands in the group; for debugging only + * @command_groups_size: number of command groups, to avoid illegal access * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until * we get the ALIVE from the uCode */ @@ -456,12 +499,14 @@ struct iwl_trans_config { const u8 *no_reclaim_cmds; unsigned int n_no_reclaim_cmds; - bool rx_buf_size_8k; + enum iwl_amsdu_size rx_buf_size; bool bc_table_dword; bool scd_set_active; bool wide_cmd_header; - const char *const *command_names; - + bool sw_csum_tx; + const struct iwl_hcmd_arr *command_groups; + int command_groups_size; + u32 sdio_adma_addr; }; @@ -512,7 +557,11 @@ struct iwl_trans_txq_scd_cfg { * If RFkill is asserted in the middle of a SYNC host command, it must * return -ERFKILL straight away. * May sleep only if CMD_ASYNC is not set - * @tx: send an skb + * @tx: send an skb. The transport relies on the op_mode to zero the + * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all + * the CSUM will be taken care of (TCP CSUM and IP header in case of + * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP + * header if it is IPv4. * Must be atomic * @reclaim: free packet until ssn. Returns a list of freed packets. * Must be atomic @@ -526,8 +575,11 @@ struct iwl_trans_txq_scd_cfg { * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. * @freeze_txq_timer: prevents the timer of the queue from firing until the * queue is set to awake. Must be atomic. - * @dbgfs_register: add the dbgfs files under this directory. Files will be - * automatically deleted. + * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note + * that the transport needs to refcount the calls since this function + * will be called several times with block = true, and then the queues + * need to be unblocked only after the same number of calls with + * block = false. 
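Taken together with the previous sketch, an op_mode's start path would now fill the configuration roughly as below, inside its start handler. This is a sketch under the assumption that example_groups from the sketch above exists and that the op_mode wants 4k RX buffers; the old command_names and rx_buf_size_8k fields no longer exist.

	struct iwl_trans_config trans_cfg = {};

	trans_cfg.op_mode = op_mode;
	/*
	 * enum instead of the old bool; the transport turns this into a
	 * page order for its RX allocations via
	 * iwl_trans_get_rb_size_order().
	 */
	trans_cfg.rx_buf_size = IWL_AMSDU_4K;
	trans_cfg.bc_table_dword = true;
	trans_cfg.wide_cmd_header = true;
	/* replaces the flat 256-entry command_names array */
	trans_cfg.command_groups = example_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(example_groups);
	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;

	iwl_trans_configure(trans, &trans_cfg);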
* @write8: write a u8 to a register at offset ofs from the BAR * @write32: write a u32 to a register at offset ofs from the BAR * @read32: read a u32 register at offset ofs from the BAR @@ -583,10 +635,10 @@ struct iwl_trans_ops { void (*txq_disable)(struct iwl_trans *trans, int queue, bool configure_scd); - int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, bool freeze); + void (*block_txq_ptrs)(struct iwl_trans *trans, bool block); void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); @@ -600,8 +652,7 @@ struct iwl_trans_ops { void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); - bool (*grab_nic_access)(struct iwl_trans *trans, bool silent, - unsigned long *flags); + bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags); void (*release_nic_access)(struct iwl_trans *trans, unsigned long *flags); void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, @@ -612,7 +663,7 @@ struct iwl_trans_ops { void (*resume)(struct iwl_trans *trans); struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, - struct iwl_fw_dbg_trigger_tlv + const struct iwl_fw_dbg_trigger_tlv *trigger); }; @@ -628,18 +679,61 @@ enum iwl_trans_state { }; /** - * enum iwl_d0i3_mode - d0i3 mode + * DOC: Platform power management + * + * There are two types of platform power management: system-wide + * (WoWLAN) and runtime. + * + * In system-wide power management the entire platform goes into a low + * power state (e.g. idle or suspend to RAM) at the same time and the + * device is configured as a wakeup source for the entire platform. + * This is usually triggered by userspace activity (e.g. the user + * presses the suspend button or a power management daemon decides to + * put the platform in low power mode). The device's behavior in this + * mode is dictated by the wake-on-WLAN configuration. + * + * In runtime power management, only the devices which are themselves + * idle enter a low power state. This is done at runtime, which means + * that the entire system is still running normally. This mode is + * usually triggered automatically by the device driver and requires + * the ability to enter and exit the low power modes in a very short + * time, so there is not much impact in usability. * - * @IWL_D0I3_MODE_OFF - d0i3 is disabled - * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle - * (e.g. no active references) - * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend - * (in case of 'any' trigger) + * The terms used for the device's behavior are as follows: + * + * - D0: the device is fully powered and the host is awake; + * - D3: the device is in low power mode and only reacts to + * specific events (e.g. magic-packet received or scan + * results found); + * - D0I3: the device is in low power mode and reacts to any + * activity (e.g. RX); + * + * These terms reflect the power modes in the firmware and are not to + * be confused with the physical device power state. The NIC can be + * in D0I3 mode even if, for instance, the PCI device is in D3 state. + */ + +/** + * enum iwl_plat_pm_mode - platform power management mode + * + * This enumeration describes the device's platform power management + * behavior when in idle mode (i.e. 
runtime power management) or when + * in system-wide suspend (i.e WoWLAN). + * + * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this + * device. At runtime, this means that nothing happens and the + * device always remains in active. In system-wide suspend mode, + * it means that the all connections will be closed automatically + * by mac80211 before the platform is suspended. + * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN). + * For runtime power management, this mode is not officially + * supported. + * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode. */ -enum iwl_d0i3_mode { - IWL_D0I3_MODE_OFF = 0, - IWL_D0I3_MODE_ON_IDLE, - IWL_D0I3_MODE_ON_SUSPEND, +enum iwl_plat_pm_mode { + IWL_PLAT_PM_MODE_DISABLED, + IWL_PLAT_PM_MODE_D3, + IWL_PLAT_PM_MODE_D0I3, }; /** @@ -679,6 +773,12 @@ enum iwl_d0i3_mode { * the opmode. * @paging_download_buf: Buffer used for copying all of the pages before * downloading them to the FW. The buffer is allocated in the opmode + * @system_pm_mode: the system-wide power management mode in use. + * This mode is set dynamically, depending on the WoWLAN values + * configured from the userspace at runtime. + * @runtime_pm_mode: the runtime power management mode in use. This + * mode is set during the initialization phase and is not + * supposed to change during runtime. */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -698,6 +798,9 @@ struct iwl_trans { bool pm_support; bool ltr_enabled; + const struct iwl_hcmd_arr *command_groups; + int command_groups_size; + u8 num_rx_queues; /* The following fields are internal only */ @@ -726,21 +829,24 @@ struct iwl_trans { struct iwl_fw_paging *paging_db; void *paging_download_buf; - enum iwl_d0i3_mode d0i3_mode; - - bool wowlan_d0i3; + enum iwl_plat_pm_mode system_pm_mode; + enum iwl_plat_pm_mode runtime_pm_mode; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); }; +const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); +int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); + static inline void iwl_trans_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { trans->op_mode = trans_cfg->op_mode; trans->ops->configure(trans, trans_cfg); + WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); } static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power) @@ -860,41 +966,13 @@ static inline void iwl_trans_resume(struct iwl_trans *trans) static inline struct iwl_trans_dump_data * iwl_trans_dump_data(struct iwl_trans *trans, - struct iwl_fw_dbg_trigger_tlv *trigger) + const struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trans->ops->dump_data) return NULL; return trans->ops->dump_data(trans, trigger); } -static inline int iwl_trans_send_cmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - int ret; - - if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL, &trans->status))) - return -ERFKILL; - - if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) - return -EIO; - - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } - - if (!(cmd->flags & CMD_ASYNC)) - lock_map_acquire_read(&trans->sync_cmd_lockdep_map); - - ret = trans->ops->send_cmd(trans, cmd); - - if (!(cmd->flags & CMD_ASYNC)) - lock_map_release(&trans->sync_cmd_lockdep_map); - - return ret; -} - static inline struct iwl_device_cmd * 
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { @@ -907,6 +985,8 @@ iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) (dev_cmd_ptr + trans->dev_cmd_headroom); } +int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); + static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, struct iwl_device_cmd *dev_cmd) { @@ -921,8 +1001,10 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return -EIO; + } return trans->ops->tx(trans, skb, dev_cmd, queue); } @@ -930,8 +1012,10 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs) { - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return; + } trans->ops->reclaim(trans, queue, ssn, skbs); } @@ -949,8 +1033,10 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, { might_sleep(); - if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return; + } trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); } @@ -990,26 +1076,36 @@ static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return; + } if (trans->ops->freeze_txq_timer) trans->ops->freeze_txq_timer(trans, txqs, freeze); } -static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, - u32 txqs) +static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans, + bool block) { - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return; + } - return trans->ops->wait_tx_queue_empty(trans, txqs); + if (trans->ops->block_txq_ptrs) + trans->ops->block_txq_ptrs(trans, block); } -static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) +static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, + u32 txqs) { - return trans->ops->dbgfs_register(trans, dir); + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return -EIO; + } + + return trans->ops->wait_tx_queue_empty(trans, txqs); } static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1085,9 +1181,9 @@ iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) trans->ops->set_bits_mask(trans, reg, mask, value); } -#define iwl_trans_grab_nic_access(trans, silent, flags) \ +#define iwl_trans_grab_nic_access(trans, flags) \ __cond_lock(nic_access, \ - likely((trans)->ops->grab_nic_access(trans, silent, flags))) + likely((trans)->ops->grab_nic_access(trans, flags))) static inline void __releases(nic_access) iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags) diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile 
b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 8c2c3d13b092..23e7e2937566 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -1,12 +1,12 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o -iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o +iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o iwlmvm-y += power.o coex.o coex_legacy.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o -iwlmvm-$(CONFIG_PM_SLEEP) += d3.o +iwlmvm-y += tof.o fw-dbg.o +iwlmvm-$(CONFIG_PM) += d3.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index a1376539d2dc..7cb68f6ed1b0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/binding.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index e290ac67d975..2e098f8e0f83 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -443,11 +443,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) if (iwl_mvm_bt_is_plcr_supported(mvm)) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); - if (IWL_MVM_BT_COEX_MPLUT) { + if (iwl_mvm_is_mplut_supported(mvm)) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); - bt_cmd.enabled_modules |= - cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED); - } bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); @@ -904,6 +901,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac) { __le16 fc = hdr->frame_control; + bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm); if (info->band != IEEE80211_BAND_2GHZ) return 0; @@ -911,22 +909,27 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, if (unlikely(mvm->bt_tx_prio)) return mvm->bt_tx_prio - 1; - /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */ - if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO || - is_multicast_ether_addr(hdr->addr1) || - ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) || - ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) - return 3; - - switch (ac) { - case IEEE80211_AC_BE: - return 1; - case IEEE80211_AC_VO: + if (likely(ieee80211_is_data(fc))) { + if (likely(ieee80211_is_data_qos(fc))) { + switch (ac) { + case IEEE80211_AC_BE: + return mplut_enabled ? 1 : 0; + case IEEE80211_AC_VI: + return mplut_enabled ? 2 : 3; + case IEEE80211_AC_VO: + return 3; + default: + return 0; + } + } else if (is_multicast_ether_addr(hdr->addr1)) { + return 3; + } else + return 0; + } else if (ieee80211_is_mgmt(fc)) { + return ieee80211_is_disassoc(fc) ? 
0 : 3; + } else if (ieee80211_is_ctl(fc)) { + /* ignore cfend and cfendack frames as we never send those */ return 3; - case IEEE80211_AC_VI: - return 2; - default: - break; } return 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c index 61c07b05fcaa..015045733444 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 5c21231e195d..b00c03fcd447 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -106,6 +106,7 @@ #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 +#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 29ae58ebf223..d3e21d95cece 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -104,9 +104,13 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct inet6_ifaddr *ifa; int idx = 0; + memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); + read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { mvmvif->target_ipv6_addrs[idx] = ifa->addr; + if (ifa->flags & IFA_F_TENTATIVE) + __set_bit(idx, mvmvif->tentative_addrs); idx++; if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) break; @@ -133,10 +137,32 @@ static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) out[i] = cpu_to_le16(p1k[i]); } +static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, + struct iwl_mvm_key_pn *ptk_pn, + struct ieee80211_key_seq *seq, + int tid, int queues) +{ + const u8 *ret = seq->ccmp.pn; + int i; + + /* get the PN from mac80211, used on the default queue */ + ieee80211_get_key_rx_seq(key, tid, seq); + + /* and use the internal data for the other queues */ + for (i = 1; i < queues; i++) { + const u8 *tmp = ptk_pn->q[i].pn[tid]; + + if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) + ret = tmp; + } + + return ret; +} + struct wowlan_key_data { struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; struct iwl_wowlan_tkip_params_cmd *tkip; - bool error, use_rsc_tsc, use_tkip; + bool error, use_rsc_tsc, use_tkip, configure_keys; int wep_key_idx; }; @@ -158,8 +184,6 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, u16 p1k[IWL_P1K_SIZE]; int ret, i; - mutex_lock(&mvm->mutex); - switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ @@ -195,20 +219,25 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, wkc.wep_key.key_offset = data->wep_key_idx; } - ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); - data->error = ret != 0; - - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; + if (data->configure_keys) { + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, + sizeof(wkc), &wkc); + data->error = ret != 0; + + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + mutex_unlock(&mvm->mutex); + } /* don't upload key again */ - goto out_unlock; + return; } default: data->error = true; - goto out_unlock; + return; case WLAN_CIPHER_SUITE_AES_CMAC: /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them @@ -217,7 +246,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, * IGTK for anything. This means we could spuriously wake up or * be deauthenticated, but that was considered acceptable. */ - goto out_unlock; + return; case WLAN_CIPHER_SUITE_TKIP: if (sta) { tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; @@ -287,46 +316,71 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, /* * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 for checking the IV in the frames. + * mac80211/our RX code use TID 0 for checking the PN. 
*/ - for (i = 0; i < IWL_NUM_RSC; i++) { - u8 *pn = seq.ccmp.pn; + if (sta && iwl_mvm_has_new_rx_api(mvm)) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; + const u8 *pn; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + ptk_pn = rcu_dereference_protected( + mvmsta->ptk_pn[key->keyidx], + lockdep_is_held(&mvm->mutex)); + if (WARN_ON(!ptk_pn)) + break; - ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc[i].pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, + mvm->trans->num_rx_queues); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + } else { + for (i = 0; i < IWL_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } } data->use_rsc_tsc = true; break; } - /* - * The D3 firmware hardcodes the key offset 0 as the key it uses - * to transmit packets to the AP, i.e. the PTK. - */ - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); - } else { + if (data->configure_keys) { + mutex_lock(&mvm->mutex); /* - * firmware only supports TSC/RSC for a single key, - * so if there are multiple keep overwriting them - * with new ones -- this relies on mac80211 doing - * list_add_tail(). + * The D3 firmware hardcodes the key offset 0 as the key it + * uses to transmit packets to the AP, i.e. the PTK. */ - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); + } else { + /* + * firmware only supports TSC/RSC for a single key, + * so if there are multiple keep overwriting them + * with new ones -- this relies on mac80211 doing + * list_add_tail(). 
+ */ + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + } + mutex_unlock(&mvm->mutex); + data->error = ret != 0; } - - data->error = ret != 0; -out_unlock: - mutex_unlock(&mvm->mutex); } static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, @@ -771,6 +825,9 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + /* the fw is reset, so all the keys are cleared */ + memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); + mvm->ptk_ivlen = 0; mvm->ptk_icvlen = 0; mvm->ptk_ivlen = 0; @@ -793,6 +850,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; + wowlan_config_cmd->flags = ENABLE_L3_FILTERING | + ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; /* Query the last used seqno and set it */ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); @@ -842,20 +901,127 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, return 0; } -static int -iwl_mvm_wowlan_config(struct iwl_mvm *mvm, - struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, - struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, - struct ieee80211_sta *ap_sta) +static void +iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *data) +{ + struct ieee80211_sta *ap_sta; + + rcu_read_lock(); + + ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]); + if (IS_ERR_OR_NULL(ap_sta)) + goto out; + + ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data); +out: + rcu_read_unlock(); +} + +int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool d0i3, + u32 cmd_flags) { struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; struct wowlan_key_data key_data = { + .configure_keys = !d0i3, .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, }; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) + return -ENOMEM; + + /* + * if we have to configure keys, call ieee80211_iter_keys(), + * as we need non-atomic context in order to take the + * required locks. + * for the d0i3 we can't use ieee80211_iter_keys(), as + * taking (almost) any mutex might result in deadlock. + */ + if (!d0i3) { + /* + * Note that currently we don't propagate cmd_flags + * to the iterator. In case of key_data.configure_keys, + * all the configured commands are SYNC, and + * iwl_mvm_wowlan_program_keys() will take care of + * locking/unlocking mvm->mutex. 
+ */ + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_program_keys, + &key_data); + } else { + iwl_mvm_iter_d0i3_ap_keys(mvm, vif, + iwl_mvm_wowlan_program_keys, + &key_data); + } + + if (key_data.error) { + ret = -EIO; + goto out; + } + + if (key_data.use_rsc_tsc) { + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_TSC_RSC_PARAM, cmd_flags, + sizeof(*key_data.rsc_tsc), + key_data.rsc_tsc); + if (ret) + goto out; + } + + if (key_data.use_tkip) { + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_TKIP_PARAM, + cmd_flags, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto out; + } + + /* configure rekey data only if offloaded rekey is supported (d3) */ + if (mvmvif->rekey_data.valid && !d0i3) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, + NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, + NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; + + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_KEK_KCK_MATERIAL, cmd_flags, + sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto out; + } + ret = 0; +out: + kfree(key_data.rsc_tsc); + return ret; +} + +static int +iwl_mvm_wowlan_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, + struct ieee80211_sta *ap_sta) +{ int ret; ret = iwl_mvm_switch_to_d3(mvm); @@ -866,10 +1032,6 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, if (ret) return ret; - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; - if (!iwlwifi_mod_params.sw_crypto) { /* * This needs to be unlocked due to lock ordering @@ -877,74 +1039,28 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, * that isn't really a problem though. 
*/ mutex_unlock(&mvm->mutex); - ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_wowlan_program_keys, - &key_data); + ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false, + CMD_ASYNC); mutex_lock(&mvm->mutex); - if (key_data.error) { - ret = -EIO; - goto out; - } - - if (key_data.use_rsc_tsc) { - struct iwl_host_cmd rsc_tsc_cmd = { - .id = WOWLAN_TSC_RSC_PARAM, - .data[0] = key_data.rsc_tsc, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .len[0] = sizeof(*key_data.rsc_tsc), - }; - - ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd); - if (ret) - goto out; - } - - if (key_data.use_tkip) { - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_TKIP_PARAM, - 0, sizeof(tkip_cmd), - &tkip_cmd); - if (ret) - goto out; - } - - if (mvmvif->rekey_data.valid) { - memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); - memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, - NL80211_KCK_LEN); - kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); - memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, - NL80211_KEK_LEN); - kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); - kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; - - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_KEK_KCK_MATERIAL, 0, - sizeof(kek_kck_cmd), - &kek_kck_cmd); - if (ret) - goto out; - } + if (ret) + return ret; } ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd), wowlan_config_cmd); if (ret) - goto out; + return ret; ret = iwl_mvm_send_patterns(mvm, wowlan); if (ret) - goto out; + return ret; - ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0); + ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); if (ret) - goto out; + return ret; ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp); - -out: - kfree(key_data.rsc_tsc); return ret; } @@ -1057,13 +1173,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) { /* if we're not associated, this must be netdetect */ - if (!wowlan->nd_config && !mvm->nd_config) { + if (!wowlan->nd_config) { ret = 1; goto out_noreset; } ret = iwl_mvm_netdetect_config( - mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif); + mvm, wowlan, wowlan->nd_config, vif); if (ret) goto out; @@ -1159,19 +1275,20 @@ remove_notif: int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_trans *trans = mvm->trans; int ret; /* make sure the d0i3 exit work is not pending */ flush_work(&mvm->d0i3_exit_work); - ret = iwl_trans_suspend(mvm->trans); + ret = iwl_trans_suspend(trans); if (ret) return ret; - mvm->trans->wowlan_d0i3 = wowlan->any; - if (mvm->trans->wowlan_d0i3) { - /* 'any' trigger means d0i3 usage */ - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { + if (wowlan->any) { + trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; + + if (iwl_mvm_enter_d0i3_on_suspend(mvm)) { ret = iwl_mvm_enter_d0i3_sync(mvm); if (ret) @@ -1182,11 +1299,13 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); mutex_unlock(&mvm->d0i3_suspend_mutex); - iwl_trans_d3_suspend(mvm->trans, false); + iwl_trans_d3_suspend(trans, false); return 0; } + trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + return __iwl_mvm_suspend(hw, wowlan, false); } @@ -1216,6 +1335,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, goto report; } + pm_wakeup_event(mvm->dev, 0); + if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) wakeup.magic_pkt = true; @@ -1351,18 +1472,42 @@ static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc 
*sc, seq->tkip.iv16 = le16_to_cpu(sc->iv16); } -static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs, +static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs, + struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { int tid; BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); - for (tid = 0; tid < IWL_NUM_RSC; tid++) { - struct ieee80211_key_seq seq = {}; + if (sta && iwl_mvm_has_new_rx_api(mvm)) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; - iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); - ieee80211_set_key_rx_seq(key, tid, &seq); + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx], + lockdep_is_held(&mvm->mutex)); + if (WARN_ON(!ptk_pn)) + return; + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + struct ieee80211_key_seq seq = {}; + int i; + + iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); + ieee80211_set_key_rx_seq(key, tid, &seq); + for (i = 1; i < mvm->trans->num_rx_queues; i++) + memcpy(ptk_pn->q[i].pn[tid], + seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); + } + } else { + for (tid = 0; tid < IWL_NUM_RSC; tid++) { + struct ieee80211_key_seq seq = {}; + + iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); + ieee80211_set_key_rx_seq(key, tid, &seq); + } } } @@ -1381,14 +1526,15 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, } } -static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, +static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, + struct ieee80211_key_conf *key, struct iwl_wowlan_status *status) { union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc; switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key); + iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); @@ -1399,6 +1545,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, } struct iwl_mvm_d3_gtk_iter_data { + struct iwl_mvm *mvm; struct iwl_wowlan_status *status; void *last_gtk; u32 cipher; @@ -1406,7 +1553,7 @@ struct iwl_mvm_d3_gtk_iter_data { int num_keys; }; -static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, +static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, @@ -1447,7 +1594,8 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); + iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, + sta, key); atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: @@ -1470,7 +1618,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, if (data->status->num_of_gtk_rekeys) ieee80211_remove_key(key); else if (data->last_gtk == key) - iwl_mvm_set_key_rx_seq(key, data->status); + iwl_mvm_set_key_rx_seq(data->mvm, key, data->status); } static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, @@ -1479,6 +1627,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_d3_gtk_iter_data gtkdata = { + .mvm = mvm, .status = status, }; u32 disconnection_reasons = @@ -1494,7 +1643,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, /* find last GTK that we used initially, if any */ gtkdata.find_phase = true; ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_d3_update_gtks, >kdata); + iwl_mvm_d3_update_keys, 
>kdata); /* not trying to keep connections with MFP/unhandled ciphers */ if (gtkdata.unhandled_cipher) return false; @@ -1509,7 +1658,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, */ gtkdata.find_phase = false; ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_d3_update_gtks, >kdata); + iwl_mvm_d3_update_keys, >kdata); if (status->num_of_gtk_rekeys) { struct ieee80211_key_conf *key; @@ -1540,7 +1689,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, key = ieee80211_gtk_rekey_add(vif, &conf.conf); if (IS_ERR(key)) return false; - iwl_mvm_set_key_rx_seq(key, status); + iwl_mvm_set_key_rx_seq(mvm, key, status); } if (status->num_of_gtk_rekeys) { @@ -1689,6 +1838,30 @@ out_unlock: return false; } +void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status *status) +{ + struct iwl_mvm_d3_gtk_iter_data gtkdata = { + .mvm = mvm, + .status = status, + }; + + /* + * rekey handling requires taking locks that can't be taken now. + * however, d0i3 doesn't offload rekey, so we're fine. + */ + if (WARN_ON_ONCE(status->num_of_gtk_rekeys)) + return; + + /* find last GTK that we used initially, if any */ + gtkdata.find_phase = true; + iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, >kdata); + + gtkdata.find_phase = false; + iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, >kdata); +} + struct iwl_mvm_nd_query_results { u32 matched_profiles; struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; @@ -1947,8 +2120,9 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) { bool exit_now; enum iwl_d3_status d3_status; + struct iwl_trans *trans = mvm->trans; - iwl_trans_d3_resume(mvm->trans, &d3_status, false); + iwl_trans_d3_resume(trans, &d3_status, false); /* * make sure to clear D0I3_DEFER_WAKEUP before @@ -1965,9 +2139,9 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) _iwl_mvm_exit_d0i3(mvm); } - iwl_trans_resume(mvm->trans); + iwl_trans_resume(trans); - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { + if (iwl_mvm_enter_d0i3_on_suspend(mvm)) { int ret = iwl_mvm_exit_d0i3(mvm->hw->priv); if (ret) @@ -1983,12 +2157,16 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; - /* 'any' trigger means d0i3 was used */ - if (hw->wiphy->wowlan_config->any) - return iwl_mvm_resume_d0i3(mvm); + if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) + ret = iwl_mvm_resume_d0i3(mvm); else - return iwl_mvm_resume_d3(mvm); + ret = iwl_mvm_resume_d3(mvm); + + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + + return ret; } void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) @@ -2012,6 +2190,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) ieee80211_stop_queues(mvm->hw); synchronize_net(); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + /* start pseudo D3 */ rtnl_lock(); err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); @@ -2066,9 +2246,13 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) int remaining_time = 10; mvm->d3_test_active = false; + rtnl_lock(); __iwl_mvm_resume(mvm, true); rtnl_unlock(); + + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + iwl_abort_notification_waits(&mvm->notif_wait); ieee80211_restart_hw(mvm->hw); diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 7904b41a04c6..9e0d46368cdd 
100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 05928fb4021d..90500e2d107b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -65,6 +65,7 @@ #include <linux/vmalloc.h> #include "mvm.h" +#include "fw-dbg.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" @@ -512,6 +513,10 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, pos += scnprintf(buf+pos, bufsz-pos, "antenna isolation = %d CORUN LUT index = %d\n", mvm->last_ant_isol, mvm->last_corun_lut); + pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", + notif->rrc_enabled); + pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", + notif->ttc_enabled); } else { struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; @@ -530,8 +535,19 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, pos += scnprintf(buf+pos, bufsz-pos, "antenna isolation = %d CORUN LUT index = %d\n", mvm->last_ant_isol, mvm->last_corun_lut); + pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", + (notif->ttc_rrc_status >> 4) & 0xF); + pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", + notif->ttc_rrc_status & 0xF); } + pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", + IWL_MVM_BT_COEX_SYNC2SCO); + pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", + IWL_MVM_BT_COEX_MPLUT); + pos += scnprintf(buf + pos, bufsz - pos, "corunning = %d\n", + IWL_MVM_BT_COEX_CORUNNING); + mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); @@ -945,6 +961,44 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } +/* + * Enable / Disable continuous recording. + * Cause the FW to start continuous recording, by sending the relevant hcmd. + * Enable: input of every integer larger than 0, ENABLE_CONT_RECORDING. + * Disable: for 0 as input, DISABLE_CONT_RECORDING. + */ +static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_trans *trans = mvm->trans; + const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; + struct iwl_continuous_record_cmd cont_rec = {}; + int ret, rec_mode; + + if (!dest) + return -EOPNOTSUPP; + + if (dest->monitor_mode != SMEM_MODE || + trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + return -EOPNOTSUPP; + + ret = kstrtouint(buf, 0, &rec_mode); + if (ret) + return ret; + + cont_rec.record_mode.enable_recording = rec_mode ? 
+ cpu_to_le16(ENABLE_CONT_RECORDING) : + cpu_to_le16(DISABLE_CONT_RECORDING); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0, + sizeof(cont_rec), &cont_rec); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) @@ -975,7 +1029,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (ret) return ret; - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL); + iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, + (count - 1), NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); @@ -1262,6 +1317,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT); + PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -1396,7 +1452,8 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); -MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); +MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); @@ -1440,6 +1497,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); if (!debugfs_create_bool("enable_scan_iteration_notif", S_IRUSR | S_IWUSR, mvm->debugfs_dir, @@ -1476,10 +1534,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) goto err; #endif - if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, - mvm->debugfs_dir, - &mvm->low_latency_agg_frame_limit)) - goto err; if (!debugfs_create_u8("ps_disabled", S_IRUSR, mvm->debugfs_dir, &mvm->ps_disabled)) goto err; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h index 8c4190e7e027..ede6ef8d390e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h index d398a6102805..2a33b694ba10 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h index 20521bebb0b1..62b9a0a96700 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -94,10 +95,14 @@ struct iwl_d3_manager_config { * enum iwl_d3_proto_offloads - enabled protocol offloads * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled + * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid + * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid */ enum iwl_proto_offloads { IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), IWL_D3_PROTO_OFFLOAD_NS = BIT(1), + IWL_D3_PROTO_IPV4_VALID = BIT(2), + IWL_D3_PROTO_IPV6_VALID = BIT(3), }; #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 @@ -241,6 +246,13 @@ enum iwl_wowlan_wakeup_filters { IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), }; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ +enum iwl_wowlan_flags { + IS_11W_ASSOC = BIT(0), + ENABLE_L3_FILTERING = BIT(1), + ENABLE_NBNS_FILTERING = BIT(2), + ENABLE_DHCP_FILTERING = BIT(3), +}; + struct iwl_wowlan_config_cmd { __le32 wakeup_filter; __le16 non_qos_seq; @@ -248,8 +260,9 @@ struct iwl_wowlan_config_cmd { u8 wowlan_ba_teardown_tids; u8 is_11n_connection; u8 offloading_tid; - u8 reserved[3]; -} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */ + u8 flags; + u8 reserved[2]; +} __packed; /* WOWLAN_CONFIG_API_S_VER_4 */ /* * WOWLAN_TSC_RSC_PARAMS diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index f3f3ee0a766b..95ac59d088b1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h index c8f3e2536cbb..65a7c8a4cacf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h @@ -27,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h index 0f1ea80a55ef..ad9cc03e16c4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h @@ -25,7 +25,7 @@ * in the file called COPYING. 
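The WOWLAN_CONFIG_API_S_VER_4 layout turns one reserved byte into a flags field, and the d3 code earlier in this patch sets the three filtering bits unconditionally. A caller that additionally marks a protected (802.11w) association might look like the following sketch; mfp_enabled is a hypothetical local, and only the enum iwl_wowlan_flags and struct iwl_wowlan_config_cmd definitions above are assumed.

	struct iwl_wowlan_config_cmd cfg = {};

	cfg.flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		    ENABLE_DHCP_FILTERING;
	if (mfp_enabled)		/* hypothetical condition */
		cfg.flags |= IS_11W_ASSOC;

Since flags is a single u8, at most eight such bits can ever be defined for this version of the command.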
* * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index 9b7e49d4620f..fb6d341d6f3d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -27,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -68,6 +68,8 @@ #ifndef __fw_api_rx_h__ #define __fw_api_rx_h__ +/* API for pre-9000 hardware */ + #define IWL_RX_INFO_PHY_CNT 8 #define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 #define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff @@ -77,6 +79,11 @@ #define IWL_RX_INFO_ENERGY_ANT_B_POS 8 #define IWL_RX_INFO_ENERGY_ANT_C_POS 16 +enum iwl_mac_context_info { + MAC_CONTEXT_INFO_NONE, + MAC_CONTEXT_INFO_GSCAN, +}; + /** * struct iwl_rx_phy_info - phy info * (REPLY_RX_PHY_CMD = 0xc0) @@ -95,6 +102,8 @@ * @frame_time: frame's time on the air, based on byte count and frame rate * calculation * @mac_active_msk: what MACs were active when the frame was received + * @mac_context_info: additional info on the context in which the frame was + * received as defined in &enum iwl_mac_context_info * * Before each Rx, the device sends this data. It contains PHY information * about the reception of the packet. @@ -112,7 +121,8 @@ struct iwl_rx_phy_info { __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; __le32 rate_n_flags; __le32 byte_count; - __le16 mac_active_msk; + u8 mac_active_msk; + u8 mac_context_info; __le16 frame_time; } __packed; @@ -229,10 +239,130 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000), - RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000), + RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, + RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, RX_MPDU_RES_STATUS_RRF_KILL = BIT(29), RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), }; +/* 9000 series API */ +enum iwl_rx_mpdu_mac_flags1 { + IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0, + /* shift should be 4, but the length is measured in 2-byte + * words, so shifting only by 3 gives a byte result + */ + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3, +}; + +enum iwl_rx_mpdu_mac_flags2 { + /* in 2-byte words */ + IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f, + IWL_RX_MPDU_MFLG2_PAD = 0x20, + IWL_RX_MPDU_MFLG2_AMSDU = 0x40, +}; + +enum iwl_rx_mpdu_amsdu_info { + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x3f, + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x40, + /* 0x80 bit reserved for now */ +}; + +enum iwl_rx_l3l4_flags { + IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), + IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), + IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), + IWL_RX_L3L4_TCP_ACK = BIT(3), + IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4, + IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, + IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, +}; + +enum iwl_rx_mpdu_status { + IWL_RX_MPDU_STATUS_CRC_OK = BIT(0), + IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), + IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), + IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), + IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4), + IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), + 
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), + /* TODO - verify this is the correct value */ + IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), + IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, + IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, + IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, + IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, + IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, + /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */ + IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, + /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */ + IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, + IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), + IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), + IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), + IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), + IWL_RX_MPDU_STATUS_KEY_COLOR = BIT(15), +}; + +enum iwl_rx_mpdu_hash_filter { + IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f, + IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0, +}; + +enum iwl_rx_mpdu_sta_id_flags { + IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f, + IWL_RX_MPDU_SIF_RRF_ABORT = 0x20, + IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0, +}; + +#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f + +enum iwl_rx_mpdu_reorder_data { + IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff, + IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000, + IWL_RX_MPDU_REORDER_SN_SHIFT = 12, + IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000, + IWL_RX_MPDU_REORDER_BAID_SHIFT = 24, + IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000, +}; + +struct iwl_rx_mpdu_desc { + /* DW2 */ + __le16 mpdu_len; + u8 mac_flags1; + u8 mac_flags2; + /* DW3 */ + u8 amsdu_info; + __le16 reserved_for_software; + u8 mac_phy_idx; + /* DW4 */ + __le16 raw_csum; /* alledgedly unreliable */ + __le16 l3l4_flags; + /* DW5 */ + __le16 status; + u8 hash_filter; + u8 sta_id_flags; + /* DW6 */ + __le32 reorder_data; + /* DW7 */ + __le32 rss_hash; + /* DW8 */ + __le32 filter_match; + /* DW9 */ + __le32 gp2_on_air_rise; + /* DW10 */ + __le32 rate_n_flags; + /* DW11 */ + u8 energy_a, energy_b, energy_c, channel; + /* DW12 & DW13 */ + __le64 tsf_on_air_rise; +} __packed; + +struct iwl_frame_release { + u8 baid; + u8 reserved; + __le16 nssn; +}; + #endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h index 3a657e4b60ac..f01dab0d0dac 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -285,6 +285,8 @@ struct iwl_scan_channel_opt { * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report * and DS parameter set IEs into probe requests. + * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels + * 1, 6 and 11. 
* @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches */ enum iwl_mvm_lmac_scan_flags { @@ -295,6 +297,7 @@ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), + IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7), IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), }; @@ -322,6 +325,7 @@ enum iwl_scan_priority_ext { * @active-dwell: dwell time for active channels * @passive-dwell: dwell time for passive channels * @fragmented-dwell: dwell time for fragmented passive scan + * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases) * @reserved2: for alignment and future use * @rx_chain_selct: PHY_RX_CHAIN_* flags * @scan_flags: &enum iwl_mvm_lmac_scan_flags @@ -346,7 +350,8 @@ struct iwl_scan_req_lmac { u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; - __le16 reserved2; + u8 extended_dwell; + u8 reserved2; __le16 rx_chain_select; __le32 scan_flags; __le32 max_out_time; @@ -490,7 +495,7 @@ enum iwl_channel_flags { * @dwell_active: default dwell time for active scan * @dwell_passive: default dwell time for passive scan * @dwell_fragmented: default dwell time for fragmented scan - * @reserved: for future use and alignment + * @dwell_extended: default dwell time for channels 1, 6 and 11 * @mac_addr: default mac address to be used in probes * @bcast_sta_id: the index of the station in the fw * @channel_flags: default channel flags - enum iwl_channel_flags @@ -507,7 +512,7 @@ struct iwl_scan_config { u8 dwell_active; u8 dwell_passive; u8 dwell_fragmented; - u8 reserved; + u8 dwell_extended; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; @@ -543,7 +548,8 @@ enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), - IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9) + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), + IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), }; /** @@ -597,7 +603,7 @@ struct iwl_scan_req_umac_tail { * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags - * @reserved1: for future use and alignment + * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan @@ -606,7 +612,7 @@ struct iwl_scan_req_umac_tail { * @scan_priority: scan internal prioritization &enum iwl_scan_priority * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request - * @reserved2: for future use and alignment + * @reserved: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ @@ -616,7 +622,7 @@ struct iwl_scan_req_umac { __le32 ooc_priority; /* SCAN_GENERAL_PARAMS_API_S_VER_1 */ __le32 general_flags; - u8 reserved1; + u8 extended_dwell; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; @@ -626,7 +632,7 @@ struct iwl_scan_req_umac { /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved2; + __le16 reserved; u8 data[]; } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 493a8bdfbc9e..6fca4fb1d306 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h 
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h index 0c321f63ee42..438665a54923 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h index eed6271d01a3..86aa51b2210e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 853698ab8b05..0036d18334af 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 181590fbd3b3..82049bb139c2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -213,6 +215,7 @@ enum { REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, + FRAME_RELEASE = 0xc3, BA_NOTIF = 0xc5, /* Location Aware Regulatory */ @@ -239,6 +242,7 @@ enum { DTS_MEASUREMENT_NOTIFICATION = 0xdd, REPLY_DEBUG_CMD = 0xf0, + LDBG_CONFIG_CMD = 0xf6, DEBUG_LOG_MSG = 0xf7, BCAST_FILTER_CMD = 0xcf, @@ -268,6 +272,9 @@ enum { REPLY_MAX = 0xff, }; +/* Please keep this enum *SORTED* by hex value. + * Needed for binary search, otherwise a warning will be triggered. 
+ */ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, @@ -275,6 +282,8 @@ enum iwl_phy_ops_subcmd_ids { /* command groups */ enum { + LEGACY_GROUP = 0x0, + LONG_GROUP = 0x1, PHY_OPS_GROUP = 0x4, }; @@ -426,6 +435,26 @@ struct iwl_fw_get_item_cmd { __le32 item_id; } __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ +#define CONT_REC_COMMAND_SIZE 80 +#define ENABLE_CONT_RECORDING 0x15 +#define DISABLE_CONT_RECORDING 0x16 + +/* + * struct iwl_continuous_record_mode - recording mode + */ +struct iwl_continuous_record_mode { + __le16 enable_recording; +} __packed; + +/* + * struct iwl_continuous_record_cmd - enable/disable continuous recording + */ +struct iwl_continuous_record_cmd { + struct iwl_continuous_record_mode record_mode; + u8 pad[CONT_REC_COMMAND_SIZE - + sizeof(struct iwl_continuous_record_mode)]; +} __packed; + struct iwl_fw_get_item_resp { __le32 item_id; __le32 item_byte_cnt; @@ -1425,6 +1454,22 @@ struct iwl_sf_cfg_cmd { ***********************************/ /** + * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. + * @mcc: given mobile country code + * @source_id: the source from where we got the MCC, see iwl_mcc_source + * @reserved: reserved for alignment + */ +struct iwl_mcc_update_cmd_v1 { + __le16 mcc; + u8 source_id; + u8 reserved; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ + +/** * struct iwl_mcc_update_cmd - Request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. @@ -1433,12 +1478,39 @@ struct iwl_sf_cfg_cmd { * @mcc: given mobile country code * @source_id: the source from where we got the MCC, see iwl_mcc_source * @reserved: reserved for alignment + * @key: integrity key for MCC API OEM testing + * @reserved2: reserved */ struct iwl_mcc_update_cmd { __le16 mcc; u8 source_id; u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S */ + __le32 key; + __le32 reserved2[5]; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ + +/** + * iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @cap: capabilities for all channels which matches the MCC + * @source_id: the MCC source, see iwl_mcc_source + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 + * channels, depending on platform) + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp_v1 { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le32 n_channels; + __le32 channels[0]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ /** * iwl_mcc_update_resp - response to MCC_UPDATE_CMD. @@ -1449,6 +1521,8 @@ struct iwl_mcc_update_cmd { * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @source_id: the MCC source, see iwl_mcc_source + * @time: time elapsed from the MCC test start (in 30 seconds TU) + * @reserved: reserved. 
* @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 * channels, depending on platform) * @channels: channel control data map, DWORD for each channel. Only the first @@ -1459,9 +1533,11 @@ struct iwl_mcc_update_resp { __le16 mcc; u8 cap; u8 source_id; + __le16 time; + __le16 reserved; __le32 n_channels; __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */ +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ /** * struct iwl_mcc_chub_notif - chub notifies of mcc change @@ -1491,6 +1567,9 @@ enum iwl_mcc_update_status { MCC_RESP_NVM_DISABLED, MCC_RESP_ILLEGAL, MCC_RESP_LOW_PRIORITY, + MCC_RESP_TEST_MODE_ACTIVE, + MCC_RESP_TEST_MODE_NOT_ACTIVE, + MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, }; enum iwl_mcc_source { @@ -1503,7 +1582,9 @@ enum iwl_mcc_source { MCC_SOURCE_RESERVED = 6, MCC_SOURCE_DEFAULT = 7, MCC_SOURCE_UNINITIALIZED = 8, - MCC_SOURCE_GET_CURRENT = 0x10 + MCC_SOURCE_MCC_API = 9, + MCC_SOURCE_GET_CURRENT = 0x10, + MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; /* DTS measurements */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c new file mode 100644 index 000000000000..0813f8184e10 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -0,0 +1,817 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <linux/devcoredump.h> + +#include "fw-dbg.h" +#include "iwl-io.h" +#include "mvm.h" +#include "iwl-prph.h" +#include "iwl-csr.h" + +static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen) +{ + const struct iwl_mvm_dump_ptrs *dump_ptrs = data; + ssize_t bytes_read; + ssize_t bytes_read_trans; + + if (offset < dump_ptrs->op_mode_len) { + bytes_read = min_t(ssize_t, count, + dump_ptrs->op_mode_len - offset); + memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset, + bytes_read); + offset += bytes_read; + count -= bytes_read; + + if (count == 0) + return bytes_read; + } else { + bytes_read = 0; + } + + if (!dump_ptrs->trans_ptr) + return bytes_read; + + offset -= dump_ptrs->op_mode_len; + bytes_read_trans = min_t(ssize_t, count, + dump_ptrs->trans_ptr->len - offset); + memcpy(buffer + bytes_read, + (u8 *)dump_ptrs->trans_ptr->data + offset, + bytes_read_trans); + + return bytes_read + bytes_read_trans; +} + +static void iwl_mvm_free_coredump(const void *data) +{ + const struct iwl_mvm_dump_ptrs *fw_error_dump = data; + + vfree(fw_error_dump->op_mode_ptr); + vfree(fw_error_dump->trans_ptr); + kfree(fw_error_dump); +} + +#define RADIO_REG_MAX_READ 0x2ad +static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data) +{ + u8 *pos = (void *)(*dump_data)->data; + unsigned long flags; + int i; + + if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) + return; + + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); + (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ); + + for (i = 0; i < RADIO_REG_MAX_READ; i++) { + u32 rd_cmd = RADIO_RSP_RD_CMD; + + rd_cmd |= i << RADIO_RSP_ADDR_POS; + iwl_write_prph_no_grab(mvm->trans, RSP_RADIO_CMD, rd_cmd); + *pos = (u8)iwl_read_prph_no_grab(mvm->trans, RSP_RADIO_RDDAT); + + pos++; + } + + *dump_data = iwl_fw_error_next_data(*dump_data); + + iwl_trans_release_nic_access(mvm->trans, &flags); +} + +static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + unsigned long flags; + int i, j; + + if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) + return; + + /* Pull RXF data from all RXFs */ + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) { + /* + * Keep aside the additional offset that might be needed for + * next RXF + */ + u32 offset_diff = RXF_DIFF_FROM_PREV * i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = mvm->shared_mem_cfg.rxfifo_size[i]; + + /* No need to try to read the data if the 
length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the RXF */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_D_SPACE + + offset_diff)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_WR_PTR + + offset_diff)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_RD_PTR + + offset_diff)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_FENCE_PTR + + offset_diff)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_SET_FENCE_MODE + + offset_diff)); + + /* Lock fence */ + iwl_trans_write_prph(mvm->trans, + RXF_SET_FENCE_MODE + offset_diff, 0x1); + /* Set fence pointer to the same place like WR pointer */ + iwl_trans_write_prph(mvm->trans, + RXF_LD_WR2FENCE + offset_diff, 0x1); + /* Set fence offset */ + iwl_trans_write_prph(mvm->trans, + RXF_LD_FENCE_OFFSET_ADDR + offset_diff, + 0x0); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = iwl_trans_read_prph(mvm->trans, + RXF_FIFO_RD_FENCE_INC + + offset_diff); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + + /* Pull TXF data from all TXFs */ + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = mvm->shared_mem_cfg.txfifo_size[i]; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the FIFO */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FIFO_ITEM_CNT)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_WR_PTR)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_RD_PTR)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FENCE_PTR)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_LOCK_FENCE)); + + /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ + iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR, + TXF_WR_PTR); + + /* Dummy-read to advance the read pointer to the head */ + iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = iwl_trans_read_prph(mvm->trans, + TXF_READ_MODIFY_DATA); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + + iwl_trans_release_nic_access(mvm->trans, &flags); +} + +void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) +{ + if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert) + return; + + kfree(mvm->fw_dump_desc); + mvm->fw_dump_desc = NULL; +} + +#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ +#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ + +static const struct { + u32 start, end; +} iwl_prph_dump_addr[] = { + { .start = 0x00a00000, .end = 0x00a00000 }, + { .start = 0x00a0000c, .end = 0x00a00024 }, + { .start = 0x00a0002c, .end = 0x00a0003c }, + { .start = 0x00a00410, .end = 0x00a00418 }, + { .start = 0x00a00420, 
.end = 0x00a00420 }, + { .start = 0x00a00428, .end = 0x00a00428 }, + { .start = 0x00a00430, .end = 0x00a0043c }, + { .start = 0x00a00444, .end = 0x00a00444 }, + { .start = 0x00a004c0, .end = 0x00a004cc }, + { .start = 0x00a004d8, .end = 0x00a004d8 }, + { .start = 0x00a004e0, .end = 0x00a004f0 }, + { .start = 0x00a00840, .end = 0x00a00840 }, + { .start = 0x00a00850, .end = 0x00a00858 }, + { .start = 0x00a01004, .end = 0x00a01008 }, + { .start = 0x00a01010, .end = 0x00a01010 }, + { .start = 0x00a01018, .end = 0x00a01018 }, + { .start = 0x00a01024, .end = 0x00a01024 }, + { .start = 0x00a0102c, .end = 0x00a01034 }, + { .start = 0x00a0103c, .end = 0x00a01040 }, + { .start = 0x00a01048, .end = 0x00a01094 }, + { .start = 0x00a01c00, .end = 0x00a01c20 }, + { .start = 0x00a01c58, .end = 0x00a01c58 }, + { .start = 0x00a01c7c, .end = 0x00a01c7c }, + { .start = 0x00a01c28, .end = 0x00a01c54 }, + { .start = 0x00a01c5c, .end = 0x00a01c5c }, + { .start = 0x00a01c60, .end = 0x00a01cdc }, + { .start = 0x00a01ce0, .end = 0x00a01d0c }, + { .start = 0x00a01d18, .end = 0x00a01d20 }, + { .start = 0x00a01d2c, .end = 0x00a01d30 }, + { .start = 0x00a01d40, .end = 0x00a01d5c }, + { .start = 0x00a01d80, .end = 0x00a01d80 }, + { .start = 0x00a01d98, .end = 0x00a01d9c }, + { .start = 0x00a01da8, .end = 0x00a01da8 }, + { .start = 0x00a01db8, .end = 0x00a01df4 }, + { .start = 0x00a01dc0, .end = 0x00a01dfc }, + { .start = 0x00a01e00, .end = 0x00a01e2c }, + { .start = 0x00a01e40, .end = 0x00a01e60 }, + { .start = 0x00a01e68, .end = 0x00a01e6c }, + { .start = 0x00a01e74, .end = 0x00a01e74 }, + { .start = 0x00a01e84, .end = 0x00a01e90 }, + { .start = 0x00a01e9c, .end = 0x00a01ec4 }, + { .start = 0x00a01ed0, .end = 0x00a01ee0 }, + { .start = 0x00a01f00, .end = 0x00a01f1c }, + { .start = 0x00a01f44, .end = 0x00a01ffc }, + { .start = 0x00a02000, .end = 0x00a02048 }, + { .start = 0x00a02068, .end = 0x00a020f0 }, + { .start = 0x00a02100, .end = 0x00a02118 }, + { .start = 0x00a02140, .end = 0x00a0214c }, + { .start = 0x00a02168, .end = 0x00a0218c }, + { .start = 0x00a021c0, .end = 0x00a021c0 }, + { .start = 0x00a02400, .end = 0x00a02410 }, + { .start = 0x00a02418, .end = 0x00a02420 }, + { .start = 0x00a02428, .end = 0x00a0242c }, + { .start = 0x00a02434, .end = 0x00a02434 }, + { .start = 0x00a02440, .end = 0x00a02460 }, + { .start = 0x00a02468, .end = 0x00a024b0 }, + { .start = 0x00a024c8, .end = 0x00a024cc }, + { .start = 0x00a02500, .end = 0x00a02504 }, + { .start = 0x00a0250c, .end = 0x00a02510 }, + { .start = 0x00a02540, .end = 0x00a02554 }, + { .start = 0x00a02580, .end = 0x00a025f4 }, + { .start = 0x00a02600, .end = 0x00a0260c }, + { .start = 0x00a02648, .end = 0x00a02650 }, + { .start = 0x00a02680, .end = 0x00a02680 }, + { .start = 0x00a026c0, .end = 0x00a026d0 }, + { .start = 0x00a02700, .end = 0x00a0270c }, + { .start = 0x00a02804, .end = 0x00a02804 }, + { .start = 0x00a02818, .end = 0x00a0281c }, + { .start = 0x00a02c00, .end = 0x00a02db4 }, + { .start = 0x00a02df4, .end = 0x00a02fb0 }, + { .start = 0x00a03000, .end = 0x00a03014 }, + { .start = 0x00a0301c, .end = 0x00a0302c }, + { .start = 0x00a03034, .end = 0x00a03038 }, + { .start = 0x00a03040, .end = 0x00a03048 }, + { .start = 0x00a03060, .end = 0x00a03068 }, + { .start = 0x00a03070, .end = 0x00a03074 }, + { .start = 0x00a0307c, .end = 0x00a0307c }, + { .start = 0x00a03080, .end = 0x00a03084 }, + { .start = 0x00a0308c, .end = 0x00a03090 }, + { .start = 0x00a03098, .end = 0x00a03098 }, + { .start = 0x00a030a0, .end = 0x00a030a0 }, + { .start = 0x00a030a8, .end = 
0x00a030b4 }, + { .start = 0x00a030bc, .end = 0x00a030bc }, + { .start = 0x00a030c0, .end = 0x00a0312c }, + { .start = 0x00a03c00, .end = 0x00a03c5c }, + { .start = 0x00a04400, .end = 0x00a04454 }, + { .start = 0x00a04460, .end = 0x00a04474 }, + { .start = 0x00a044c0, .end = 0x00a044ec }, + { .start = 0x00a04500, .end = 0x00a04504 }, + { .start = 0x00a04510, .end = 0x00a04538 }, + { .start = 0x00a04540, .end = 0x00a04548 }, + { .start = 0x00a04560, .end = 0x00a0457c }, + { .start = 0x00a04590, .end = 0x00a04598 }, + { .start = 0x00a045c0, .end = 0x00a045f4 }, + { .start = 0x00a44000, .end = 0x00a7bf80 }, +}; + +static u32 iwl_dump_prph(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + struct iwl_fw_error_dump_prph *prph; + unsigned long flags; + u32 prph_len = 0, i; + + if (!iwl_trans_grab_nic_access(trans, &flags)) + return 0; + + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + int reg; + __le32 *val; + + prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); + (*data)->len = cpu_to_le32(sizeof(*prph) + + num_bytes_in_chunk); + prph = (void *)(*data)->data; + prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); + val = (void *)prph->data; + + for (reg = iwl_prph_dump_addr[i].start; + reg <= iwl_prph_dump_addr[i].end; + reg += 4) + *val++ = cpu_to_le32(iwl_read_prph_no_grab(trans, + reg)); + + *data = iwl_fw_error_next_data(*data); + } + + iwl_trans_release_nic_access(trans, &flags); + + return prph_len; +} + +void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) +{ + struct iwl_fw_error_dump_file *dump_file; + struct iwl_fw_error_dump_data *dump_data; + struct iwl_fw_error_dump_info *dump_info; + struct iwl_fw_error_dump_mem *dump_mem; + struct iwl_fw_error_dump_trigger_desc *dump_trig; + struct iwl_mvm_dump_ptrs *fw_error_dump; + u32 sram_len, sram_ofs; + u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; + u32 smem_len = mvm->cfg->smem_len; + u32 sram2_len = mvm->cfg->dccm2_len; + bool monitor_dump_only = false; + int i; + + lockdep_assert_held(&mvm->mutex); + + /* there's no point in fw dump if the bus is dead */ + if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { + IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); + goto out; + } + + if (mvm->fw_dump_trig && + mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) + monitor_dump_only = true; + + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); + if (!fw_error_dump) + goto out; + + /* SRAM - include stack CCM if driver knows the values for it */ + if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { + const struct fw_img *img; + + img = &mvm->fw->img[mvm->cur_ucode]; + sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + } else { + sram_ofs = mvm->cfg->dccm_offset; + sram_len = mvm->cfg->dccm_len; + } + + /* reading RXF/TXF sizes */ + if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { + struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg; + + fifo_data_len = 0; + + /* Count RXF size */ + for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) { + if (!mem_cfg->rxfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += mem_cfg->rxfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) { + if 
(!mem_cfg->txfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += mem_cfg->txfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + + /* Make room for PRPH registers */ + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + + prph_len += sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; + } + + file_len = sizeof(*dump_file) + + sizeof(*dump_data) * 2 + + sram_len + sizeof(*dump_mem) + + fifo_data_len + + prph_len + + radio_len + + sizeof(*dump_info); + + /* Make room for the SMEM, if it exists */ + if (smem_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; + + /* Make room for the secondary SRAM, if it exists */ + if (sram2_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + + /* Make room for fw's virtual image pages, if it exists */ + if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) + file_len += mvm->num_of_paging_blk * + (sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_paging) + + PAGING_BLOCK_SIZE); + + /* If we only want a monitor dump, reset the file length */ + if (monitor_dump_only) { + file_len = sizeof(*dump_file) + sizeof(*dump_data) + + sizeof(*dump_info); + } + + /* + * In 8000 HW family B-step include the ICCM (which resides separately) + */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + IWL8260_ICCM_LEN; + + if (mvm->fw_dump_desc) + file_len += sizeof(*dump_data) + sizeof(*dump_trig) + + mvm->fw_dump_desc->len; + + dump_file = vzalloc(file_len); + if (!dump_file) { + kfree(fw_error_dump); + goto out; + } + + fw_error_dump->op_mode_ptr = dump_file; + + dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); + dump_data = (void *)dump_file->data; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_info)); + dump_info = (void *)dump_data->data; + dump_info->device_family = + mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? 
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : + cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); + dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, + sizeof(dump_info->fw_human_readable)); + strncpy(dump_info->dev_human_readable, mvm->cfg->name, + sizeof(dump_info->dev_human_readable)); + strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, + sizeof(dump_info->bus_human_readable)); + + dump_data = iwl_fw_error_next_data(dump_data); + /* We only dump the FIFOs if the FW is in error state */ + if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { + iwl_mvm_dump_fifos(mvm, &dump_data); + if (radio_len) + iwl_mvm_read_radio_reg(mvm, &dump_data); + } + + if (mvm->fw_dump_desc) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_trig) + + mvm->fw_dump_desc->len); + dump_trig = (void *)dump_data->data; + memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, + sizeof(*dump_trig) + mvm->fw_dump_desc->len); + + dump_data = iwl_fw_error_next_data(dump_data); + } + + /* In case we only want monitor dump, skip to dump trasport data */ + if (monitor_dump_only) + goto dump_trans_data; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(sram_ofs); + iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, + sram_len); + + if (smem_len) { + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); + dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); + iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, + dump_mem->data, smem_len); + } + + if (sram2_len) { + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); + iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, + dump_mem->data, sram2_len); + } + + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); + iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, + dump_mem->data, IWL8260_ICCM_LEN); + } + + /* Dump fw's virtual image */ + if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { + u32 i; + + for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { + struct iwl_fw_error_dump_paging *paging; + struct page *pages = + mvm->fw_paging_db[i].fw_paging_block; + + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); + dump_data->len = cpu_to_le32(sizeof(*paging) + + PAGING_BLOCK_SIZE); + paging = (void *)dump_data->data; + paging->index = cpu_to_le32(i); + memcpy(paging->data, page_address(pages), + 
PAGING_BLOCK_SIZE); + } + } + + dump_data = iwl_fw_error_next_data(dump_data); + if (prph_len) + iwl_dump_prph(mvm->trans, &dump_data); + +dump_trans_data: + fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, + mvm->fw_dump_trig); + fw_error_dump->op_mode_len = file_len; + if (fw_error_dump->trans_ptr) + file_len += fw_error_dump->trans_ptr->len; + dump_file->file_len = cpu_to_le32(file_len); + + dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, + GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); + +out: + iwl_mvm_free_fw_dump_desc(mvm); + mvm->fw_dump_trig = NULL; + clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); +} + +const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { + .trig_desc = { + .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), + }, +}; + +int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, + const struct iwl_mvm_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger) +{ + unsigned int delay = 0; + + if (trigger) + delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + + if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) + return -EBUSY; + + if (WARN_ON(mvm->fw_dump_desc)) + iwl_mvm_free_fw_dump_desc(mvm); + + IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", + le32_to_cpu(desc->trig_desc.type)); + + mvm->fw_dump_desc = desc; + mvm->fw_dump_trig = trigger; + + queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); + + return 0; +} + +int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger) +{ + struct iwl_mvm_dump_desc *desc; + + desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + desc->len = len; + desc->trig_desc.type = cpu_to_le32(trig); + memcpy(desc->trig_desc.data, str, len); + + return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); +} + +int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
+{ + u16 occurrences = le16_to_cpu(trigger->occurrences); + int ret, len = 0; + char buf[64]; + + if (!occurrences) + return 0; + + if (fmt) { + va_list ap; + + buf[sizeof(buf) - 1] = '\0'; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + /* check for truncation */ + if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) + buf[sizeof(buf) - 1] = '\0'; + + len = strlen(buf) + 1; + } + + ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, + trigger); + + if (ret) + return ret; + + trigger->occurrences = cpu_to_le16(occurrences - 1); + return 0; +} + +static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) +{ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); + else + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); +} + +int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) +{ + u8 *ptr; + int ret; + int i; + + if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), + "Invalid configuration %d\n", conf_id)) + return -EINVAL; + + /* EARLY START - firmware's configuration is hard coded */ + if ((!mvm->fw->dbg_conf_tlv[conf_id] || + !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + conf_id == FW_DBG_START_FROM_ALIVE) { + iwl_mvm_restart_early_start(mvm); + return 0; + } + + if (!mvm->fw->dbg_conf_tlv[conf_id]) + return -EINVAL; + + if (mvm->fw_dbg_conf != FW_DBG_INVALID) + IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", + mvm->fw_dbg_conf); + + /* Send all HCMDs for configuring the FW debug */ + ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; + for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, + le16_to_cpu(cmd->len), cmd->data); + if (ret) + return ret; + + ptr += sizeof(*cmd); + ptr += le16_to_cpu(cmd->len); + } + + mvm->fw_dbg_conf = conf_id; + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h new file mode 100644 index 000000000000..f7dff7612c9c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h @@ -0,0 +1,174 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __mvm_fw_dbg_h__ +#define __mvm_fw_dbg_h__ +#include "iwl-fw-file.h" +#include "iwl-fw-error-dump.h" +#include "mvm.h" + +void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); +void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); +int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, + const struct iwl_mvm_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
__printf(3, 4); +int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); + +#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ + void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ + unlikely(__dbg_trigger); \ +}) + +static inline struct iwl_fw_dbg_trigger_tlv* +_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) +{ + return fw->dbg_trigger_tlv[id]; +} + +#define iwl_fw_dbg_get_trigger(fw, id) ({ \ + BUILD_BUG_ON(!__builtin_constant_p(id)); \ + BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ + _iwl_fw_dbg_get_trigger((fw), (id)); \ +}) + +static inline bool +iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, + struct ieee80211_vif *vif) +{ + u32 trig_vif = le32_to_cpu(trig->vif_type); + + return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; +} + +static inline bool +iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && + (mvm->fw_dbg_conf == FW_DBG_INVALID || + (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); +} + +static inline bool +iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + unsigned long wind_jiff = + msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms)); + u32 id = le32_to_cpu(trig->id); + + /* If this is the first event checked, jump to update start ts */ + if (mvm->fw_dbg_non_collect_ts_start[id] && + (time_after(mvm->fw_dbg_non_collect_ts_start[id] + wind_jiff, + jiffies))) + return true; + + mvm->fw_dbg_non_collect_ts_start[id] = jiffies; + return false; +} + +static inline bool +iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) + return false; + + if (iwl_fw_dbg_no_trig_window(mvm, trig)) { + IWL_WARN(mvm, "Trigger %d occurred while no-collect window.\n", + trig->id); + return false; + } + + return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); +} + +static inline void +_iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_fw_dbg_trigger_tlv *trigger) +{ + if (!trigger) + return; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); +} + +#define iwl_fw_dbg_trigger_simple_stop(mvm, vif, trig) \ + _iwl_fw_dbg_trigger_simple_stop((mvm), (vif), \ + iwl_fw_dbg_get_trigger((mvm)->fw,\ + (trig))) + +#endif /* __mvm_fw_dbg_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index d906fa13ba97..4ed5180c547b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -74,6 +74,7 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" +#include "fw-dbg.h" #include "iwl-phy-db.h" #define MVM_UCODE_ALIVE_TIMEOUT HZ @@ -805,137 +806,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) iwl_free_resp(&cmd); } -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - struct iwl_mvm_dump_desc *desc, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - unsigned int delay = 0; - - if (trigger) - delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); - - if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) - return -EBUSY; - - if (WARN_ON(mvm->fw_dump_desc)) - iwl_mvm_free_fw_dump_desc(mvm); - - IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", - le32_to_cpu(desc->trig_desc.type)); - - mvm->fw_dump_desc = desc; - mvm->fw_dump_trig = trigger; - - queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); - - return 0; -} - -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - struct iwl_mvm_dump_desc *desc; - - desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); - if (!desc) - return -ENOMEM; - - desc->len = len; - desc->trig_desc.type = cpu_to_le32(trig); - memcpy(desc->trig_desc.data, str, len); - - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); -} - -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) -{ - u16 occurrences = le16_to_cpu(trigger->occurrences); - int ret, len = 0; - char buf[64]; - - if (!occurrences) - return 0; - - if (fmt) { - va_list ap; - - buf[sizeof(buf) - 1] = '\0'; - - va_start(ap, fmt); - vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - - /* check for truncation */ - if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) - buf[sizeof(buf) - 1] = '\0'; - - len = strlen(buf) + 1; - } - - ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, - trigger); - - if (ret) - return ret; - - trigger->occurrences = cpu_to_le16(occurrences - 1); - return 0; -} - -static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) -{ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) - iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - else - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); -} - -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) -{ - u8 *ptr; - int ret; - int i; - - if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), - "Invalid configuration %d\n", conf_id)) - return -EINVAL; - - /* EARLY START - firmware's configuration is hard coded */ - if ((!mvm->fw->dbg_conf_tlv[conf_id] || - !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && - conf_id == FW_DBG_START_FROM_ALIVE) { - iwl_mvm_restart_early_start(mvm); - return 0; - } - - if (!mvm->fw->dbg_conf_tlv[conf_id]) - return -EINVAL; - - if (mvm->fw_dbg_conf != FW_DBG_INVALID) - IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", - mvm->fw_dbg_conf); - - /* Send all HCMDs for configuring the FW debug */ - ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; - for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { - struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; - - ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, - le16_to_cpu(cmd->len), cmd->data); - if (ret) - return ret; - - ptr += sizeof(*cmd); - ptr += le16_to_cpu(cmd->len); - } - - mvm->fw_dbg_conf = conf_id; - return ret; -} - static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { @@ 
-1073,6 +943,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; ret = iwl_mvm_config_scan(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index e3b3cf4dbd77..1e51fbe95f7c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index ad7ad720d2e7..bf1e5eb5dbdb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -27,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -72,6 +72,7 @@ #include "fw-api.h" #include "mvm.h" #include "time-event.h" +#include "fw-dbg.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, @@ -716,6 +717,8 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cpu_to_le32(vif->bss_conf.use_short_slot ? MAC_FLG_SHORT_SLOT : 0); + cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { u8 txf = iwl_mvm_ac_to_tx_fifo[i]; @@ -729,11 +732,26 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->ac[txf].fifos_mask = BIT(txf); } - /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP) { + /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST); + /* + * in AP mode, pass probe requests and beacons from other APs + * (needed for ht protection); when there're no any associated + * station don't ask FW to pass beacons to prevent unnecessary + * wake-ups. 
+ */ + cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + if (mvmvif->ap_assoc_sta_count) { + cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); + IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); + } else { + IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); + } + } + if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); @@ -747,8 +765,6 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); if (ht_enabled) iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); - - cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); } static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, @@ -854,11 +870,17 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; + u32 tfd_queue_msk = 0; + int ret, i; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + for (i = 0; i < IEEE80211_NUM_ACS; i++) + if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) + tfd_queue_msk |= BIT(vif->hw_queue[i]); + cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | @@ -866,6 +888,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, MAC_FILTER_IN_CRC32); ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); + /* Allocate sniffer station */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, + vif->type); + if (ret) + return ret; + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -999,9 +1027,12 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, TX_CMD_FLG_BT_PRIO_POS; beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags); - mvm->mgmt_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->mgmt_last_antenna_idx); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { + mvm->mgmt_last_antenna_idx = + iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), + mvm->mgmt_last_antenna_idx); + } beacon_cmd.tx.rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << @@ -1140,7 +1171,6 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); @@ -1148,19 +1178,6 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - /* - * pass probe requests and beacons from other APs (needed - * for ht protection); when there're no any associated station - * don't ask FW to pass beacons to prevent unnecessary wake-ups. 
- */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count) { - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); - } else { - IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); - } - /* Fill the data specific for ap mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, action == FW_CTXT_ACTION_ADD); @@ -1180,13 +1197,6 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - /* - * pass probe requests and beacons from other APs (needed - * for ht protection) - */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST | - MAC_FILTER_IN_BEACON); - /* Fill the data specific for GO mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap, action == FW_CTXT_ACTION_ADD); @@ -1288,8 +1298,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->uploaded = false; - if (vif->type == NL80211_IFTYPE_MONITOR) + if (vif->type == NL80211_IFTYPE_MONITOR) { __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); + iwl_mvm_dealloc_snif_sta(mvm); + } return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index e88afac51c5d..d70a1716f3e0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -70,6 +70,7 @@ #include <linux/ip.h> #include <linux/if_arp.h> #include <linux/devcoredump.h> +#include <linux/time.h> #include <net/mac80211.h> #include <net/ieee80211_radiotap.h> #include <net/tcp.h> @@ -86,6 +87,7 @@ #include "iwl-prph.h" #include "iwl-csr.h" #include "iwl-nvm-parse.h" +#include "fw-dbg.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { @@ -436,6 +438,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); + + if (mvm->trans->max_skb_frags) + hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; hw->queues = mvm->first_agg_queue; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; @@ -662,6 +669,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (!iwl_mvm_is_csum_supported(mvm)) hw->netdev_features &= ~NETIF_F_RXCSUM; + if (IWL_MVM_SW_TX_CSUM_OFFLOAD) + hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + ret = ieee80211_register_hw(mvm->hw); if (ret) iwl_mvm_leds_exit(mvm); @@ -939,431 +950,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); } -static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen) -{ - const struct iwl_mvm_dump_ptrs *dump_ptrs = data; - ssize_t bytes_read; - ssize_t bytes_read_trans; - - if (offset < dump_ptrs->op_mode_len) { - bytes_read = min_t(ssize_t, count, - dump_ptrs->op_mode_len - offset); - memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset, - bytes_read); - offset += bytes_read; - count -= bytes_read; - - if (count == 0) - 
return bytes_read; - } else { - bytes_read = 0; - } - - if (!dump_ptrs->trans_ptr) - return bytes_read; - - offset -= dump_ptrs->op_mode_len; - bytes_read_trans = min_t(ssize_t, count, - dump_ptrs->trans_ptr->len - offset); - memcpy(buffer + bytes_read, - (u8 *)dump_ptrs->trans_ptr->data + offset, - bytes_read_trans); - - return bytes_read + bytes_read_trans; -} - -static void iwl_mvm_free_coredump(const void *data) -{ - const struct iwl_mvm_dump_ptrs *fw_error_dump = data; - - vfree(fw_error_dump->op_mode_ptr); - vfree(fw_error_dump->trans_ptr); - kfree(fw_error_dump); -} - -static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) -{ - struct iwl_fw_error_dump_fifo *fifo_hdr; - u32 *fifo_data; - u32 fifo_len; - unsigned long flags; - int i, j; - - if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) - return; - - /* Pull RXF data from all RXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) { - /* - * Keep aside the additional offset that might be needed for - * next RXF - */ - u32 offset_diff = RXF_DIFF_FROM_PREV * i; - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.rxfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the RXF */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_D_SPACE + - offset_diff)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_WR_PTR + - offset_diff)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_RD_PTR + - offset_diff)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_FENCE_PTR + - offset_diff)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_SET_FENCE_MODE + - offset_diff)); - - /* Lock fence */ - iwl_trans_write_prph(mvm->trans, - RXF_SET_FENCE_MODE + offset_diff, 0x1); - /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_WR2FENCE + offset_diff, 0x1); - /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_FENCE_OFFSET_ADDR + offset_diff, - 0x0); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - RXF_FIFO_RD_FENCE_INC + - offset_diff); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - - /* Pull TXF data from all TXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) { - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.txfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the FIFO */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FIFO_ITEM_CNT)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_WR_PTR)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_RD_PTR)); - 
fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FENCE_PTR)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_LOCK_FENCE)); - - /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR, - TXF_WR_PTR); - - /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - TXF_READ_MODIFY_DATA); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - - iwl_trans_release_nic_access(mvm->trans, &flags); -} - -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) -{ - if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert || - !mvm->fw_dump_desc) - return; - - kfree(mvm->fw_dump_desc); - mvm->fw_dump_desc = NULL; -} - -#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ -#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ - -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) -{ - struct iwl_fw_error_dump_file *dump_file; - struct iwl_fw_error_dump_data *dump_data; - struct iwl_fw_error_dump_info *dump_info; - struct iwl_fw_error_dump_mem *dump_mem; - struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_mvm_dump_ptrs *fw_error_dump; - u32 sram_len, sram_ofs; - u32 file_len, fifo_data_len = 0; - u32 smem_len = mvm->cfg->smem_len; - u32 sram2_len = mvm->cfg->dccm2_len; - bool monitor_dump_only = false; - - lockdep_assert_held(&mvm->mutex); - - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); - return; - } - - if (mvm->fw_dump_trig && - mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) - monitor_dump_only = true; - - fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); - if (!fw_error_dump) - return; - - /* SRAM - include stack CCM if driver knows the values for it */ - if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { - const struct fw_img *img; - - img = &mvm->fw->img[mvm->cur_ucode]; - sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; - sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; - } else { - sram_ofs = mvm->cfg->dccm_offset; - sram_len = mvm->cfg->dccm_len; - } - - /* reading RXF/TXF sizes */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg; - int i; - - fifo_data_len = 0; - - /* Count RXF size */ - for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) { - if (!mem_cfg->rxfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += mem_cfg->rxfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) { - if (!mem_cfg->txfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += mem_cfg->txfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } - - file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 2 + - sram_len + sizeof(*dump_mem) + - fifo_data_len + - sizeof(*dump_info); - - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; - - /* Make room for fw's virtual image pages, if it exists */ - if 
(mvm->fw->img[mvm->cur_ucode].paging_mem_size) - file_len += mvm->num_of_paging_blk * - (sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_paging) + - PAGING_BLOCK_SIZE); - - /* If we only want a monitor dump, reset the file length */ - if (monitor_dump_only) { - file_len = sizeof(*dump_file) + sizeof(*dump_data) + - sizeof(*dump_info); - } - - /* - * In 8000 HW family B-step include the ICCM (which resides separately) - */ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - IWL8260_ICCM_LEN; - - if (mvm->fw_dump_desc) - file_len += sizeof(*dump_data) + sizeof(*dump_trig) + - mvm->fw_dump_desc->len; - - dump_file = vzalloc(file_len); - if (!dump_file) { - kfree(fw_error_dump); - iwl_mvm_free_fw_dump_desc(mvm); - return; - } - - fw_error_dump->op_mode_ptr = dump_file; - - dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); - dump_data = (void *)dump_file->data; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_info)); - dump_info = (void *) dump_data->data; - dump_info->device_family = - mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, - sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, mvm->cfg->name, - sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, - sizeof(dump_info->bus_human_readable)); - - dump_data = iwl_fw_error_next_data(dump_data); - /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) - iwl_mvm_dump_fifos(mvm, &dump_data); - - if (mvm->fw_dump_desc) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_trig) + - mvm->fw_dump_desc->len); - dump_trig = (void *)dump_data->data; - memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, - sizeof(*dump_trig) + mvm->fw_dump_desc->len); - - /* now we can free this copy */ - iwl_mvm_free_fw_dump_desc(mvm); - dump_data = iwl_fw_error_next_data(dump_data); - } - - /* In case we only want monitor dump, skip to dump trasport data */ - if (monitor_dump_only) - goto dump_trans_data; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, - sram_len); - - if (smem_len) { - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, - dump_mem->data, smem_len); - } - - if (sram2_len) { - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset 
= cpu_to_le32(mvm->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, - dump_mem->data, sram2_len); - } - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + - sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); - iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, - dump_mem->data, IWL8260_ICCM_LEN); - } - - /* Dump fw's virtual image */ - if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { - u32 i; - - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { - struct iwl_fw_error_dump_paging *paging; - struct page *pages = - mvm->fw_paging_db[i].fw_paging_block; - - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); - dump_data->len = cpu_to_le32(sizeof(*paging) + - PAGING_BLOCK_SIZE); - paging = (void *)dump_data->data; - paging->index = cpu_to_le32(i); - memcpy(paging->data, page_address(pages), - PAGING_BLOCK_SIZE); - } - } - -dump_trans_data: - fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, - mvm->fw_dump_trig); - fw_error_dump->op_mode_len = file_len; - if (fw_error_dump->trans_ptr) - file_len += fw_error_dump->trans_ptr->len; - dump_file->file_len = cpu_to_le32(file_len); - - dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, - GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); - - mvm->fw_dump_trig = NULL; - clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); -} - -struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { - .trig_desc = { - .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), - }, -}; - static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { /* clear the D3 reconfig, we only need it to avoid dumping a @@ -1387,6 +973,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->calibrating = false; /* just in case one was running */ + iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); /* @@ -1399,6 +986,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; iwl_mvm_reset_phy_ctxts(mvm); + memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); @@ -1427,10 +1015,18 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - /* Clean up some internal and mac80211 state on restart */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); - + } else { + /* Hold the reference to prevent runtime suspend while + * the start procedure runs. It's a bit confusing + * that the UCODE_DOWN reference is taken, but it just + * means "UCODE is not UP yet". ( TODO: rename this + * reference). 
+ */ + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + } ret = iwl_mvm_up(mvm); if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { @@ -1497,15 +1093,13 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) { - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) - if (!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ)) - WARN_ONCE(1, "D0i3 exit on resume timed out\n"); + if (iwl_mvm_is_d0i3_supported(mvm) && + iwl_mvm_enter_d0i3_on_suspend(mvm)) + WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq, + !test_bit(IWL_MVM_STATUS_IN_D0I3, + &mvm->status), + HZ), + "D0i3 exit on resume timed out\n"); } static void @@ -1533,14 +1127,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) */ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); - /* - * Disallow low power states when the FW is down by taking - * the UCODE_DOWN ref. in case of ongoing hw restart the - * ref is already taken, so don't take it again. - */ - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - /* async_handlers_wk is now blocked */ /* @@ -2152,8 +1738,8 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, return true; } -static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + +static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) { struct iwl_bcast_filter_cmd cmd; @@ -2167,8 +1753,7 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, sizeof(cmd), &cmd); } #else -static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) { return 0; } @@ -2283,7 +1868,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, } iwl_mvm_recalc_multicast(mvm); - iwl_mvm_configure_bcast_filter(mvm, vif); + iwl_mvm_configure_bcast_filter(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; @@ -2291,6 +1876,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, IEEE80211_SMPS_AUTOMATIC); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); } else if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon _after_ association so @@ -2331,7 +1919,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ARP_FILTER) { IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); - iwl_mvm_configure_bcast_filter(mvm, vif); + iwl_mvm_configure_bcast_filter(mvm); } } @@ -2661,7 +2249,6 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); /* @@ -2677,11 +2264,6 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-ENOENT)); - if (mvm_sta->vif->type == NL80211_IFTYPE_AP) { - mvmvif->ap_assoc_sta_count--; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - mutex_unlock(&mvm->mutex); } @@ -2699,6 +2281,34 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } 
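Note on the __iwl_mvm_mac_start()/__iwl_mvm_mac_stop() hunks above: the IWL_MVM_REF_UCODE_DOWN reference is now taken at the start of firmware bring-up (unless a HW restart already holds it) instead of in __iwl_mvm_mac_stop(), so runtime suspend stays blocked for the whole start procedure. The iwl_mvm_ref()/iwl_mvm_unref() calls amount to a per-reason reference count; below is a minimal, self-contained C sketch of that bookkeeping only, with placeholder names and printf "hooks" standing in for the driver's actual API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative reasons, loosely mirroring enum iwl_mvm_ref_type. */
enum ref_reason { REF_UCODE_DOWN, REF_INIT_UCODE, REF_NUM };

struct dev_pm {
	unsigned int refs[REF_NUM];	/* one counter per reason */
	unsigned int total;		/* sum of all outstanding references */
};

/* Taking any reference keeps the device awake. */
static void dev_ref(struct dev_pm *pm, enum ref_reason r)
{
	if (pm->total++ == 0)
		printf("runtime suspend now prevented\n");
	pm->refs[r]++;
}

/* Dropping the last reference lets the device runtime-suspend again. */
static void dev_unref(struct dev_pm *pm, enum ref_reason r)
{
	pm->refs[r]--;
	if (--pm->total == 0)
		printf("runtime suspend now allowed\n");
}

int main(void)
{
	struct dev_pm pm = { { 0 }, 0 };
	bool in_hw_restart = false;

	/* Start-flow shape from the patch: hold a reference across firmware
	 * bring-up unless a HW restart is already holding it. */
	if (!in_hw_restart)
		dev_ref(&pm, REF_UCODE_DOWN);

	/* ... bring the firmware up ... */

	dev_unref(&pm, REF_UCODE_DOWN);	/* dropped once the uCode is up */
	return 0;
}

Keeping one counter per reason, rather than a single global count, makes it straightforward to see which holder is still blocking suspend when the device refuses to sleep.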
+static void +iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, u8 *peer_addr, + enum nl80211_tdls_operation action) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_tdls *tdls_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); + tdls_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(tdls_trig->action_bitmap & BIT(action))) + return; + + if (tdls_trig->peer_mode && + memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "TDLS event occurred, peer %pM, action %d", + peer_addr, action); +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2749,8 +2359,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } ret = iwl_mvm_add_sta(mvm, vif, sta); - if (sta->tdls && ret == 0) + if (sta->tdls && ret == 0) { iwl_mvm_recalc_tdls_state(mvm, vif, true); + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_SETUP); + } } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* @@ -2762,6 +2375,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { + if (vif->type == NL80211_IFTYPE_AP) { + mvmvif->ap_assoc_sta_count++; + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } ret = iwl_mvm_update_sta(mvm, vif, sta); if (ret == 0) iwl_mvm_rs_rate_init(mvm, sta, @@ -2774,6 +2391,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); + if (sta->tdls) + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_ENABLE_LINK); + /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); ret = 0; @@ -2784,6 +2405,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { + if (vif->type == NL80211_IFTYPE_AP) { + mvmvif->ap_assoc_sta_count--; + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { @@ -2791,8 +2416,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { ret = iwl_mvm_rm_sta(mvm, vif, sta); - if (sta->tdls) + if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_DISABLE_LINK); + } } else { ret = -EIO; } @@ -2940,6 +2568,9 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; + int keyidx = key->keyidx; int ret; u8 key_offset; @@ -3007,6 +2638,36 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; } + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + sta && iwl_mvm_has_new_rx_api(mvm) && + key->flags & IEEE80211_KEY_FLAG_PAIRWISE && + (key->cipher == WLAN_CIPHER_SUITE_CCMP || + key->cipher == WLAN_CIPHER_SUITE_GCMP)) { + struct ieee80211_key_seq seq; + int tid, q; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); + ptk_pn = kzalloc(sizeof(*ptk_pn) + + mvm->trans->num_rx_queues 
* + sizeof(ptk_pn->q[0]), + GFP_KERNEL); + if (!ptk_pn) { + ret = -ENOMEM; + break; + } + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + ieee80211_get_key_rx_seq(key, tid, &seq); + for (q = 0; q < mvm->trans->num_rx_queues; q++) + memcpy(ptk_pn->q[q].pn[tid], + seq.ccmp.pn, + IEEE80211_CCMP_PN_LEN); + } + + rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); + } + /* in HW restart reuse the index, otherwise request a new one */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) key_offset = key->hw_key_idx; @@ -3032,6 +2693,19 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; } + if (sta && iwl_mvm_has_new_rx_api(mvm) && + key->flags & IEEE80211_KEY_FLAG_PAIRWISE && + (key->cipher == WLAN_CIPHER_SUITE_CCMP || + key->cipher == WLAN_CIPHER_SUITE_GCMP)) { + mvmsta = iwl_mvm_sta_from_mac80211(sta); + ptk_pn = rcu_dereference_protected( + mvmsta->ptk_pn[keyidx], + lockdep_is_held(&mvm->mutex)); + RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); + if (ptk_pn) + kfree_rcu(ptk_pn, rcu_head); + } + IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); break; @@ -3092,7 +2766,11 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, return true; } -#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200 +#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) +#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) +#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) +#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) +#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, @@ -3103,6 +2781,9 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; + u32 dtim_interval = vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int; + u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .id_and_color = @@ -3115,11 +2796,38 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, .channel_info.width = PHY_VHT_CHANNEL_MODE20, /* Set the time and duration */ .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), - .apply_time_max_delay = - cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)), - .duration = cpu_to_le32(MSEC_TO_TU(duration)), }; + delay = AUX_ROC_MIN_DELAY; + req_dur = MSEC_TO_TU(duration); + + /* + * If we are associated we want the delay time to be at least one + * dtim interval so that the FW can wait until after the DTIM and + * then start the time event, this will potentially allow us to + * remain off-channel for the max duration. + * Since we want to use almost a whole dtim interval we would also + * like the delay to be for 2-3 dtim intervals, in case there are + * other time events with higher priority. 
+ */ + if (vif->bss_conf.assoc) { + delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); + /* We cannot remain off-channel longer than the DTIM interval */ + if (dtim_interval <= req_dur) { + req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; + if (req_dur <= AUX_ROC_MIN_DURATION) + req_dur = dtim_interval - + AUX_ROC_MIN_SAFETY_BUFFER; + } + } + + aux_roc_req.duration = cpu_to_le32(req_dur); + aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); + + IWL_DEBUG_TE(mvm, + "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", + channel->hw_value, req_dur, duration, delay, + dtim_interval); /* Set the node address */ memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); @@ -3467,6 +3175,11 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_remove_binding; + + ret = iwl_mvm_add_snif_sta(mvm, vif); + if (ret) + goto out_remove_binding; + } /* Handle binding during CSA */ @@ -3540,6 +3253,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; mvmvif->ps_disabled = false; + iwl_mvm_rm_snif_sta(mvm, vif); break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4bde2d027dcd..5f3ac8cccf49 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -157,7 +157,7 @@ struct iwl_mvm_dump_desc { struct iwl_fw_error_dump_trigger_desc trig_desc; }; -extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; +extern const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; struct iwl_mvm_phy_ctxt { u16 id; @@ -294,6 +294,7 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_EXIT_WORK, IWL_MVM_REF_PROTECT_CSA, IWL_MVM_REF_FW_DBG_COLLECT, + IWL_MVM_REF_INIT_UCODE, /* update debugfs.c when changing this */ @@ -404,7 +405,7 @@ struct iwl_mvm_vif { */ struct iwl_mvm_phy_ctxt *phy_ctxt; -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; @@ -421,6 +422,7 @@ struct iwl_mvm_vif { #if IS_ENABLED(CONFIG_IPV6) /* IPv6 addresses for WoWLAN */ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; + unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)]; int num_target_ipv6_addrs; #endif @@ -475,6 +477,14 @@ enum iwl_scan_status { IWL_MVM_SCAN_MASK = 0xff, }; +enum iwl_mvm_scan_type { + IWL_SCAN_TYPE_NOT_SET, + IWL_SCAN_TYPE_UNASSOC, + IWL_SCAN_TYPE_WILD, + IWL_SCAN_TYPE_MILD, + IWL_SCAN_TYPE_FRAGMENTED, +}; + /** * struct iwl_nvm_section - describes an NVM section in memory. 
* @@ -643,10 +653,14 @@ struct iwl_mvm { unsigned int scan_status; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; + enum iwl_mvm_scan_type scan_type; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; + /* ts of the beginning of a non-collect fw dbg data period */ + unsigned long fw_dbg_non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; + /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; @@ -666,6 +680,7 @@ struct iwl_mvm { /* Internal station */ struct iwl_mvm_int_sta aux_sta; + struct iwl_mvm_int_sta snif_sta; bool last_ebs_successful; @@ -717,8 +732,8 @@ struct iwl_mvm { s8 restart_fw; u8 fw_dbg_conf; struct delayed_work fw_dump_wk; - struct iwl_mvm_dump_desc *fw_dump_desc; - struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; + const struct iwl_mvm_dump_desc *fw_dump_desc; + const struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -726,12 +741,11 @@ struct iwl_mvm { struct ieee80211_vif *p2p_device_vif; -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; /* sched scan settings for net detect */ - struct cfg80211_sched_scan_request *nd_config; struct ieee80211_scan_ies nd_ies; struct cfg80211_match_set *nd_match_sets; int n_nd_match_sets; @@ -813,8 +827,6 @@ struct iwl_mvm { bool lar_regdom_set; enum iwl_mcc_source mcc_src; - u8 low_latency_agg_frame_limit; - /* TDLS channel switch data */ struct { struct delayed_work dwork; @@ -915,11 +927,9 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) { - return mvm->trans->cfg->d0i3 && - mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF && - !iwlwifi_mod_params.d0i3_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); + return !iwlwifi_mod_params.d0i3_disable && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) @@ -928,6 +938,19 @@ static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_DQA_SUPPORT); } +static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) +{ + /* For now we only use this mode to differentiate between + * slave transports, which handle D0i3 entry in suspend by + * themselves in conjunction with runtime PM D0i3. So, this + * function is used to check whether we need to do anything + * when entering suspend or if the transport layer has already + * done it. 
+ */ + return (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) && + (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3); +} + static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; @@ -975,6 +998,13 @@ static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); } +static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) && + IWL_MVM_BT_COEX_MPLUT; +} + static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { /* firmware flag isn't defined yet */ @@ -1108,6 +1138,11 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue); +void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, @@ -1256,10 +1291,31 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM +int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool host_awake, + u32 cmd_flags); +void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status *status); void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else +static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool host_awake, + u32 cmd_flags) +{ + return 0; +} + +static inline void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status *status) +{ +} + static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1270,6 +1326,7 @@ void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, + bool offload_ns, u32 cmd_flags); /* D0i3 */ @@ -1376,6 +1433,15 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq); +/* Return a bitmask with all the hw supported queues, except for the + * command queue, which can't be flushed. 
+ */ +static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) +{ + return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & + ~BIT(IWL_MVM_CMD_QUEUE)); +} + static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u8 fifo, u16 ssn, unsigned int wdg_timeout) @@ -1468,68 +1534,10 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); - -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - struct iwl_mvm_dump_desc *desc, - struct iwl_fw_dbg_trigger_tlv *trigger); -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) __printf(3, 4); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); -static inline bool -iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, - struct ieee80211_vif *vif) -{ - u32 trig_vif = le32_to_cpu(trig->vif_type); - - return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; -} - -static inline bool -iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && - (mvm->fw_dbg_conf == FW_DBG_INVALID || - (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); -} - -static inline bool -iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) - return false; - - return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); -} - -static inline void -iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - enum iwl_fw_dbg_trigger trig) -{ - struct iwl_fw_dbg_trigger_tlv *trigger; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig)) - return; - - trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig); - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); -} #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 2ee0f6fe56a1..7a3da2da6fd0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -104,13 +106,35 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, .len = { sizeof(struct iwl_nvm_access_cmd), length }, - .flags = CMD_SEND_IN_RFKILL, + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, data }, /* data may come from vmalloc, so use _DUP */ .dataflags = { 0, IWL_HCMD_DFL_DUP }, }; + struct iwl_rx_packet *pkt; + struct iwl_nvm_access_resp *nvm_resp; + int ret; - return iwl_mvm_send_cmd(mvm, &cmd); + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + pkt = cmd.resp_pkt; + if (!pkt) { + IWL_ERR(mvm, "Error in NVM_ACCESS response\n"); + return -EINVAL; + } + /* Extract & check NVM write response */ + nvm_resp = (void *)pkt->data; + if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) { + IWL_ERR(mvm, + "NVM access write command failed for section %u (status = 0x%x)\n", + section, le16_to_cpu(nvm_resp->status)); + ret = -EIO; + } + + iwl_free_resp(&cmd); + return ret; } static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, @@ -210,6 +234,19 @@ static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, return 0; } +static void iwl_mvm_nvm_fixups(struct iwl_mvm *mvm, unsigned int section, + u8 *data, unsigned int len) +{ +#define IWL_4165_DEVICE_ID 0x5501 +#define NVM_SKU_CAP_MIMO_DISABLE BIT(5) + + if (section == NVM_SECTION_TYPE_PHY_SKU && + mvm->trans->hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && + (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) + /* OTP 0x52 bug work around: it's a 1x1 device */ + data[3] = ANT_B | (ANT_B << 4); +} + /* * Reads an NVM section completely. 
* NICs prior to 7000 family doesn't have a real NVM, but just read @@ -250,6 +287,8 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, offset += ret; } + iwl_mvm_nvm_fixups(mvm, section, data, offset); + IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section); return offset; @@ -316,8 +355,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled, mac_addr0, mac_addr1, - mvm->trans->hw_id); + lar_enabled, mac_addr0, mac_addr1); } #define MAX_NVM_FILE_LEN 16384 @@ -353,7 +391,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) __le16 word2; u8 data[]; } *file_sec; - const u8 *eof, *temp; + const u8 *eof; + u8 *temp; int max_section_size; const __le32 *dword_buff; @@ -483,6 +522,9 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) ret = -ENOMEM; break; } + + iwl_mvm_nvm_fixups(mvm, section_id, temp, section_size); + kfree(mvm->nvm_sections[section_id].data); mvm->nvm_sections[section_id].data = temp; mvm->nvm_sections[section_id].length = section_size; @@ -548,6 +590,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) ret = -ENOMEM; break; } + + iwl_mvm_nvm_fixups(mvm, section, temp, ret); + mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; @@ -597,7 +642,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) else mvm->nvm_file_name = nvm_file_C; - if (ret == -EFAULT && mvm->nvm_file_name) { + if ((ret == -EFAULT || ret == -ENOENT) && + mvm->nvm_file_name) { /* in case nvm file was failed try again */ ret = iwl_mvm_read_external_nvm(mvm); if (ret) @@ -627,6 +673,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, .source_id = (u8)src_id, }; struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL; + struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = NULL; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, @@ -638,11 +685,15 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, u32 status; int resp_len, n_channels; u16 mcc; + bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2); if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return ERR_PTR(-EOPNOTSUPP); cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); + if (!resp_v2) + cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1); IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1], src_id); @@ -654,31 +705,50 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, pkt = cmd.resp_pkt; /* Extract MCC response */ - mcc_resp = (void *)pkt->data; - status = le32_to_cpu(mcc_resp->status); + if (resp_v2) { + mcc_resp = (void *)pkt->data; + n_channels = __le32_to_cpu(mcc_resp->n_channels); + } else { + mcc_resp_v1 = (void *)pkt->data; + n_channels = __le32_to_cpu(mcc_resp_v1->n_channels); + } + + resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * + sizeof(__le32); + + resp_cp = kzalloc(resp_len, GFP_KERNEL); + if (!resp_cp) { + ret = -ENOMEM; + goto exit; + } + + if (resp_v2) { + memcpy(resp_cp, mcc_resp, resp_len); + } else { + resp_cp->status = mcc_resp_v1->status; + resp_cp->mcc = mcc_resp_v1->mcc; + resp_cp->cap = mcc_resp_v1->cap; + resp_cp->source_id = mcc_resp_v1->source_id; + resp_cp->n_channels = mcc_resp_v1->n_channels; + memcpy(resp_cp->channels, mcc_resp_v1->channels, + n_channels * sizeof(__le32)); + } + + status = le32_to_cpu(resp_cp->status); - mcc = 
le16_to_cpu(mcc_resp->mcc); + mcc = le16_to_cpu(resp_cp->mcc); /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ if (mcc == 0) { mcc = 0x3030; /* "00" - world */ - mcc_resp->mcc = cpu_to_le16(mcc); + resp_cp->mcc = cpu_to_le16(mcc); } - n_channels = __le32_to_cpu(mcc_resp->n_channels); IWL_DEBUG_LAR(mvm, "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", status, mcc, mcc >> 8, mcc & 0xff, !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); - resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32); - resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); - if (!resp_cp) { - ret = -ENOMEM; - goto exit; - } - - ret = 0; exit: iwl_free_resp(&cmd); if (ret) diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c index 68b0169c8892..6338d9cf7070 100644 --- a/drivers/net/wireless/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -64,6 +66,7 @@ *****************************************************************************/ #include <net/ipv6.h> #include <net/addrconf.h> +#include <linux/bitops.h> #include "mvm.h" void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, @@ -86,6 +89,7 @@ void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, + bool offload_ns, u32 cmd_flags) { union { @@ -106,6 +110,13 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, #if IS_ENABLED(CONFIG_IPV6) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int i; + /* + * Skip tentative address when ns offload is enabled to avoid + * violating RFC4862. + * Keep tentative address when ns offload is disabled so the NS packets + * will not be filtered out and will wake up the host. 
+ */ + bool skip_tentative = offload_ns; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL || capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { @@ -113,6 +124,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct iwl_targ_addr *addrs; int n_nsc, n_addrs; int c; + int num_skipped = 0; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { nsc = cmd.v3s.ns_config; @@ -126,9 +138,6 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L; } - if (mvmvif->num_target_ipv6_addrs) - enabled |= IWL_D3_PROTO_OFFLOAD_NS; - /* * For each address we have (and that will fit) fill a target * address struct and combine for NS offload structs with the @@ -140,6 +149,12 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct in6_addr solicited_addr; int j; + if (skip_tentative && + test_bit(i, mvmvif->tentative_addrs)) { + num_skipped++; + continue; + } + addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], &solicited_addr); for (j = 0; j < c; j++) @@ -154,41 +169,64 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN); } + if (mvmvif->num_target_ipv6_addrs - num_skipped) + enabled |= IWL_D3_PROTO_IPV6_VALID; + if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) - cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i); + cmd.v3s.num_valid_ipv6_addrs = + cpu_to_le32(i - num_skipped); else - cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i); + cmd.v3l.num_valid_ipv6_addrs = + cpu_to_le32(i - num_skipped); } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { - if (mvmvif->num_target_ipv6_addrs) { - enabled |= IWL_D3_PROTO_OFFLOAD_NS; - memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); - } + bool found = false; BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0])); for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, - IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) + IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) { + if (skip_tentative && + test_bit(i, mvmvif->tentative_addrs)) + continue; + memcpy(cmd.v2.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i], sizeof(cmd.v2.target_ipv6_addr[i])); - } else { - if (mvmvif->num_target_ipv6_addrs) { - enabled |= IWL_D3_PROTO_OFFLOAD_NS; - memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); - } + found = true; + } + if (found) { + enabled |= IWL_D3_PROTO_IPV6_VALID; + memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); + } + } else { + bool found = false; BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0])); for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, - IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) + IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) { + if (skip_tentative && + test_bit(i, mvmvif->tentative_addrs)) + continue; + memcpy(cmd.v1.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i], sizeof(cmd.v1.target_ipv6_addr[i])); + + found = true; + } + + if (found) { + enabled |= IWL_D3_PROTO_IPV6_VALID; + memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); + } } -#endif + if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID)) + enabled |= IWL_D3_PROTO_OFFLOAD_NS; +#endif if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { common = &cmd.v3s.common; size = sizeof(cmd.v3s); @@ -204,7 +242,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, } if (vif->bss_conf.arp_addr_cnt) { - enabled |= IWL_D3_PROTO_OFFLOAD_ARP; + enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID; common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); } 
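In the offloading.c hunk just above, NS offload now skips IPv6 addresses that are still tentative (tracked in the new mvmvif->tentative_addrs bitmap), so the firmware never answers neighbour solicitations for an address that has not finished duplicate address detection (RFC 4862); when offload is disabled the tentative addresses are kept so the NS frames are not filtered and still wake the host. The following self-contained C sketch shows only that selection logic, using simplified placeholder types instead of the driver's command structures.

#include <stdbool.h>
#include <stddef.h>

#define MAX_IPV6_ADDRS 12

struct ipv6_addr { unsigned char s6_addr[16]; };

struct offload_cfg {
	struct ipv6_addr targets[MAX_IPV6_ADDRS];
	bool tentative[MAX_IPV6_ADDRS];	/* stands in for mvmvif->tentative_addrs */
	size_t num_targets;
};

/*
 * Copy the addresses the firmware may answer neighbour solicitations for.
 * With NS offload enabled, tentative addresses are skipped; with it
 * disabled they are kept so the NS frames reach (and wake) the host.
 * Returns how many addresses were handed to the firmware.
 */
static size_t fill_ns_targets(const struct offload_cfg *cfg, bool offload_ns,
			      struct ipv6_addr *out, size_t out_max,
			      bool *ipv6_valid)
{
	size_t i, n = 0;

	for (i = 0; i < cfg->num_targets && n < out_max; i++) {
		if (offload_ns && cfg->tentative[i])
			continue;	/* "skip_tentative" in the patch */
		out[n++] = cfg->targets[i];
	}

	*ipv6_valid = n > 0;	/* maps to IWL_D3_PROTO_IPV6_VALID */
	return n;
}

int main(void)
{
	struct offload_cfg cfg = { .num_targets = 2 };
	struct ipv6_addr out[MAX_IPV6_ADDRS];
	bool ipv6_valid;

	cfg.tentative[1] = true;	/* second address is still doing DAD */
	size_t n = fill_ns_targets(&cfg, true, out, MAX_IPV6_ADDRS, &ipv6_valid);
	return (n == 1 && ipv6_valid) ? 0 : 1;
}

NS offload itself (IWL_D3_PROTO_OFFLOAD_NS) is then enabled only when offload_ns was requested and at least one non-tentative address made it into the command, matching the final "if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))" check in the patch.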
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 13c97f665ba8..89ea70deeb84 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -82,6 +82,9 @@ #include "rs.h" #include "fw-api-scan.h" #include "time-event.h" +#include "fw-dbg.h" +#include "fw-api.h" +#include "fw-api-scan.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -268,102 +271,127 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { }; #undef RX_HANDLER #undef RX_HANDLER_GRP -#define CMD(x) [x] = #x - -static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = { - CMD(MVM_ALIVE), - CMD(REPLY_ERROR), - CMD(ECHO_CMD), - CMD(INIT_COMPLETE_NOTIF), - CMD(PHY_CONTEXT_CMD), - CMD(MGMT_MCAST_KEY), - CMD(TX_CMD), - CMD(TXPATH_FLUSH), - CMD(SHARED_MEM_CFG), - CMD(MAC_CONTEXT_CMD), - CMD(TIME_EVENT_CMD), - CMD(TIME_EVENT_NOTIFICATION), - CMD(BINDING_CONTEXT_CMD), - CMD(TIME_QUOTA_CMD), - CMD(NON_QOS_TX_COUNTER_CMD), - CMD(DC2DC_CONFIG_CMD), - CMD(NVM_ACCESS_CMD), - CMD(PHY_CONFIGURATION_CMD), - CMD(CALIB_RES_NOTIF_PHY_DB), - CMD(SET_CALIB_DEFAULT_CMD), - CMD(FW_PAGING_BLOCK_CMD), - CMD(ADD_STA_KEY), - CMD(ADD_STA), - CMD(FW_GET_ITEM_CMD), - CMD(REMOVE_STA), - CMD(LQ_CMD), - CMD(SCAN_OFFLOAD_CONFIG_CMD), - CMD(MATCH_FOUND_NOTIFICATION), - CMD(SCAN_OFFLOAD_REQUEST_CMD), - CMD(SCAN_OFFLOAD_ABORT_CMD), - CMD(HOT_SPOT_CMD), - CMD(SCAN_OFFLOAD_COMPLETE), - CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), - CMD(SCAN_ITERATION_COMPLETE), - CMD(POWER_TABLE_CMD), - CMD(WEP_KEY), - CMD(REPLY_RX_PHY_CMD), - CMD(REPLY_RX_MPDU_CMD), - CMD(BEACON_NOTIFICATION), - CMD(BEACON_TEMPLATE_CMD), - CMD(STATISTICS_CMD), - CMD(STATISTICS_NOTIFICATION), - CMD(EOSP_NOTIFICATION), - CMD(REDUCE_TX_POWER_CMD), - CMD(TX_ANT_CONFIGURATION_CMD), - CMD(D3_CONFIG_CMD), - CMD(D0I3_END_CMD), - CMD(PROT_OFFLOAD_CONFIG_CMD), - CMD(OFFLOADS_QUERY_CMD), - CMD(REMOTE_WAKE_CONFIG_CMD), - CMD(WOWLAN_PATTERNS), - CMD(WOWLAN_CONFIGURATION), - CMD(WOWLAN_TSC_RSC_PARAM), - CMD(WOWLAN_TKIP_PARAM), - CMD(WOWLAN_KEK_KCK_MATERIAL), - CMD(WOWLAN_GET_STATUSES), - CMD(WOWLAN_TX_POWER_PER_DB), - CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD), - CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD), - CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD), - CMD(CARD_STATE_NOTIFICATION), - CMD(MISSED_BEACONS_NOTIFICATION), - CMD(BT_COEX_PRIO_TABLE), - CMD(BT_COEX_PROT_ENV), - CMD(BT_PROFILE_NOTIFICATION), - CMD(BT_CONFIG), - CMD(MCAST_FILTER_CMD), - CMD(BCAST_FILTER_CMD), - CMD(REPLY_SF_CFG_CMD), - CMD(REPLY_BEACON_FILTERING_CMD), - CMD(CMD_DTS_MEASUREMENT_TRIGGER), - CMD(DTS_MEASUREMENT_NOTIFICATION), - CMD(REPLY_THERMAL_MNG_BACKOFF), - CMD(MAC_PM_POWER_TABLE), - CMD(LTR_CONFIG), - CMD(BT_COEX_CI), - CMD(BT_COEX_UPDATE_SW_BOOST), - CMD(BT_COEX_UPDATE_CORUN_LUT), - CMD(BT_COEX_UPDATE_REDUCED_TXP), - CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), - CMD(ANTENNA_COUPLING_NOTIFICATION), - CMD(SCD_QUEUE_CFG), - CMD(SCAN_CFG_CMD), - CMD(SCAN_REQ_UMAC), - CMD(SCAN_ABORT_UMAC), - CMD(SCAN_COMPLETE_UMAC), - CMD(TDLS_CHANNEL_SWITCH_CMD), - CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), - CMD(TDLS_CONFIG_CMD), - CMD(MCC_UPDATE_CMD), - CMD(SCAN_ITERATION_COMPLETE_UMAC), + +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { + HCMD_NAME(MVM_ALIVE), + HCMD_NAME(REPLY_ERROR), + HCMD_NAME(ECHO_CMD), + HCMD_NAME(INIT_COMPLETE_NOTIF), + HCMD_NAME(PHY_CONTEXT_CMD), + HCMD_NAME(DBG_CFG), + HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION), + HCMD_NAME(SCAN_CFG_CMD), + HCMD_NAME(SCAN_REQ_UMAC), + HCMD_NAME(SCAN_ABORT_UMAC), + HCMD_NAME(SCAN_COMPLETE_UMAC), + HCMD_NAME(TOF_CMD), + HCMD_NAME(TOF_NOTIFICATION), + HCMD_NAME(ADD_STA_KEY), + HCMD_NAME(ADD_STA), + HCMD_NAME(REMOVE_STA), + HCMD_NAME(FW_GET_ITEM_CMD), + HCMD_NAME(TX_CMD), + HCMD_NAME(SCD_QUEUE_CFG), + HCMD_NAME(TXPATH_FLUSH), + HCMD_NAME(MGMT_MCAST_KEY), + HCMD_NAME(WEP_KEY), + HCMD_NAME(SHARED_MEM_CFG), + HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD), + HCMD_NAME(MAC_CONTEXT_CMD), + HCMD_NAME(TIME_EVENT_CMD), + HCMD_NAME(TIME_EVENT_NOTIFICATION), + HCMD_NAME(BINDING_CONTEXT_CMD), + HCMD_NAME(TIME_QUOTA_CMD), + HCMD_NAME(NON_QOS_TX_COUNTER_CMD), + HCMD_NAME(LQ_CMD), + HCMD_NAME(FW_PAGING_BLOCK_CMD), + HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), + HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), + HCMD_NAME(HOT_SPOT_CMD), + HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), + HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD), + HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD), + HCMD_NAME(BT_COEX_UPDATE_SW_BOOST), + HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT), + HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), + HCMD_NAME(BT_COEX_CI), + HCMD_NAME(PHY_CONFIGURATION_CMD), + HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), + HCMD_NAME(SCAN_OFFLOAD_COMPLETE), + HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), + HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD), + HCMD_NAME(POWER_TABLE_CMD), + HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), + HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF), + HCMD_NAME(DC2DC_CONFIG_CMD), + HCMD_NAME(NVM_ACCESS_CMD), + HCMD_NAME(SET_CALIB_DEFAULT_CMD), + HCMD_NAME(BEACON_NOTIFICATION), + HCMD_NAME(BEACON_TEMPLATE_CMD), + HCMD_NAME(TX_ANT_CONFIGURATION_CMD), + HCMD_NAME(BT_CONFIG), + HCMD_NAME(STATISTICS_CMD), + HCMD_NAME(STATISTICS_NOTIFICATION), + HCMD_NAME(EOSP_NOTIFICATION), + HCMD_NAME(REDUCE_TX_POWER_CMD), + HCMD_NAME(CARD_STATE_CMD), + HCMD_NAME(CARD_STATE_NOTIFICATION), + HCMD_NAME(MISSED_BEACONS_NOTIFICATION), + HCMD_NAME(TDLS_CONFIG_CMD), + HCMD_NAME(MAC_PM_POWER_TABLE), + HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), + HCMD_NAME(MFUART_LOAD_NOTIFICATION), + HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), + HCMD_NAME(REPLY_RX_PHY_CMD), + HCMD_NAME(REPLY_RX_MPDU_CMD), + HCMD_NAME(BA_NOTIF), + HCMD_NAME(MCC_UPDATE_CMD), + HCMD_NAME(MCC_CHUB_UPDATE_CMD), + HCMD_NAME(MARKER_CMD), + HCMD_NAME(BT_COEX_PRIO_TABLE), + HCMD_NAME(BT_COEX_PROT_ENV), + HCMD_NAME(BT_PROFILE_NOTIFICATION), + HCMD_NAME(BCAST_FILTER_CMD), + HCMD_NAME(MCAST_FILTER_CMD), + HCMD_NAME(REPLY_SF_CFG_CMD), + HCMD_NAME(REPLY_BEACON_FILTERING_CMD), + HCMD_NAME(D3_CONFIG_CMD), + HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), + HCMD_NAME(OFFLOADS_QUERY_CMD), + HCMD_NAME(REMOTE_WAKE_CONFIG_CMD), + HCMD_NAME(MATCH_FOUND_NOTIFICATION), + HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER), + HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), + HCMD_NAME(WOWLAN_PATTERNS), + HCMD_NAME(WOWLAN_CONFIGURATION), + HCMD_NAME(WOWLAN_TSC_RSC_PARAM), + HCMD_NAME(WOWLAN_TKIP_PARAM), + HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), + HCMD_NAME(WOWLAN_GET_STATUSES), + HCMD_NAME(WOWLAN_TX_POWER_PER_DB), + HCMD_NAME(SCAN_ITERATION_COMPLETE), + HCMD_NAME(D0I3_END_CMD), + HCMD_NAME(LTR_CONFIG), + HCMD_NAME(REPLY_DEBUG_CMD), +}; + +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { + HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), + HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; -#undef CMD + +static const struct iwl_hcmd_arr iwl_mvm_groups[] = { + [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), + [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), + [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), +}; + /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); @@ -452,7 +480,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->first_agg_queue = 12; } mvm->sf_state = SF_UNINIT; - mvm->low_latency_agg_frame_limit = 6; mvm->cur_ucode = IWL_UCODE_INIT; mutex_init(&mvm->mutex); @@ -485,20 +512,36 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_4K: + trans_cfg.rx_buf_size = IWL_AMSDU_4K; + break; + case IWL_AMSDU_8K: + trans_cfg.rx_buf_size = IWL_AMSDU_8K; + break; + case IWL_AMSDU_12K: + trans_cfg.rx_buf_size = IWL_AMSDU_12K; + break; + default: + pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, + iwlwifi_mod_params.amsdu_size); + trans_cfg.rx_buf_size = IWL_AMSDU_4K; + } trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR); if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE) trans_cfg.bc_table_dword = true; - trans_cfg.command_names = iwl_mvm_cmd_strings; + trans_cfg.command_groups = iwl_mvm_groups; + trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; trans_cfg.sdio_adma_addr = fw->sdio_adma_addr; + trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD; /* Set a short watchdog for the command queue */ trans_cfg.cmd_q_wdg_timeout = @@ -561,9 +604,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, goto out_free; mutex_lock(&mvm->mutex); + iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) iwl_trans_stop_device(trans); + iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); /* returns 0 if successful, 1 if success but in rfkill */ if (err < 0 && !iwlmvm_mod_params.init_dbg) { @@ -591,8 +636,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); - /* rpm starts with a taken ref. only set the appropriate bit here. 
*/ - mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1; + /* rpm starts with a taken reference, we can release it now */ + iwl_trans_unref(mvm->trans); iwl_mvm_tof_init(mvm); @@ -628,12 +673,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) kfree(mvm->d3_resume_sram); - if (mvm->nd_config) { - kfree(mvm->nd_config->match_sets); - kfree(mvm->nd_config->scan_plans); - kfree(mvm->nd_config); - mvm->nd_config = NULL; - } #endif iwl_trans_op_mode_leave(mvm->trans); @@ -783,6 +822,8 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode, if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + else if (pkt->hdr.cmd == FRAME_RELEASE) + iwl_mvm_rx_frame_release(mvm, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else @@ -797,9 +838,9 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) - iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) - iwl_mvm_rx_rx_phy_cmd(mvm, rxb); + iwl_mvm_rx_phy_cmd_mq(mvm, rxb); else iwl_mvm_rx_common(mvm, rxb, pkt); } @@ -829,6 +870,18 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) } } +static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, + const struct iwl_device_cmd *cmd) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + /* + * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA + * commands that need to block the Tx queues. + */ + iwl_trans_block_txq_ptrs(mvm->trans, false); +} + static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -1023,6 +1076,7 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) struct iwl_d0i3_iter_data { struct iwl_mvm *mvm; + struct ieee80211_vif *connected_vif; u8 ap_sta_id; u8 vif_count; u8 offloading_tid; @@ -1103,7 +1157,8 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac, data->disable_offloading = true; iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags); - iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags); + iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, + false, flags); /* * on init/association, mvm already configures POWER_TABLE_CMD @@ -1113,6 +1168,12 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac, */ data->ap_sta_id = mvmvif->ap_sta_id; data->vif_count++; + + /* + * no new commands can be sent at this stage, so it's safe + * to save the vif pointer during d0i3 entrance. + */ + data->connected_vif = vif; } static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, @@ -1134,7 +1195,8 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; cmd->offloading_tid = iter_data->offloading_tid; - + cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | + ENABLE_DHCP_FILTERING; /* * The d0i3 uCode takes care of the nonqos counters, * so configure only the qos seq ones. 
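[Editorial aside, not part of the patch] The command-name tables introduced at the top of this op-mode change (iwl_mvm_legacy_names, iwl_mvm_phy_names, grouped via iwl_mvm_groups and handed to the transport through trans_cfg.command_groups) carry the comment that they must stay sorted by hex value because name lookup is done by binary search. A minimal userspace sketch of that lookup pattern follows; the struct layout, command ids and helper names below are simplified stand-ins for illustration only, not the driver's real iwl_hcmd_names/iwl_hcmd_arr API.

/*
 * Illustrative sketch: why a command-name table sorted by id lets the
 * lookup be a plain bsearch().  Ids and names here are examples only.
 */
#include <stdio.h>
#include <stdlib.h>

struct hcmd_name {
	unsigned char cmd_id;	/* the hex value the array is sorted by */
	const char *name;
};

/* kept sorted by cmd_id, exactly like the HCMD_NAME() arrays above */
static const struct hcmd_name demo_names[] = {
	{ 0x01, "MVM_ALIVE" },
	{ 0x02, "REPLY_ERROR" },
	{ 0x0c, "TX_CMD" },
	{ 0x20, "SCAN_CFG_CMD" },
};

static int hcmd_cmp(const void *key, const void *elem)
{
	unsigned char id = *(const unsigned char *)key;
	const struct hcmd_name *e = elem;

	return (int)id - (int)e->cmd_id;
}

static const char *hcmd_lookup(unsigned char id)
{
	const struct hcmd_name *e;

	e = bsearch(&id, demo_names,
		    sizeof(demo_names) / sizeof(demo_names[0]),
		    sizeof(demo_names[0]), hcmd_cmp);
	return e ? e->name : "UNKNOWN";
}

int main(void)
{
	printf("0x0c -> %s\n", hcmd_lookup(0x0c));	/* TX_CMD */
	printf("0x55 -> %s\n", hcmd_lookup(0x55));	/* UNKNOWN */
	return 0;
}

If an entry were appended out of order, the binary search would silently miss it, which is why the tables above insist on sorted order rather than relying on the declaration sequence of the command ids.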
@@ -1165,6 +1227,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); + if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + return -EINVAL; + set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); /* @@ -1196,8 +1261,17 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) /* make sure we have no running tx while configuring the seqno */ synchronize_net(); + /* Flush the hw queues, in case something got queued during entry */ + ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags); + if (ret) + return ret; + /* configure wowlan configuration only if needed */ if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { + iwl_mvm_wowlan_config_key_params(mvm, + d0i3_iter_data.connected_vif, + true, flags); + iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data); @@ -1227,25 +1301,30 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); } -struct iwl_mvm_wakeup_reason_iter_data { +struct iwl_mvm_d0i3_exit_work_iter_data { struct iwl_mvm *mvm; + struct iwl_wowlan_status *status; u32 wakeup_reasons; }; -static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) { - struct iwl_mvm_wakeup_reason_iter_data *data = _data; + struct iwl_mvm_d0i3_exit_work_iter_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 reasons = data->wakeup_reasons; - if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc && - data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) { - if (data->wakeup_reasons & - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) - iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); - else - ieee80211_beacon_loss(vif); - } + /* consider only the relevant station interface */ + if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || + data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id) + return; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) + iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); + else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON) + ieee80211_beacon_loss(vif); + else + iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status); } void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) @@ -1311,9 +1390,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) .id = WOWLAN_GET_STATUSES, .flags = CMD_HIGH_PRIO | CMD_WANT_SKB, }; + struct iwl_mvm_d0i3_exit_work_iter_data iter_data = { + .mvm = mvm, + }; + struct iwl_wowlan_status *status; int ret; - u32 handled_reasons, wakeup_reasons = 0; + u32 wakeup_reasons = 0; __le16 *qos_seq = NULL; mutex_lock(&mvm->mutex); @@ -1330,18 +1413,12 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons); - handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; - if (wakeup_reasons & handled_reasons) { - struct iwl_mvm_wakeup_reason_iter_data data = { - .mvm = mvm, - .wakeup_reasons = wakeup_reasons, - }; - - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d0i3_wakeup_reason_iter, &data); - } + iter_data.wakeup_reasons = wakeup_reasons; + iter_data.status = status; + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d0i3_exit_work_iter, + &iter_data); out: iwl_mvm_d0i3_enable_tx(mvm, qos_seq); @@ -1367,6 
+1444,9 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); + if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + return -EINVAL; + mutex_lock(&mvm->d0i3_suspend_mutex); if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) { IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n"); @@ -1399,6 +1479,7 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ + .async_cb = iwl_mvm_async_cb, \ .queue_full = iwl_mvm_stop_sw_queue, \ .queue_not_full = iwl_mvm_wake_sw_queue, \ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ @@ -1423,8 +1504,12 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, unsigned int queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct iwl_rx_packet *pkt = rxb_addr(rxb); - iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) + iwl_mvm_rx_frame_release(mvm, rxb, queue); + else + iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index e68a475e3071..6e6a56f2153d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index bed9696ee410..9de159f1ef2d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -27,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -613,8 +613,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, break; case NL80211_IFTYPE_STATION: - /* only a single MAC of the same type */ - WARN_ON(power_iterator->bss_vif); power_iterator->bss_vif = vif; if (mvmvif->phy_ctxt) if (mvmvif->phy_ctxt->id < MAX_PHYS) diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index 509a66d05245..0b762b4f8fad 100644 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index d1ad10391b47..7bb6fd0e4391 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -20,7 +20,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -552,9 +552,10 @@ static char *rs_pretty_rate(const struct rs_rate *rate) }; const char *rate_str; - if (is_type_legacy(rate->type)) + if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX)) rate_str = legacy_rates[rate->index]; - else if (is_type_ht(rate->type) || is_type_vht(rate->type)) + else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) && + (rate->index <= IWL_RATE_MCS_9_INDEX)) rate_str = ht_vht_rates[rate->index]; else rate_str = "BAD_RATE"; @@ -1827,7 +1828,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; rate_mask = lq_sta->active_mimo2_rate; } else { - WARN_ON_ONCE("Bad column mode"); + WARN_ONCE(1, "Bad column mode"); } if (column->mode != RS_LEGACY) { @@ -2550,6 +2551,8 @@ static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = { { S8_MIN, IWL_RATE_MCS_0_INDEX }, }; +#define IWL_RS_LOW_RSSI_THRESHOLD (-76) /* dBm */ + /* Init the optimal rate based on STA caps * This combined with rssi is used to report the last tx rate * to userspace when we haven't transmitted enough frames. @@ -2635,11 +2638,13 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, * of last Rx */ static void rs_get_initial_rate(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum ieee80211_band band, struct rs_rate *rate) { int i, nentries; + unsigned long active_rate; s8 best_rssi = S8_MIN; u8 best_ant = ANT_NONE; u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); @@ -2680,19 +2685,55 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); } - if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) { - for (i = 0; i < nentries; i++) { - int rate_idx = initial_rates[i].rate_idx; - if ((best_rssi >= initial_rates[i].rssi) && - (BIT(rate_idx) & lq_sta->active_legacy_rate)) { - rate->index = rate_idx; - break; - } + if (!IWL_MVM_RS_RSSI_BASED_INIT_RATE) + goto out; + + /* Start from a higher rate if the corresponding debug capability + * is enabled. The rate is chosen according to AP capabilities. + * In case of VHT/HT when the rssi is low fallback to the case of + * legacy rates. 
+ */ + if (sta->vht_cap.vht_supported && + best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { + if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { + initial_rates = rs_optimal_rates_vht_40_80mhz; + nentries = ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); + if (sta->bandwidth >= IEEE80211_STA_RX_BW_80) + rate->bw = RATE_MCS_CHAN_WIDTH_80; + else + rate->bw = RATE_MCS_CHAN_WIDTH_40; + } else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { + initial_rates = rs_optimal_rates_vht_20mhz; + nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); + rate->bw = RATE_MCS_CHAN_WIDTH_20; + } else { + IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth); + goto out; + } + active_rate = lq_sta->active_siso_rate; + rate->type = LQ_VHT_SISO; + } else if (sta->ht_cap.ht_supported && + best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { + initial_rates = rs_optimal_rates_ht; + nentries = ARRAY_SIZE(rs_optimal_rates_ht); + active_rate = lq_sta->active_siso_rate; + rate->type = LQ_HT_SISO; + } else { + active_rate = lq_sta->active_legacy_rate; + } + + for (i = 0; i < nentries; i++) { + int rate_idx = initial_rates[i].rate_idx; + + if ((best_rssi >= initial_rates[i].rssi) && + (BIT(rate_idx) & active_rate)) { + rate->index = rate_idx; + break; } } - IWL_DEBUG_RATE(mvm, "rate_idx %d ANT %s\n", rate->index, - rs_pretty_ant(rate->ant)); +out: + rs_dump_rate(mvm, rate, "INITIAL"); } /* Save info about RSSI of last Rx */ @@ -2752,14 +2793,11 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - rs_get_initial_rate(mvm, lq_sta, band, rate); + rs_get_initial_rate(mvm, sta, lq_sta, band, rate); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B); - if (rate->ant == ANT_A) - tbl->column = RS_COLUMN_LEGACY_ANT_A; - else - tbl->column = RS_COLUMN_LEGACY_ANT_B; + tbl->column = rs_get_column_from_rate(rate); rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); @@ -3454,15 +3492,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, * Tx Fifo so that it can start a transaction in the same TxOP. This * basically allows the firmware to send bursts. */ - if (iwl_mvm_vif_low_latency(mvmvif)) { + if (iwl_mvm_vif_low_latency(mvmvif)) lq_cmd->agg_frame_cnt_limit--; - if (mvm->low_latency_agg_frame_limit) - lq_cmd->agg_frame_cnt_limit = - min(lq_cmd->agg_frame_cnt_limit, - mvm->low_latency_agg_frame_limit); - } - if (mvmsta->vif->p2p) lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK; diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 81314ad9ebe0..bdb6f2d8d854 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -20,7 +20,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 5b58f5320e8d..145ec68ce6f9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -61,10 +61,12 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include <linux/etherdevice.h> #include <linux/skbuff.h> #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" +#include "fw-dbg.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler @@ -261,7 +263,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_phy_info *phy_info; struct iwl_rx_mpdu_res_start *rx_res; - struct ieee80211_sta *sta; + struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u32 len; u32 ampdu_status; @@ -332,22 +334,33 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, (unsigned long long)rx_status->mactime); rcu_read_lock(); - /* - * We have tx blocked stations (with CS bit). If we heard frames from - * a blocked station on a new channel we can TX to it again. - */ - if (unlikely(mvm->csa_tx_block_bcn_timeout)) { - sta = ieee80211_find_sta( - rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2); - if (sta) - iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) { + u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK; + + id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT; + + if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); + if (IS_ERR(sta)) + sta = NULL; + } + } else if (!is_multicast_ether_addr(hdr->addr2)) { + /* This is fine since we prevent two stations with the same + * address from being added. + */ + sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } - /* This is fine since we don't support multiple AP interfaces */ - sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + /* We have tx blocked stations (with CS bit). If we heard + * frames from a blocked station on a new channel we can + * TX to it again. + */ + if (unlikely(mvm->csa_tx_block_bcn_timeout)) + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && @@ -368,11 +381,10 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, if (trig_check && rx_status->signal < rssi) iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); } - } - - if (sta && ieee80211_is_data(hdr->frame_control)) - iwl_mvm_rx_csum(sta, skb, rx_pkt_status); + if (ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_csum(sta, skb, rx_pkt_status); + } rcu_read_unlock(); /* set the preamble flag if appropriate */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c new file mode 100644 index 000000000000..0c073e02fd4c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -0,0 +1,458 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include "iwl-trans.h" +#include "mvm.h" +#include "fw-api.h" +#include "fw-dbg.h" + +void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + mvm->ampdu_ref++; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { + spin_lock(&mvm->drv_stats_lock); + mvm->drv_rx_stats.ampdu_count++; + spin_unlock(&mvm->drv_stats_lock); + } +#endif +} + +static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, + int queue, struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); + struct iwl_mvm_key_pn *ptk_pn; + u8 tid, keyidx; + u8 pn[IEEE80211_CCMP_PN_LEN]; + u8 *extiv; + + /* do PN checking */ + + /* multicast and non-data only arrives on default queue */ + if (!ieee80211_is_data(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return 0; + + /* do not check PN for open AP */ + if (!(stats->flag & RX_FLAG_DECRYPTED)) + return 0; + + /* + * avoid checking for default queue - we don't want to replicate + * all the logic that's necessary for checking the PN on fragmented + * frames, leave that to mac80211 + */ + if (queue == 0) + return 0; + + /* if we are here - this for sure is either CCMP or GCMP */ + if (IS_ERR_OR_NULL(sta)) { + IWL_ERR(mvm, + "expected hw-decrypted unicast frame for station\n"); + return -1; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); + keyidx = extiv[3] >> 6; + + ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]); + if (!ptk_pn) + return -1; + + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + else + tid = 0; + + /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */ + if (tid >= IWL_MAX_TID_COUNT) + return -1; + + /* load pn */ + pn[0] = extiv[7]; + pn[1] = extiv[6]; + pn[2] = extiv[5]; + pn[3] = extiv[4]; + pn[4] = extiv[1]; + pn[5] = extiv[0]; + + if (memcmp(pn, ptk_pn->q[queue].pn[tid], + IEEE80211_CCMP_PN_LEN) <= 0) + return -1; + + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + stats->flag |= RX_FLAG_PN_VALIDATED; + + return 0; +} + +/* iwl_mvm_create_skb Adds the rxb to a new skb */ +static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, + u16 len, u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) +{ + unsigned int hdrlen, fraglen; + + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr (including crypto if present, and + * an additional 8 bytes for SNAP/ethertype, see below) so that + * splice() or TCP coalesce are more efficient. + * + * Since, in addition, ieee80211_data_to_8023() always pull in at + * least 8 bytes (possibly more for mesh) we can do the same here + * to save the cost of doing it later. That still doesn't pull in + * the actual IP header since the typical case has a SNAP header. + * If the latter changes (there are efforts in the standards group + * to do so) we should revisit this and ieee80211_data_to_8023(). + */ + hdrlen = (len <= skb_tailroom(skb)) ? 
len : + sizeof(*hdr) + crypt_len + 8; + + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); + fraglen = len - hdrlen; + + if (fraglen) { + int offset = (void *)hdr + hdrlen - + rxb_addr(rxb) + rxb_offset(rxb); + + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, + fraglen, rxb->truesize); + } +} + +/* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ +static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct napi_struct *napi, + struct sk_buff *skb, int queue, + struct ieee80211_sta *sta) +{ + if (iwl_mvm_check_pn(mvm, skb, queue, sta)) + kfree_skb(skb); + else + ieee80211_rx_napi(mvm->hw, skb, napi); +} + +static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, + struct iwl_rx_mpdu_desc *desc, + struct ieee80211_rx_status *rx_status) +{ + int energy_a, energy_b, energy_c, max_energy; + + energy_a = desc->energy_a; + energy_a = energy_a ? -energy_a : S8_MIN; + energy_b = desc->energy_b; + energy_b = energy_b ? -energy_b : S8_MIN; + energy_c = desc->energy_c; + energy_c = energy_c ? -energy_c : S8_MIN; + max_energy = max(energy_a, energy_b); + max_energy = max(max_energy, energy_c); + + IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", + energy_a, energy_b, energy_c, max_energy); + + rx_status->signal = max_energy; + rx_status->chains = 0; /* TODO: phy info */ + rx_status->chain_signal[0] = energy_a; + rx_status->chain_signal[1] = energy_b; + rx_status->chain_signal[2] = energy_c; +} + +static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, + struct ieee80211_rx_status *stats, + struct iwl_rx_mpdu_desc *desc, int queue, + u8 *crypt_len) +{ + u16 status = le16_to_cpu(desc->status); + + if (!ieee80211_has_protected(hdr->frame_control) || + (status & IWL_RX_MPDU_STATUS_SEC_MASK) == + IWL_RX_MPDU_STATUS_SEC_NONE) + return 0; + + /* TODO: handle packets encrypted with unknown alg */ + + switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) { + case IWL_RX_MPDU_STATUS_SEC_CCM: + case IWL_RX_MPDU_STATUS_SEC_GCM: + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN); + /* alg is CCM: check MIC only */ + if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) + return -1; + + stats->flag |= RX_FLAG_DECRYPTED; + *crypt_len = IEEE80211_CCMP_HDR_LEN; + return 0; + case IWL_RX_MPDU_STATUS_SEC_TKIP: + /* Don't drop the frame and decrypt it in SW */ + if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) + return 0; + + *crypt_len = IEEE80211_TKIP_IV_LEN; + /* fall through if TTAK OK */ + case IWL_RX_MPDU_STATUS_SEC_WEP: + if (!(status & IWL_RX_MPDU_STATUS_ICV_OK)) + return -1; + + stats->flag |= RX_FLAG_DECRYPTED; + if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == + IWL_RX_MPDU_STATUS_SEC_WEP) + *crypt_len = IEEE80211_WEP_IV_LEN; + return 0; + case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: + if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) + return -1; + stats->flag |= RX_FLAG_DECRYPTED; + return 0; + default: + IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); + } + + return 0; +} + +static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, + struct sk_buff *skb, + struct iwl_rx_mpdu_desc *desc) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mvmvif->features & NETIF_F_RXCSUM && + desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) && + desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK)) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue) +{ + struct 
ieee80211_rx_status *rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; + struct ieee80211_hdr *hdr = (void *)(desc + 1); + u32 len = le16_to_cpu(desc->mpdu_len); + u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); + struct ieee80211_sta *sta = NULL; + struct sk_buff *skb; + u8 crypt_len = 0; + + /* Dont use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + + rx_status = IEEE80211_SKB_RXCB(skb); + + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { + kfree_skb(skb); + return; + } + + /* + * Keep packets with CRC errors (and with overrun) for monitor mode + * (otherwise the firmware discards them) but mark them as bad. + */ + if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) || + !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) { + IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", + le16_to_cpu(desc->status)); + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + } + + rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); + rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); + rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ : + IEEE80211_BAND_2GHZ; + rx_status->freq = ieee80211_channel_to_frequency(desc->channel, + rx_status->band); + iwl_mvm_get_signal_strength(mvm, desc, rx_status); + + rcu_read_lock(); + + if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) { + u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; + + if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); + if (IS_ERR(sta)) + sta = NULL; + } + } else if (!is_multicast_ether_addr(hdr->addr2)) { + /* + * This is fine since we prevent two stations with the same + * address from being added. + */ + sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); + } + + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + /* + * We have tx blocked stations (with CS bit). If we heard + * frames from a blocked station on a new channel we can + * TX to it again. + */ + if (unlikely(mvm->csa_tx_block_bcn_timeout)) + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + + rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); + + if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && + ieee80211_is_beacon(hdr->frame_control)) { + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; + bool trig_check; + s32 rssi; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, + FW_DBG_TRIGGER_RSSI); + rssi_trig = (void *)trig->data; + rssi = le32_to_cpu(rssi_trig->rssi); + + trig_check = + iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + trig); + if (trig_check && rx_status->signal < rssi) + iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + } + + /* TODO: multi queue TCM */ + + if (ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_csum(sta, skb, desc); + } + + /* + * TODO: PHY info. + * Verify we don't have the information in the MPDU descriptor and + * that it is not needed. 
+ * Make sure for monitor mode that we are on default queue, update + * ampdu_ref and the rest of phy info then + */ + + /* Set up the HT phy flags */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->flag |= RX_FLAG_40MHZ; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; + break; + } + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status->flag |= RX_FLAG_SHORT_GI; + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + rx_status->flag |= RX_FLAG_HT_GF; + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->flag |= RX_FLAG_LDPC; + if (rate_n_flags & RATE_MCS_HT_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->vht_nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->flag |= RX_FLAG_VHT; + rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_BF_MSK) + rx_status->vht_flag |= RX_VHT_FLAG_BF; + } else { + rx_status->rate_idx = + iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + } + + /* TODO: PHY info - update ampdu queue statistics (for debugfs) */ + /* TODO: PHY info - gscan */ + + iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + rcu_read_unlock(); +} + +void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, int queue) +{ + /* TODO */ +} diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index d6e0c1b5c20c..9a15642f80dd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -72,13 +72,6 @@ #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 -enum iwl_mvm_scan_type { - IWL_SCAN_TYPE_UNASSOC, - IWL_SCAN_TYPE_WILD, - IWL_SCAN_TYPE_MILD, - IWL_SCAN_TYPE_FRAGMENTED, -}; - enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_LOW, IWL_MVM_TRAFFIC_MEDIUM, @@ -89,6 +82,7 @@ struct iwl_mvm_scan_timing_params { u32 dwell_active; u32 dwell_passive; u32 dwell_fragmented; + u32 dwell_extended; u32 suspend_time; u32 max_out_time; }; @@ -98,6 +92,7 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = { .dwell_active = 10, .dwell_passive = 110, .dwell_fragmented = 44, + .dwell_extended = 90, .suspend_time = 0, .max_out_time = 0, }, @@ -105,6 +100,7 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = { .dwell_active = 10, .dwell_passive = 110, .dwell_fragmented = 44, + .dwell_extended = 90, .suspend_time = 30, .max_out_time = 120, }, @@ -112,6 +108,7 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = { .dwell_active = 10, .dwell_passive = 110, .dwell_fragmented = 44, + .dwell_extended = 90, .suspend_time = 120, .max_out_time = 120, }, @@ -206,9 +203,7 @@ static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) } static enum -iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) { int global_cnt = 0; enum iwl_mvm_traffic_load load; @@ -224,8 +219,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, load = iwl_mvm_get_traffic_load(mvm); low_latency = iwl_mvm_low_latency(mvm); - if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && - vif->type != NL80211_IFTYPE_P2P_DEVICE && + if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) return IWL_SCAN_TYPE_FRAGMENTED; @@ -333,6 +327,13 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); + /* If this happens, the firmware has mistakenly sent an LMAC + * notification during UMAC scans -- warn and ignore it. 
+ */ + if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN))) + return; + /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex); @@ -719,6 +720,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, cmd->active_dwell = scan_timing[params->type].dwell_active; cmd->passive_dwell = scan_timing[params->type].dwell_passive; cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->extended_dwell = scan_timing[params->type].dwell_extended; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); @@ -752,8 +754,15 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, vif->type != NL80211_IFTYPE_P2P_DEVICE); } +static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) +{ + return params->n_scan_plans == 1 && + params->scan_plans[0].iterations == 1; +} + static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, - struct iwl_mvm_scan_params *params) + struct iwl_mvm_scan_params *params, + struct ieee80211_vif *vif) { int flags = 0; @@ -779,6 +788,11 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; #endif + if (iwl_mvm_is_regular_scan(params) && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + params->type != IWL_SCAN_TYPE_FRAGMENTED) + flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; + return flags; } @@ -807,7 +821,8 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->delay = cpu_to_le32(params->delay); - cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params)); + cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params, + vif)); cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band); cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | @@ -910,10 +925,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; + if (type == mvm->scan_type) + return 0; + cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; scan_config = kzalloc(cmd_size, GFP_KERNEL); @@ -928,15 +947,20 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) SCAN_CONFIG_FLAG_SET_LEGACY_RATES | SCAN_CONFIG_FLAG_SET_MAC_ADDR | SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| - SCAN_CONFIG_N_CHANNELS(num_channels)); + SCAN_CONFIG_N_CHANNELS(num_channels) | + (type == IWL_SCAN_TYPE_FRAGMENTED ? 
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED)); scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); - scan_config->out_of_channel_time = cpu_to_le32(170); - scan_config->suspend_time = cpu_to_le32(30); - scan_config->dwell_active = 20; - scan_config->dwell_passive = 110; - scan_config->dwell_fragmented = 20; + scan_config->out_of_channel_time = + cpu_to_le32(scan_timing[type].max_out_time); + scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); + scan_config->dwell_active = scan_timing[type].dwell_active; + scan_config->dwell_passive = scan_timing[type].dwell_passive; + scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented; + scan_config->dwell_extended = scan_timing[type].dwell_extended; memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); @@ -960,6 +984,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); + if (!ret) + mvm->scan_type = type; kfree(scan_config); return ret; @@ -976,16 +1002,11 @@ static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) return -ENOENT; } -static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) -{ - return params->n_scan_plans == 1 && - params->scan_plans[0].iterations == 1; -} - static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { + cmd->extended_dwell = scan_timing[params->type].dwell_extended; cmd->active_dwell = scan_timing[params->type].dwell_active; cmd->passive_dwell = scan_timing[params->type].dwell_passive; cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; @@ -1020,7 +1041,8 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, } static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, - struct iwl_mvm_scan_params *params) + struct iwl_mvm_scan_params *params, + struct ieee80211_vif *vif) { int flags = 0; @@ -1048,6 +1070,12 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (mvm->scan_iter_notif_enabled) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #endif + + if (iwl_mvm_is_regular_scan(params) && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + params->type != IWL_SCAN_TYPE_FRAGMENTED) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; + return flags; } @@ -1078,7 +1106,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_uid_status[uid] = type; cmd->uid = cpu_to_le32(uid); - cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); + cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params, + vif)); if (type == IWL_MVM_SCAN_SCHED) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); @@ -1150,7 +1179,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) case IWL_MVM_SCAN_SCHED: if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return -EBUSY; - iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); case IWL_MVM_SCAN_NETDETECT: /* No need to stop anything for net-detect since the * firmware is restarted anyway. 
This way, any sched @@ -1213,7 +1242,9 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.scan_plans = &scan_plan; params.n_scan_plans = 1; - params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); + params.type = + iwl_mvm_get_scan_type(mvm, + vif->type == NL80211_IFTYPE_P2P_DEVICE); iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); @@ -1295,7 +1326,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.n_scan_plans = req->n_scan_plans; params.scan_plans = req->scan_plans; - params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); + params.type = + iwl_mvm_get_scan_type(mvm, + vif->type == NL80211_IFTYPE_P2P_DEVICE); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the scan is a bit silly diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index b0f59fdd287c..c2def1232a8c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 354acbde088e..b556e33658d7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -106,6 +106,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, .add_modify = update ? 
1 : 0, .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK), + .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), }; int ret; u32 status; @@ -277,11 +278,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (sta_id == IWL_MVM_STATION_COUNT) return -ENOSPC; - if (vif->type == NL80211_IFTYPE_AP) { - mvmvif->ap_assoc_sta_count++; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - spin_lock_init(&mvm_sta->lock); mvm_sta->sta_id = sta_id; @@ -580,9 +576,9 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, return ret; } -static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype) +int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 qmask, enum nl80211_iftype iftype) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); @@ -622,6 +618,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, color)); cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) memcpy(cmd.addr, addr, ETH_ALEN); @@ -671,6 +668,33 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) return ret; } +int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, + mvmvif->id, 0); +} + +int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + + return ret; +} + +void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) +{ + iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); +} + void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); @@ -1196,22 +1220,17 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) if (max_offs < 0) return STA_KEY_IDX_INVALID; - __set_bit(max_offs, mvm->fw_key_table); - return max_offs; } -static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (sta) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - - return mvm_sta->sta_id; - } + if (sta) + return iwl_mvm_sta_from_mac80211(sta); /* * The device expects GTKs for station interfaces to be @@ -1222,20 +1241,20 @@ static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm, mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { u8 sta_id = mvmvif->ap_sta_id; - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); + sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); /* * It is possible that the 'sta' parameter is NULL, * for example when a GTK is removed - the sta_id will then * be the AP ID, and no station was passed by mac80211. 
*/ if (IS_ERR_OR_NULL(sta)) - return IWL_MVM_STATION_COUNT; + return NULL; - return sta_id; + return iwl_mvm_sta_from_mac80211(sta); } - return IWL_MVM_STATION_COUNT; + return NULL; } static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, @@ -1452,6 +1471,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + struct iwl_mvm_sta *mvm_sta; u8 sta_id; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; @@ -1459,11 +1479,12 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); - if (sta_id == IWL_MVM_STATION_COUNT) { - IWL_ERR(mvm, "Failed to find station id\n"); + mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); + if (!mvm_sta) { + IWL_ERR(mvm, "Failed to find station\n"); return -EINVAL; } + sta_id = mvm_sta->sta_id; if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); @@ -1505,10 +1526,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); - if (ret) { - __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + if (ret) goto end; - } /* * For WEP, the same key is used for multicast and unicast. Upload it @@ -1521,11 +1540,13 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast); if (ret) { - __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); + goto end; } } + __set_bit(key_offset, mvm->fw_key_table); + end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, @@ -1539,13 +1560,14 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_key_conf *keyconf) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); - u8 sta_id; + struct iwl_mvm_sta *mvm_sta; + u8 sta_id = IWL_MVM_STATION_COUNT; int ret, i; lockdep_assert_held(&mvm->mutex); - /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); + /* Get the station from the mvm local station table */ + mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); @@ -1566,11 +1588,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, } mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; - if (sta_id == IWL_MVM_STATION_COUNT) { + if (!mvm_sta) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; } + sta_id = mvm_sta->sta_id; + ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; @@ -1590,25 +1614,17 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; - u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); - if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) - return; - rcu_read_lock(); - if (!sta) { - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON(IS_ERR_OR_NULL(sta))) { - rcu_read_unlock(); - return; - } - } - - mvm_sta = iwl_mvm_sta_from_mac80211(sta); + mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); + if (WARN_ON_ONCE(!mvm_sta)) + goto unlock; iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); + + unlock: rcu_read_unlock(); } @@ -1662,6 +1678,7 @@ void 
iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, */ if (agg) { int remaining = cnt; + int sleep_tx_count; spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { @@ -1686,9 +1703,12 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, } remaining -= n_queued; } + sleep_tx_count = cnt - remaining; + if (reason == IEEE80211_FRAME_RELEASE_UAPSD) + mvmsta->sleep_tx_count = sleep_tx_count; spin_unlock_bh(&mvmsta->lock); - cmd.sleep_tx_count = cpu_to_le16(cnt - remaining); + cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); if (WARN_ON(cnt - remaining == 0)) { ieee80211_sta_eosp(sta); return; @@ -1706,7 +1726,12 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); } - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + /* block the Tx queues until the FW updated the sleep Tx count */ + iwl_trans_block_txq_ptrs(mvm->trans, true); + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, + CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, + sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 0631cc0a6d3c..39fdf5224e81 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -284,6 +286,13 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) tid_data->next_reclaimed); } +struct iwl_mvm_key_pn { + struct rcu_head rcu_head; + struct { + u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN]; + } ____cacheline_aligned_in_smp q[]; +}; + /** * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) @@ -303,6 +312,12 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) + * @sleep_tx_count: the number of frames that we told the firmware to let out + * even when that station is asleep. This is useful in case the queue + * gets empty before all the frames were sent, which can happen when + * we are sending frames from an AMPDU queue and there was a hole in + * the BA window. To be used for UAPSD only. + * @ptk_pn: per-queue PTK PN data structures * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. 
This structure is placed in that @@ -323,12 +338,15 @@ struct iwl_mvm_sta { struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; + struct iwl_mvm_key_pn __rcu *ptk_pn[4]; + /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; bool disable_tx; u8 agg_tids; + u8 sleep_tx_count; }; static inline struct iwl_mvm_sta * @@ -401,7 +419,13 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 qmask, enum nl80211_iftype iftype); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); void iwl_mvm_sta_drained_wk(struct work_struct *wk); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index fe2fa5650443..18711c5de35a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h index 79ab6beb6b26..cbbc16fd006a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/testmode.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 7530eb23035d..924dd6a41626 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -73,6 +73,7 @@ #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" +#include "fw-dbg.h" /* * For the high priority TE use a time event type that has similar priority to @@ -791,11 +792,9 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } -void iwl_mvm_stop_roc(struct iwl_mvm *mvm) +static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm) { - struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_time_event_data *te_data; - bool is_p2p = false; lockdep_assert_held(&mvm->mutex); @@ -809,11 +808,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->time_event_list, list) { - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - is_p2p = true; - goto remove_te; - } + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) + goto out; } /* There can only be at most one AUX ROC time event, we just use the @@ -822,18 +818,35 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, struct iwl_mvm_time_event_data, list); +out: + spin_unlock_bh(&mvm->time_event_lock); + return te_data; +} + +void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) +{ + struct iwl_mvm_time_event_data *te_data; + u32 uid; + + te_data = iwl_mvm_get_roc_te(mvm); if (te_data) - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + __iwl_mvm_remove_time_event(mvm, te_data, &uid); +} -remove_te: - spin_unlock_bh(&mvm->time_event_lock); +void iwl_mvm_stop_roc(struct iwl_mvm *mvm) +{ + struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_time_event_data *te_data; - if (!mvmvif) { + te_data = iwl_mvm_get_roc_te(mvm); + if (!te_data) { IWL_WARN(mvm, "No remain on channel event\n"); return; } - if (is_p2p) + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) iwl_mvm_remove_time_event(mvm, mvmvif, te_data); else iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index cbdf8e52a5f1..99d9a35ad5b1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -215,6 +215,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data); +void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm); void iwl_mvm_roc_done_wk(struct work_struct *wk); /** diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index 4007f1d421dd..a1947d6f3a2c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h index 9beebc33cb8d..8c3421c9991d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tof.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h @@ -25,7 +25,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index cadfc0460597..fb76004eede4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -120,7 +120,7 @@ static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, int len = iwl_rx_packet_payload_len(pkt); int temp; - if (WARN_ON_ONCE(len != sizeof(*notif))) { + if (WARN_ON_ONCE(len < sizeof(*notif))) { IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); return -EINVAL; } diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index c652a66be803..8bf48a7d0f4e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -64,11 +64,13 @@ *****************************************************************************/ #include <linux/ieee80211.h> #include <linux/etherdevice.h> +#include <linux/tcp.h> #include "iwl-trans.h" #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" +#include "fw-dbg.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, @@ -344,8 +346,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); memset(&info->status, 0, sizeof(info->status)); + memset(info->driver_data, 0, sizeof(info->driver_data)); - info->driver_data[0] = NULL; info->driver_data[1] = dev_cmd; return dev_cmd; @@ -424,11 +426,39 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return 0; } +static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso, + struct ieee80211_sta *sta, + struct sk_buff_head *mpdus_skb) +{ + struct sk_buff *tmp, *next; + char cb[sizeof(skb_gso->cb)]; + + memcpy(cb, skb_gso->cb, sizeof(cb)); + next = skb_gso_segment(skb_gso, 0); + if (IS_ERR(next)) + return -EINVAL; + else if (next) + consume_skb(skb_gso); + + while (next) { + tmp = next; + next = tmp->next; + memcpy(tmp->cb, cb, sizeof(tmp->cb)); + + tmp->prev = NULL; + tmp->next = NULL; + + __skb_queue_tail(mpdus_skb, tmp); + } + + return 0; +} + /* * Sets the fields in the Tx cmd that are crypto related */ -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta) +static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = 
IEEE80211_SKB_CB(skb); @@ -524,6 +554,51 @@ drop: return -1; } +int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct sk_buff_head mpdus_skbs; + unsigned int payload_len; + int ret; + + if (WARN_ON_ONCE(!mvmsta)) + return -1; + + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + return -1; + + if (!skb_is_gso(skb)) + return iwl_mvm_tx_mpdu(mvm, skb, sta); + + payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - + tcp_hdrlen(skb) + skb->data_len; + + if (payload_len <= skb_shinfo(skb)->gso_size) + return iwl_mvm_tx_mpdu(mvm, skb, sta); + + __skb_queue_head_init(&mpdus_skbs); + + ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs); + if (ret) + return ret; + + if (WARN_ON(skb_queue_empty(&mpdus_skbs))) + return ret; + + while (!skb_queue_empty(&mpdus_skbs)) { + struct sk_buff *skb = __skb_dequeue(&mpdus_skbs); + + ret = iwl_mvm_tx_mpdu(mvm, skb, sta); + if (ret) { + __skb_queue_purge(&mpdus_skbs); + return ret; + } + } + + return 0; +} + static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 tid) { @@ -787,13 +862,43 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, if (tid != IWL_TID_NON_QOS) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); tid_data->next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", next_reclaimed); iwl_mvm_check_ratid_empty(mvm, sta, tid); + + if (mvmsta->sleep_tx_count) { + mvmsta->sleep_tx_count--; + if (mvmsta->sleep_tx_count && + !iwl_mvm_tid_queued(tid_data)) { + /* + * The number of frames in the queue + * dropped to 0 even if we sent less + * frames than we thought we had on the + * Tx queue. + * This means we had holes in the BA + * window that we just filled, ask + * mac80211 to send EOSP since the + * firmware won't know how to do that. + * Send NDP and the firmware will send + * EOSP notification that will trigger + * a call to ieee80211_sta_eosp(). + */ + send_eosp_ndp = true; + } + } + spin_unlock_bh(&mvmsta->lock); + if (send_eosp_ndp) { + iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, + IEEE80211_FRAME_RELEASE_UAPSD, + 1, tid, false, false); + mvmsta->sleep_tx_count = 0; + ieee80211_send_eosp_nullfunc(sta, tid); + } } if (mvmsta->next_status_eosp) { diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index ad0f16909e2e..3a989f5c20db 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -27,7 +27,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -68,7 +68,7 @@ #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-prph.h" - +#include "fw-dbg.h" #include "mvm.h" #include "fw-api-rs.h" diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 639761fb2bfb..6261a68cae90 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -26,7 +26,7 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE @@ -377,6 +377,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, +/* 3168 Series */ + {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, + /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, @@ -384,6 +388,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, @@ -401,10 +406,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, @@ -467,6 +472,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, + +/* 9000 Series */ + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -598,7 +619,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) set_dflt_pwr_limit(iwl_trans, pdev); /* register transport layer debugfs here */ - ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir); + ret = iwl_trans_pcie_dbgfs_register(iwl_trans); if (ret) goto out_free_drv; diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index feb2f7e81134..cc3888e2700d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -23,7 +23,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -278,6 +278,7 @@ struct iwl_txq { bool frozen; u8 active; bool ampdu; + bool block; unsigned long wd_timeout; }; @@ -288,6 +289,11 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) sizeof(struct iwl_pcie_txq_scratch_buf) * idx; } +struct iwl_tso_hdr_page { + struct page *page; + u8 *pos; +}; + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data @@ -302,10 +308,12 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) * @ucode_write_complete: indicates that the ucode has been copied. * @ucode_write_waitq: wait queue for uCode load * @cmd_queue - command queue number - * @rx_buf_size_8k: 8 kB RX buffer size + * @rx_buf_size: Rx buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue * @wide_cmd_header: true when ucode supports wide command header format + * @sw_csum_tx: if true, then the transport will compute the csum of the TXed + * frame. * @rx_page_order: page order for receive buffer size * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw @@ -323,6 +331,8 @@ struct iwl_trans_pcie { struct net_device napi_dev; struct napi_struct napi; + struct __percpu iwl_tso_hdr_page *tso_hdr_page; + /* INT ICT Table */ __le32 *ict_tbl; dma_addr_t ict_tbl_dma; @@ -356,14 +366,13 @@ struct iwl_trans_pcie { u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; - bool rx_buf_size_8k; + enum iwl_amsdu_size rx_buf_size; bool bc_table_dword; bool scd_set_active; bool wide_cmd_header; + bool sw_csum_tx; u32 rx_page_order; - const char *const *command_names; - /*protect hw register */ spinlock_t reg_lock; bool cmd_hold_nic_awake; @@ -378,8 +387,11 @@ struct iwl_trans_pcie { u32 fw_mon_size; }; -#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ - ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) +static inline struct iwl_trans_pcie * +IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans) +{ + return (void *)trans->trans_specific; +} static inline struct iwl_trans * iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) @@ -523,14 +535,6 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) return index & (q->n_window - 1); } -static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie, - u8 cmd) -{ - if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) - return "UNKNOWN"; - return trans_pcie->command_names[cmd]; -} - static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { return !(iwl_read32(trans, CSR_GP_CNTRL) & @@ -566,4 +570,13 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); +#else +static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) +{ + return 0; +} +#endif + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index e06591f625c4..ccafbd8cf4b3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -23,7 +23,7 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ @@ -602,10 +602,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) u32 rb_size; const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ - if (trans_pcie->rx_buf_size_8k) + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_4K: + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; - else + break; + case IWL_AMSDU_12K: + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; + break; + default: + WARN_ON(1); rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + } /* Stop Rx DMA */ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); @@ -629,7 +639,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in * the credit mechanism in 5000 HW RX FIFO * Direct rx interrupts to hosts - * Rx buffer size 4 or 8k + * Rx buffer size 4 or 8k or 12k * RB timeout 0x10 * 256 RBDs */ @@ -867,7 +877,10 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", rxcb._offset, - get_cmd_string(trans_pcie, pkt->hdr.cmd), + iwl_get_cmd_string(trans, + iwl_cmd_id(pkt->hdr.cmd, + pkt->hdr.group_id, + 0)), pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); len = iwl_rx_packet_len(pkt); @@ -986,8 +999,7 @@ restart: rxb = rxq->queue[i]; rxq->queue[i] = NULL; - IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", - r, i, rxb); + IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); iwl_pcie_rx_handle_rb(trans, rxb, emergency); i = (i + 1) & RX_QUEUE_MASK; @@ -1481,10 +1493,6 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans) return -EINVAL; } - IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n", - (unsigned long long)trans_pcie->ict_tbl_dma, - trans_pcie->ict_tbl); - return 0; } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 90283453073c..d60a467a983c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,13 +27,14 @@ * in the file called COPYING. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
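Replacing the old rx_buf_size_8k boolean with enum iwl_amsdu_size lets the PCIe transport pick 4 kB, 8 kB or 12 kB receive buffers: iwl_pcie_rx_hw_init() maps the enum onto the FH_RCSR RB-size field, and iwl_trans_pcie_configure() derives the page order from the same value via iwl_trans_get_rb_size_order(). Below is a minimal, self-contained sketch of that size-to-page-order mapping; it assumes 4 kB pages and reimplements the order computation in plain C purely for illustration (the kernel uses get_order()).

/* Illustrative mapping from enum iwl_amsdu_size to an allocation order,
 * assuming PAGE_SIZE == 4096. */
#include <stdio.h>

enum iwl_amsdu_size {
	IWL_AMSDU_4K,
	IWL_AMSDU_8K,
	IWL_AMSDU_12K,
};

#define MODEL_PAGE_SIZE 4096u

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int order_for(unsigned int size)
{
	unsigned int order = 0;

	while ((MODEL_PAGE_SIZE << order) < size)
		order++;
	return order;
}

static unsigned int rb_size_order(enum iwl_amsdu_size sz)
{
	switch (sz) {
	case IWL_AMSDU_4K:
		return order_for(4 * 1024);  /* order 0 */
	case IWL_AMSDU_8K:
		return order_for(8 * 1024);  /* order 1 */
	case IWL_AMSDU_12K:
		return order_for(12 * 1024); /* order 2: rounded up to 16 kB */
	}
	return 0;
}

int main(void)
{
	printf("12K buffers need order-%u pages\n",
	       rb_size_order(IWL_AMSDU_12K));
	return 0;
}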
* * Redistribution and use in source and binary forms, with or without @@ -924,9 +926,16 @@ monitor: if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), trans_pcie->fw_mon_phys >> dest->base_shift); - iwl_write_prph(trans, le32_to_cpu(dest->end_reg), - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size) >> dest->end_shift); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size - 256) >> + dest->end_shift); + else + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size) >> + dest->end_shift); } } @@ -1213,7 +1222,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans->wowlan_d0i3) { + if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); @@ -1237,7 +1246,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - if (!trans->wowlan_d0i3) { + if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try @@ -1286,7 +1295,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - if (trans->wowlan_d0i3) { + if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } else { @@ -1435,16 +1444,17 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, trans_pcie->n_no_reclaim_cmds * sizeof(u8)); - trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; - if (trans_pcie->rx_buf_size_8k) - trans_pcie->rx_page_order = get_order(8 * 1024); - else - trans_pcie->rx_page_order = get_order(4 * 1024); + trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; + trans_pcie->rx_page_order = + iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; - trans_pcie->command_names = trans_cfg->command_names; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; + trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; + + trans->command_groups = trans_cfg->command_groups; + trans->command_groups_size = trans_cfg->command_groups_size; /* init ref_count to 1 (should be cleared when ucode is loaded) */ trans_pcie->ref_count = 1; @@ -1464,6 +1474,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; synchronize_irq(trans_pcie->pci_dev->irq); @@ -1483,6 +1494,15 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_fw_monitor(trans); + for_each_possible_cpu(i) { + struct iwl_tso_hdr_page *p = + per_cpu_ptr(trans_pcie->tso_hdr_page, i); + + if (p->page) + __free_page(p->page); + } + + free_percpu(trans_pcie->tso_hdr_page); iwl_trans_free(trans); } @@ -1494,8 +1514,8 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) clear_bit(STATUS_TPOWER_PMI, &trans->status); } -static bool 
iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, - unsigned long *flags) +static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, + unsigned long *flags) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1536,14 +1556,11 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); - if (!silent) { - u32 val = iwl_read32(trans, CSR_GP_CNTRL); - WARN_ONCE(1, - "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", - val); - spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); - return false; - } + WARN_ONCE(1, + "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", + iwl_read32(trans, CSR_GP_CNTRL)); + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); + return false; } out: @@ -1591,7 +1608,7 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, int offs, ret = 0; u32 *vals = buf; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { + if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); for (offs = 0; offs < dwords; offs++) vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); @@ -1609,7 +1626,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, int offs, ret = 0; const u32 *vals = buf; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { + if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); for (offs = 0; offs < dwords; offs++) iwl_write32(trans, HBUS_TARG_MEM_WDAT, @@ -1675,6 +1692,33 @@ next_queue: } } +static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = &trans_pcie->txq[i]; + + if (i == trans_pcie->cmd_queue) + continue; + + spin_lock_bh(&txq->lock); + + if (!block && !(WARN_ON_ONCE(!txq->block))) { + txq->block--; + if (!txq->block) { + iwl_write32(trans, HBUS_TARG_WRPTR, + txq->q.write_ptr | (i << 8)); + } + } else if (block) { + txq->block++; + } + + spin_unlock_bh(&txq->lock); + } +} + #define IWL_FLUSH_WAIT_MS 2000 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) @@ -2109,13 +2153,11 @@ DEBUGFS_READ_FILE_OPS(rx_queue); DEBUGFS_READ_FILE_OPS(tx_queue); DEBUGFS_WRITE_FILE_OPS(csr); -/* - * Create the debugfs files and directories - * - */ -static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) +/* Create the debugfs files and directories */ +int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { + struct dentry *dir = trans->dbgfs_dir; + DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); @@ -2127,12 +2169,6 @@ err: IWL_ERR(trans, "failed to create the trans debugfs entry\n"); return -ENOMEM; } -#else -static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) -{ - return 0; -} #endif /*CONFIG_IWLWIFI_DEBUGFS */ static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) @@ -2146,144 +2182,6 @@ static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) return cmdlen; } -static const struct { - u32 start, end; -} iwl_prph_dump_addr[] = { - { .start = 0x00a00000, .end = 0x00a00000 }, - { .start = 0x00a0000c, .end = 0x00a00024 }, - { .start = 
0x00a0002c, .end = 0x00a0003c }, - { .start = 0x00a00410, .end = 0x00a00418 }, - { .start = 0x00a00420, .end = 0x00a00420 }, - { .start = 0x00a00428, .end = 0x00a00428 }, - { .start = 0x00a00430, .end = 0x00a0043c }, - { .start = 0x00a00444, .end = 0x00a00444 }, - { .start = 0x00a004c0, .end = 0x00a004cc }, - { .start = 0x00a004d8, .end = 0x00a004d8 }, - { .start = 0x00a004e0, .end = 0x00a004f0 }, - { .start = 0x00a00840, .end = 0x00a00840 }, - { .start = 0x00a00850, .end = 0x00a00858 }, - { .start = 0x00a01004, .end = 0x00a01008 }, - { .start = 0x00a01010, .end = 0x00a01010 }, - { .start = 0x00a01018, .end = 0x00a01018 }, - { .start = 0x00a01024, .end = 0x00a01024 }, - { .start = 0x00a0102c, .end = 0x00a01034 }, - { .start = 0x00a0103c, .end = 0x00a01040 }, - { .start = 0x00a01048, .end = 0x00a01094 }, - { .start = 0x00a01c00, .end = 0x00a01c20 }, - { .start = 0x00a01c58, .end = 0x00a01c58 }, - { .start = 0x00a01c7c, .end = 0x00a01c7c }, - { .start = 0x00a01c28, .end = 0x00a01c54 }, - { .start = 0x00a01c5c, .end = 0x00a01c5c }, - { .start = 0x00a01c60, .end = 0x00a01cdc }, - { .start = 0x00a01ce0, .end = 0x00a01d0c }, - { .start = 0x00a01d18, .end = 0x00a01d20 }, - { .start = 0x00a01d2c, .end = 0x00a01d30 }, - { .start = 0x00a01d40, .end = 0x00a01d5c }, - { .start = 0x00a01d80, .end = 0x00a01d80 }, - { .start = 0x00a01d98, .end = 0x00a01d9c }, - { .start = 0x00a01da8, .end = 0x00a01da8 }, - { .start = 0x00a01db8, .end = 0x00a01df4 }, - { .start = 0x00a01dc0, .end = 0x00a01dfc }, - { .start = 0x00a01e00, .end = 0x00a01e2c }, - { .start = 0x00a01e40, .end = 0x00a01e60 }, - { .start = 0x00a01e68, .end = 0x00a01e6c }, - { .start = 0x00a01e74, .end = 0x00a01e74 }, - { .start = 0x00a01e84, .end = 0x00a01e90 }, - { .start = 0x00a01e9c, .end = 0x00a01ec4 }, - { .start = 0x00a01ed0, .end = 0x00a01ee0 }, - { .start = 0x00a01f00, .end = 0x00a01f1c }, - { .start = 0x00a01f44, .end = 0x00a01ffc }, - { .start = 0x00a02000, .end = 0x00a02048 }, - { .start = 0x00a02068, .end = 0x00a020f0 }, - { .start = 0x00a02100, .end = 0x00a02118 }, - { .start = 0x00a02140, .end = 0x00a0214c }, - { .start = 0x00a02168, .end = 0x00a0218c }, - { .start = 0x00a021c0, .end = 0x00a021c0 }, - { .start = 0x00a02400, .end = 0x00a02410 }, - { .start = 0x00a02418, .end = 0x00a02420 }, - { .start = 0x00a02428, .end = 0x00a0242c }, - { .start = 0x00a02434, .end = 0x00a02434 }, - { .start = 0x00a02440, .end = 0x00a02460 }, - { .start = 0x00a02468, .end = 0x00a024b0 }, - { .start = 0x00a024c8, .end = 0x00a024cc }, - { .start = 0x00a02500, .end = 0x00a02504 }, - { .start = 0x00a0250c, .end = 0x00a02510 }, - { .start = 0x00a02540, .end = 0x00a02554 }, - { .start = 0x00a02580, .end = 0x00a025f4 }, - { .start = 0x00a02600, .end = 0x00a0260c }, - { .start = 0x00a02648, .end = 0x00a02650 }, - { .start = 0x00a02680, .end = 0x00a02680 }, - { .start = 0x00a026c0, .end = 0x00a026d0 }, - { .start = 0x00a02700, .end = 0x00a0270c }, - { .start = 0x00a02804, .end = 0x00a02804 }, - { .start = 0x00a02818, .end = 0x00a0281c }, - { .start = 0x00a02c00, .end = 0x00a02db4 }, - { .start = 0x00a02df4, .end = 0x00a02fb0 }, - { .start = 0x00a03000, .end = 0x00a03014 }, - { .start = 0x00a0301c, .end = 0x00a0302c }, - { .start = 0x00a03034, .end = 0x00a03038 }, - { .start = 0x00a03040, .end = 0x00a03048 }, - { .start = 0x00a03060, .end = 0x00a03068 }, - { .start = 0x00a03070, .end = 0x00a03074 }, - { .start = 0x00a0307c, .end = 0x00a0307c }, - { .start = 0x00a03080, .end = 0x00a03084 }, - { .start = 0x00a0308c, .end = 0x00a03090 }, - { .start = 0x00a03098, 
.end = 0x00a03098 }, - { .start = 0x00a030a0, .end = 0x00a030a0 }, - { .start = 0x00a030a8, .end = 0x00a030b4 }, - { .start = 0x00a030bc, .end = 0x00a030bc }, - { .start = 0x00a030c0, .end = 0x00a0312c }, - { .start = 0x00a03c00, .end = 0x00a03c5c }, - { .start = 0x00a04400, .end = 0x00a04454 }, - { .start = 0x00a04460, .end = 0x00a04474 }, - { .start = 0x00a044c0, .end = 0x00a044ec }, - { .start = 0x00a04500, .end = 0x00a04504 }, - { .start = 0x00a04510, .end = 0x00a04538 }, - { .start = 0x00a04540, .end = 0x00a04548 }, - { .start = 0x00a04560, .end = 0x00a0457c }, - { .start = 0x00a04590, .end = 0x00a04598 }, - { .start = 0x00a045c0, .end = 0x00a045f4 }, -}; - -static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data) -{ - struct iwl_fw_error_dump_prph *prph; - unsigned long flags; - u32 prph_len = 0, i; - - if (!iwl_trans_grab_nic_access(trans, false, &flags)) - return 0; - - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - int reg; - __le32 *val; - - prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); - (*data)->len = cpu_to_le32(sizeof(*prph) + - num_bytes_in_chunk); - prph = (void *)(*data)->data; - prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - val = (void *)prph->data; - - for (reg = iwl_prph_dump_addr[i].start; - reg <= iwl_prph_dump_addr[i].end; - reg += 4) - *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, - reg)); - *data = iwl_fw_error_next_data(*data); - } - - iwl_trans_release_nic_access(trans, &flags); - - return prph_len; -} - static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, int allocated_rb_nums) @@ -2354,7 +2252,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, __le32 *val; int i; - if (!iwl_trans_grab_nic_access(trans, false, &flags)) + if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); @@ -2381,13 +2279,14 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, unsigned long flags; u32 i; - if (!iwl_trans_grab_nic_access(trans, false, &flags)) + if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; - __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1); + iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); for (i = 0; i < buf_size_in_dwords; i++) - buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR); - __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0); + buffer[i] = iwl_read_prph_no_grab(trans, + MON_DMARB_RD_DATA_ADDR); + iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); iwl_trans_release_nic_access(trans, &flags); @@ -2474,7 +2373,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, static struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans, - struct iwl_fw_dbg_trigger_tlv *trigger) + const struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; @@ -2535,16 +2434,6 @@ static struct iwl_trans_dump_data /* CSR registers */ len += sizeof(*data) + IWL_CSR_TO_DUMP; - /* PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - - len += sizeof(*data) + sizeof(struct 
iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - /* FH registers */ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); @@ -2592,7 +2481,6 @@ static struct iwl_trans_dump_data len += sizeof(*data); data = iwl_fw_error_next_data(data); - len += iwl_trans_pcie_dump_prph(trans, &data); len += iwl_trans_pcie_dump_csr(trans, &data); len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) @@ -2623,10 +2511,9 @@ static const struct iwl_trans_ops trans_ops_pcie = { .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, - .dbgfs_register = iwl_trans_pcie_dbgfs_register, - .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, + .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, .write8 = iwl_trans_pcie_write8, .write32 = iwl_trans_pcie_write32, @@ -2671,6 +2558,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->ref_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); + trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); + if (!trans_pcie->tso_hdr_page) { + ret = -ENOMEM; + goto out_no_pci; + } ret = pci_enable_device(pdev); if (ret) @@ -2772,13 +2664,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_pci_disable_msi; } - if (iwl_trans_grab_nic_access(trans, false, &flags)) { + if (iwl_trans_grab_nic_access(trans, &flags)) { u32 hw_step; - hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG); + hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG); hw_step |= ENABLE_WFPM; - __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step); - hw_step = __iwl_read_prph(trans, AUX_MISC_REG); + iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); + hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG); hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; if (hw_step == 0x3) trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | @@ -2807,7 +2699,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } trans_pcie->inta_mask = CSR_INI_SET_MASK; - trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND; return trans; @@ -2820,6 +2711,7 @@ out_pci_release_regions: out_pci_disable_device: pci_disable_device(pdev); out_no_pci: + free_percpu(trans_pcie->tso_hdr_page); iwl_trans_free(trans); return ERR_PTR(ret); } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index a8c8a4a7420b..5262028b5505 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -23,13 +23,17 @@ * file called LICENSE. * * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/etherdevice.h> +#include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/sched.h> +#include <net/ip6_checksum.h> +#include <net/tso.h> +#include <net/ip6_checksum.h> #include "iwl-debug.h" #include "iwl-csr.h" @@ -318,7 +322,9 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, * trying to tx (during RFKILL, we're not trying to tx). 
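The new txq->block counter lets iwl_trans_pcie_block_txq_ptrs() freeze the scheduler write pointers around the ADD_STA command used for the U-APSD sleep Tx count: while a queue is blocked, iwl_pcie_txq_inc_wr_ptr() only advances the software pointer, and the deferred value is written to HBUS_TARG_WRPTR when the last unblock arrives. A compact, standalone model of that block/unblock pattern follows; the hardware register write is stubbed out and the names only mirror the driver's.

/* Self-contained model of the txq->block counter added in this change.
 * hw_write_wrptr() stands in for the HBUS_TARG_WRPTR register write. */
#include <stdio.h>

struct txq_model {
	unsigned int write_ptr; /* software write pointer */
	unsigned int block;     /* >0: defer the hardware update */
};

static void hw_write_wrptr(unsigned int ptr)
{
	printf("HW write pointer <- %u\n", ptr);
}

/* Mirrors iwl_pcie_txq_inc_wr_ptr(): only touch the hardware when the
 * queue is not blocked. */
static void inc_wr_ptr(struct txq_model *q)
{
	q->write_ptr++;
	if (!q->block)
		hw_write_wrptr(q->write_ptr);
}

/* Mirrors iwl_trans_pcie_block_txq_ptrs(): the last unblock flushes the
 * pointer value the hardware missed while the queue was frozen. */
static void block_txq(struct txq_model *q, int block)
{
	if (block)
		q->block++;
	else if (q->block && !--q->block)
		hw_write_wrptr(q->write_ptr);
}

int main(void)
{
	struct txq_model q = { 0, 0 };

	block_txq(&q, 1); /* freeze around the ADD_STA command */
	inc_wr_ptr(&q);   /* frames queued while blocked: no HW write */
	inc_wr_ptr(&q);
	block_txq(&q, 0); /* firmware done: flush the deferred pointer */
	return 0;
}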
*/ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); - iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); + if (!txq->block) + iwl_write32(trans, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); } void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) @@ -576,6 +582,19 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return 0; } +static void iwl_pcie_free_tso_page(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) { + struct page *page = + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]; + + __free_page(page); + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL; + } +} + /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ @@ -589,6 +608,15 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) while (q->write_ptr != q->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, q->read_ptr); + + if (txq_id != trans_pcie->cmd_queue) { + struct sk_buff *skb = txq->entries[q->read_ptr].skb; + + if (WARN_ON_ONCE(!skb)) + continue; + + iwl_pcie_free_tso_page(skb); + } iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); } @@ -742,7 +770,7 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) spin_lock(&trans_pcie->irq_lock); - if (!iwl_trans_grab_nic_access(trans, false, &flags)) + if (!iwl_trans_grab_nic_access(trans, &flags)) goto out; /* Stop each Tx DMA channel */ @@ -1006,11 +1034,14 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, for (; q->read_ptr != tfd_num; q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { + struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb; - if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) + if (WARN_ON_ONCE(!skb)) continue; - __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); + iwl_pcie_free_tso_page(skb); + + __skb_queue_tail(skbs, skb); txq->entries[txq->q.read_ptr].skb = NULL; @@ -1411,7 +1442,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, */ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, "Command %s (%#x) is too large (%d bytes)\n", - get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) { + iwl_get_cmd_string(trans, cmd->id), + cmd->id, copy_size)) { idx = -EINVAL; goto free_dup_buf; } @@ -1501,7 +1533,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, IWL_DEBUG_HC(trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", - get_cmd_string(trans_pcie, out_cmd->hdr.cmd), + iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); @@ -1591,16 +1623,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim - * - * If an Rx buffer has an async callback associated with it the callback - * will be executed. 
The attached skb (if present) will only be freed - * if the callback returns 1 */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); + u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id); + u32 cmd_id; int txq_id = SEQ_TO_QUEUE(sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index; @@ -1626,6 +1656,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, cmd_index = get_cmd_index(&txq->q, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; + cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); @@ -1638,17 +1669,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, meta->source->_rx_page_order = trans_pcie->rx_page_order; } + if (meta->flags & CMD_WANT_ASYNC_CALLBACK) + iwl_op_mode_async_cb(trans->op_mode, cmd); + iwl_pcie_cmdq_reclaim(trans, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { IWL_WARN(trans, "HCMD_ACTIVE already clear for command %s\n", - get_cmd_string(trans_pcie, cmd->hdr.cmd)); + iwl_get_cmd_string(trans, cmd_id)); } clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - get_cmd_string(trans_pcie, cmd->hdr.cmd)); + iwl_get_cmd_string(trans, cmd_id)); wake_up(&trans_pcie->wait_command_queue); } @@ -1662,7 +1696,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; /* An asynchronous command can not expect an SKB to be set. */ @@ -1673,7 +1706,7 @@ static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, if (ret < 0) { IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(trans_pcie, cmd->id), ret); + iwl_get_cmd_string(trans, cmd->id), ret); return ret; } return 0; @@ -1687,16 +1720,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", - get_cmd_string(trans_pcie, cmd->id)); + iwl_get_cmd_string(trans, cmd->id)); if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), "Command %s: a command is already active!\n", - get_cmd_string(trans_pcie, cmd->id))) + iwl_get_cmd_string(trans, cmd->id))) return -EIO; IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", - get_cmd_string(trans_pcie, cmd->id)); + iwl_get_cmd_string(trans, cmd->id)); cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { @@ -1704,7 +1737,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(trans_pcie, cmd->id), ret); + iwl_get_cmd_string(trans, cmd->id), ret); return ret; } @@ -1717,7 +1750,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, struct iwl_queue *q = &txq->q; IWL_ERR(trans, "Error sending %s: time out after %dms.\n", - get_cmd_string(trans_pcie, cmd->id), + iwl_get_cmd_string(trans, cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", @@ -1725,7 +1758,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - 
get_cmd_string(trans_pcie, cmd->id)); + iwl_get_cmd_string(trans, cmd->id)); ret = -ETIMEDOUT; iwl_force_nmi(trans); @@ -1736,7 +1769,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, if (test_bit(STATUS_FW_ERROR, &trans->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", - get_cmd_string(trans_pcie, cmd->id)); + iwl_get_cmd_string(trans, cmd->id)); dump_stack(); ret = -EIO; goto cancel; @@ -1751,7 +1784,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", - get_cmd_string(trans_pcie, cmd->id)); + iwl_get_cmd_string(trans, cmd->id)); ret = -EIO; goto cancel; } @@ -1794,6 +1827,305 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) return iwl_pcie_send_hcmd_sync(trans, cmd); } +static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_txq *txq, u8 hdr_len, + struct iwl_cmd_meta *out_meta, + struct iwl_device_cmd *dev_cmd, u16 tb1_len) +{ + struct iwl_queue *q = &txq->q; + u16 tb2_len; + int i; + + /* + * Set up TFD's third entry to point directly to remainder + * of skb's head, if any + */ + tb2_len = skb_headlen(skb) - hdr_len; + + if (tb2_len > 0) { + dma_addr_t tb2_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + return -EINVAL; + } + iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); + } + + /* set up the remaining entries to point to the data */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + return -EINVAL; + } + tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, + skb_frag_size(frag), false); + + out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS); + } + + trace_iwlwifi_dev_tx(trans->dev, skb, + &txq->tfds[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + skb->data + hdr_len, tb2_len); + trace_iwlwifi_dev_tx_data(trans->dev, skb, + hdr_len, skb->len - hdr_len); + return 0; +} + +#ifdef CONFIG_INET +static struct iwl_tso_hdr_page * +get_page_hdr(struct iwl_trans *trans, size_t len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); + + if (!p->page) + goto alloc; + + /* enough room on this page */ + if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) + return p; + + /* We don't have enough room on this page, get a new one. 
*/ + __free_page(p->page); + +alloc: + p->page = alloc_page(GFP_ATOMIC); + if (!p->page) + return NULL; + p->pos = page_address(p->page); + return p; +} + +static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, + bool ipv6, unsigned int len) +{ + if (ipv6) { + struct ipv6hdr *iphv6 = iph; + + tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr, + len + tcph->doff * 4, + IPPROTO_TCP, 0); + } else { + struct iphdr *iphv4 = iph; + + ip_send_check(iphv4); + tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr, + len + tcph->doff * 4, + IPPROTO_TCP, 0); + } +} + +static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_txq *txq, u8 hdr_len, + struct iwl_cmd_meta *out_meta, + struct iwl_device_cmd *dev_cmd, u16 tb1_len) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; + unsigned int mss = skb_shinfo(skb)->gso_size; + struct iwl_queue *q = &txq->q; + u16 length, iv_len, amsdu_pad; + u8 *start_hdr; + struct iwl_tso_hdr_page *hdr_page; + int ret; + struct tso_t tso; + + /* if the packet is protected, then it must be CCMP or GCMP */ + BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); + iv_len = ieee80211_has_protected(hdr->frame_control) ? + IEEE80211_CCMP_HDR_LEN : 0; + + trace_iwlwifi_dev_tx(trans->dev, skb, + &txq->tfds[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + NULL, 0); + + ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); + snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); + total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; + amsdu_pad = 0; + + /* total amount of header we may need for this A-MSDU */ + hdr_room = DIV_ROUND_UP(total_len, mss) * + (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; + + /* Our device supports 9 segments at most, it will fit in 1 page */ + hdr_page = get_page_hdr(trans, hdr_room); + if (!hdr_page) + return -ENOMEM; + + get_page(hdr_page->page); + start_hdr = hdr_page->pos; + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page; + memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); + hdr_page->pos += iv_len; + + /* + * Pull the ieee80211 header + IV to be able to use TSO core, + * we will restore it for the tx_status flow. + */ + skb_pull(skb, hdr_len + iv_len); + + tso_start(skb, &tso); + + while (total_len) { + /* this is the data left for this subframe */ + unsigned int data_left = + min_t(unsigned int, mss, total_len); + struct sk_buff *csum_skb = NULL; + unsigned int hdr_tb_len; + dma_addr_t hdr_tb_phys; + struct tcphdr *tcph; + u8 *iph; + + total_len -= data_left; + + memset(hdr_page->pos, 0, amsdu_pad); + hdr_page->pos += amsdu_pad; + amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + + data_left)) & 0x3; + ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); + hdr_page->pos += ETH_ALEN; + ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); + hdr_page->pos += ETH_ALEN; + + length = snap_ip_tcp_hdrlen + data_left; + *((__be16 *)hdr_page->pos) = cpu_to_be16(length); + hdr_page->pos += sizeof(length); + + /* + * This will copy the SNAP as well which will be considered + * as MAC header. 
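The software-checksum path (sw_csum_tx, intended for testing on hardware without Tx checksum offload) seeds each subframe's TCP checksum field with the complemented pseudo-header sum before summing the header and payload; the driver builds that seed with csum_tcpudp_magic()/csum_ipv6_magic() in iwl_compute_pseudo_hdr_csum(). The sketch below is a userspace model of the IPv4 pseudo-header arithmetic only; the addresses and length are made up and the helper names are not the kernel's.

/* One's-complement sum over the IPv4 TCP pseudo header (RFC 1071 style).
 * Folding this together with the sum of the TCP header and payload
 * yields the final checksum. */
#include <stdint.h>
#include <stdio.h>

static uint32_t add16(uint32_t sum, uint16_t v)
{
	sum += v;
	return (sum & 0xffff) + (sum >> 16); /* end-around carry */
}

static uint16_t tcp_pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
				   uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum = add16(sum, saddr >> 16);
	sum = add16(sum, saddr & 0xffff);
	sum = add16(sum, daddr >> 16);
	sum = add16(sum, daddr & 0xffff);
	sum = add16(sum, 6 /* IPPROTO_TCP */);
	sum = add16(sum, tcp_len);
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, 40 bytes of TCP header + payload */
	uint16_t sum = tcp_pseudo_hdr_sum(0xc0000201, 0xc0000202, 40);

	printf("pseudo-header sum 0x%04x; seed the check field with ~sum\n",
	       (unsigned)sum);
	return 0;
}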
+ */ + tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + iph = hdr_page->pos + 8; + tcph = (void *)(iph + ip_hdrlen); + + /* For testing on current hardware only */ + if (trans_pcie->sw_csum_tx) { + csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), + GFP_ATOMIC); + if (!csum_skb) { + ret = -ENOMEM; + goto out_unmap; + } + + iwl_compute_pseudo_hdr_csum(iph, tcph, + skb->protocol == + htons(ETH_P_IPV6), + data_left); + + memcpy(skb_put(csum_skb, tcp_hdrlen(skb)), + tcph, tcp_hdrlen(skb)); + skb_set_transport_header(csum_skb, 0); + csum_skb->csum_start = + (unsigned char *)tcp_hdr(csum_skb) - + csum_skb->head; + } + + hdr_page->pos += snap_ip_tcp_hdrlen; + + hdr_tb_len = hdr_page->pos - start_hdr; + hdr_tb_phys = dma_map_single(trans->dev, start_hdr, + hdr_tb_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { + dev_kfree_skb(csum_skb); + ret = -EINVAL; + goto out_unmap; + } + iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, + hdr_tb_len, false); + trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, + hdr_tb_len); + + /* prepare the start_hdr for the next subframe */ + start_hdr = hdr_page->pos; + + /* put the payload */ + while (data_left) { + unsigned int size = min_t(unsigned int, tso.size, + data_left); + dma_addr_t tb_phys; + + if (trans_pcie->sw_csum_tx) + memcpy(skb_put(csum_skb, size), tso.data, size); + + tb_phys = dma_map_single(trans->dev, tso.data, + size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + dev_kfree_skb(csum_skb); + ret = -EINVAL; + goto out_unmap; + } + + iwl_pcie_txq_build_tfd(trans, txq, tb_phys, + size, false); + trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, + size); + + data_left -= size; + tso_build_data(skb, &tso, size); + } + + /* For testing on early hardware only */ + if (trans_pcie->sw_csum_tx) { + __wsum csum; + + csum = skb_checksum(csum_skb, + skb_checksum_start_offset(csum_skb), + csum_skb->len - + skb_checksum_start_offset(csum_skb), + 0); + dev_kfree_skb(csum_skb); + dma_sync_single_for_cpu(trans->dev, hdr_tb_phys, + hdr_tb_len, DMA_TO_DEVICE); + tcph->check = csum_fold(csum); + dma_sync_single_for_device(trans->dev, hdr_tb_phys, + hdr_tb_len, DMA_TO_DEVICE); + } + } + + /* re -add the WiFi header and IV */ + skb_push(skb, hdr_len + iv_len); + + return 0; + +out_unmap: + iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]); + return ret; +} +#else /* CONFIG_INET */ +static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_txq *txq, u8 hdr_len, + struct iwl_cmd_meta *out_meta, + struct iwl_device_cmd *dev_cmd, u16 tb1_len) +{ + /* No A-MSDU without CONFIG_INET */ + WARN_ON(1); + + return -1; +} +#endif /* CONFIG_INET */ + int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { @@ -1805,12 +2137,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_queue *q; dma_addr_t tb0_phys, tb1_phys, scratch_phys; void *tb1_addr; - u16 len, tb1_len, tb2_len; + u16 len, tb1_len; bool wait_write_ptr; __le16 fc; u8 hdr_len; u16 wifi_seq; - int i; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -1819,6 +2150,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, "TX on unused queue %d\n", txq_id)) return -EINVAL; + if (unlikely(trans_pcie->sw_csum_tx && + skb->ip_summed == CHECKSUM_PARTIAL)) { + int offs = skb_checksum_start_offset(skb); + int csum_offs = offs + skb->csum_offset; + __wsum csum; + + if (skb_ensure_writable(skb, 
csum_offs + sizeof(__sum16))) + return -1; + + csum = skb_checksum(skb, offs, skb->len - offs, 0); + *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); + } + if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && __skb_linearize(skb)) @@ -1893,57 +2237,20 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); - /* - * Set up TFD's third entry to point directly to remainder - * of skb's head, if any - */ - tb2_len = skb_headlen(skb) - hdr_len; - if (tb2_len > 0) { - dma_addr_t tb2_phys = dma_map_single(trans->dev, - skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr]); - goto out_err; - } - iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); - } - - /* set up the remaining entries to point to the data */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr_t tb_phys; - int tb_idx; - - if (!skb_frag_size(frag)) - continue; - - tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr]); + if (ieee80211_is_data_qos(fc) && + (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) { + if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, + out_meta, dev_cmd, + tb1_len))) goto out_err; - } - tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, - skb_frag_size(frag), false); - - out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS); + } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, + out_meta, dev_cmd, tb1_len))) { + goto out_err; } /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); - trace_iwlwifi_dev_tx(trans->dev, skb, - &txq->tfds[txq->q.write_ptr], - sizeof(struct iwl_tfd), - &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, - skb->data + hdr_len, tb2_len); - trace_iwlwifi_dev_tx_data(trans->dev, skb, - hdr_len, skb->len - hdr_len); - wait_write_ptr = ieee80211_has_morefrags(fc); /* start timer if queue currently empty */ diff --git a/drivers/net/wireless/intersil/Kconfig b/drivers/net/wireless/intersil/Kconfig new file mode 100644 index 000000000000..9da136049955 --- /dev/null +++ b/drivers/net/wireless/intersil/Kconfig @@ -0,0 +1,38 @@ +config WLAN_VENDOR_INTERSIL + bool "Intersil devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_INTERSIL + +source "drivers/net/wireless/intersil/hostap/Kconfig" +source "drivers/net/wireless/intersil/orinoco/Kconfig" +source "drivers/net/wireless/intersil/p54/Kconfig" + +config PRISM54 + tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)' + depends on PCI + select WIRELESS_EXT + select WEXT_SPY + select WEXT_PRIV + select FW_LOADER + ---help--- + This enables support for FullMAC PCI/Cardbus prism54 devices. This + driver is now deprecated in favor for the SoftMAC driver, p54pci. + p54pci supports FullMAC PCI/Cardbus devices as well. 
+ + For more information refer to the p54 wiki: + + http://wireless.kernel.org/en/users/Drivers/p54 + + Note: You need a motherboard with DMA support to use any of these cards + + When built as module you get the module prism54 + +endif # WLAN_VENDOR_INTERSIL diff --git a/drivers/net/wireless/intersil/Makefile b/drivers/net/wireless/intersil/Makefile new file mode 100644 index 000000000000..9a8cbfee3ea5 --- /dev/null +++ b/drivers/net/wireless/intersil/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_HOSTAP) += hostap/ +obj-$(CONFIG_HERMES) += orinoco/ +obj-$(CONFIG_P54_COMMON) += p54/ +obj-$(CONFIG_PRISM54) += prism54/ diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/intersil/hostap/Kconfig index 287d82728bc3..287d82728bc3 100644 --- a/drivers/net/wireless/hostap/Kconfig +++ b/drivers/net/wireless/intersil/hostap/Kconfig diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/intersil/hostap/Makefile index b8e41a702c00..b8e41a702c00 100644 --- a/drivers/net/wireless/hostap/Makefile +++ b/drivers/net/wireless/intersil/hostap/Makefile diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h index ce8721fbc10e..ce8721fbc10e 100644 --- a/drivers/net/wireless/hostap/hostap.h +++ b/drivers/net/wireless/intersil/hostap/hostap.h diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/intersil/hostap/hostap_80211.h index ed98ce7c8f65..ed98ce7c8f65 100644 --- a/drivers/net/wireless/hostap/hostap_80211.h +++ b/drivers/net/wireless/intersil/hostap/hostap_80211.h diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c index 599f30f22841..599f30f22841 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c index 055e11d353ca..055e11d353ca 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c index c995ace153ee..c995ace153ee 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/intersil/hostap/hostap_ap.h index 334e2d0b8e11..334e2d0b8e11 100644 --- a/drivers/net/wireless/hostap/hostap_ap.h +++ b/drivers/net/wireless/intersil/hostap/hostap_ap.h diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/intersil/hostap/hostap_common.h index 4230102ac9e4..4230102ac9e4 100644 --- a/drivers/net/wireless/hostap/hostap_common.h +++ b/drivers/net/wireless/intersil/hostap/hostap_common.h diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/intersil/hostap/hostap_config.h index 2c8f71f0ed45..2c8f71f0ed45 100644 --- a/drivers/net/wireless/hostap/hostap_config.h +++ b/drivers/net/wireless/intersil/hostap/hostap_config.h diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/intersil/hostap/hostap_cs.c index 50033aa7c7d5..74f63b7bf7b4 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/intersil/hostap/hostap_cs.c @@ -473,7 +473,7 @@ static int prism2_config(struct pcmcia_device *link) struct net_device *dev; struct hostap_interface *iface; local_info_t *local; - int ret = 1; + int ret; struct 
hostap_cs_priv *hw_priv; unsigned long flags; @@ -502,8 +502,10 @@ static int prism2_config(struct pcmcia_device *link) /* Need to allocate net_device before requesting IRQ handler */ dev = prism2_init_local_data(&prism2_pccard_funcs, 0, &link->dev); - if (dev == NULL) + if (!dev) { + ret = -ENOMEM; goto failed; + } link->priv = dev; iface = netdev_priv(dev); diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c index 705fe668b969..705fe668b969 100644 --- a/drivers/net/wireless/hostap/hostap_download.c +++ b/drivers/net/wireless/intersil/hostap/hostap_download.c diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 6df3ee561d52..6df3ee561d52 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/intersil/hostap/hostap_info.c index 7635ac4f6679..7635ac4f6679 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/intersil/hostap/hostap_info.c diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index 3e5fa7872b64..3e5fa7872b64 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 80d4228ba754..80d4228ba754 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/intersil/hostap/hostap_pci.c index c864ef4b0015..c864ef4b0015 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/intersil/hostap/hostap_pci.c diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/intersil/hostap/hostap_plx.c index 4901a99c6c59..4901a99c6c59 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/intersil/hostap/hostap_plx.c diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c index dd84557cf957..dd84557cf957 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h index ca25283e1c92..ca25283e1c92 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/intersil/orinoco/Kconfig index f6fa3f4e294f..f6fa3f4e294f 100644 --- a/drivers/net/wireless/orinoco/Kconfig +++ b/drivers/net/wireless/intersil/orinoco/Kconfig diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/intersil/orinoco/Makefile index bfdefb85abcd..bfdefb85abcd 100644 --- a/drivers/net/wireless/orinoco/Makefile +++ b/drivers/net/wireless/intersil/orinoco/Makefile diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/intersil/orinoco/airport.c index 77e6c53040a3..77e6c53040a3 100644 --- a/drivers/net/wireless/orinoco/airport.c +++ b/drivers/net/wireless/intersil/orinoco/airport.c diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c index 0f6ea316e38e..0f6ea316e38e 100644 --- a/drivers/net/wireless/orinoco/cfg.c +++ 
b/drivers/net/wireless/intersil/orinoco/cfg.c diff --git a/drivers/net/wireless/orinoco/cfg.h b/drivers/net/wireless/intersil/orinoco/cfg.h index 3ddc96a06cd7..3ddc96a06cd7 100644 --- a/drivers/net/wireless/orinoco/cfg.h +++ b/drivers/net/wireless/intersil/orinoco/cfg.h diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/intersil/orinoco/fw.c index 400a35217644..400a35217644 100644 --- a/drivers/net/wireless/orinoco/fw.c +++ b/drivers/net/wireless/intersil/orinoco/fw.c diff --git a/drivers/net/wireless/orinoco/fw.h b/drivers/net/wireless/intersil/orinoco/fw.h index aca63e3c4b5b..aca63e3c4b5b 100644 --- a/drivers/net/wireless/orinoco/fw.h +++ b/drivers/net/wireless/intersil/orinoco/fw.h diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c index 43790fbea0e0..43790fbea0e0 100644 --- a/drivers/net/wireless/orinoco/hermes.c +++ b/drivers/net/wireless/intersil/orinoco/hermes.c diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/intersil/orinoco/hermes.h index 28a42448d329..28a42448d329 100644 --- a/drivers/net/wireless/orinoco/hermes.h +++ b/drivers/net/wireless/intersil/orinoco/hermes.h diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/intersil/orinoco/hermes_dld.c index 4a10b7aca043..4a10b7aca043 100644 --- a/drivers/net/wireless/orinoco/hermes_dld.c +++ b/drivers/net/wireless/intersil/orinoco/hermes_dld.c diff --git a/drivers/net/wireless/orinoco/hermes_dld.h b/drivers/net/wireless/intersil/orinoco/hermes_dld.h index b5377e232c63..b5377e232c63 100644 --- a/drivers/net/wireless/orinoco/hermes_dld.h +++ b/drivers/net/wireless/intersil/orinoco/hermes_dld.h diff --git a/drivers/net/wireless/orinoco/hermes_rid.h b/drivers/net/wireless/intersil/orinoco/hermes_rid.h index 42eb67dea1df..42eb67dea1df 100644 --- a/drivers/net/wireless/orinoco/hermes_rid.h +++ b/drivers/net/wireless/intersil/orinoco/hermes_rid.h diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c index e27e32851f1e..e27e32851f1e 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/intersil/orinoco/hw.c diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/intersil/orinoco/hw.h index 466d1ede76f1..466d1ede76f1 100644 --- a/drivers/net/wireless/orinoco/hw.h +++ b/drivers/net/wireless/intersil/orinoco/hw.h diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 7b5c554323c7..7b5c554323c7 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/intersil/orinoco/main.h index 5a8fec26136e..5a8fec26136e 100644 --- a/drivers/net/wireless/orinoco/main.h +++ b/drivers/net/wireless/intersil/orinoco/main.h diff --git a/drivers/net/wireless/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c index fce4a843e656..fce4a843e656 100644 --- a/drivers/net/wireless/orinoco/mic.c +++ b/drivers/net/wireless/intersil/orinoco/mic.c diff --git a/drivers/net/wireless/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h index 04d05bc566d6..04d05bc566d6 100644 --- a/drivers/net/wireless/orinoco/mic.h +++ b/drivers/net/wireless/intersil/orinoco/mic.h diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h index eebd2be21ee9..eebd2be21ee9 100644 --- a/drivers/net/wireless/orinoco/orinoco.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h diff --git 
a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c index a956f965a1e5..a956f965a1e5 100644 --- a/drivers/net/wireless/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c index 048693b6c6c2..048693b6c6c2 100644 --- a/drivers/net/wireless/orinoco/orinoco_nortel.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c index 4938a2208a37..4938a2208a37 100644 --- a/drivers/net/wireless/orinoco/orinoco_pci.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c diff --git a/drivers/net/wireless/orinoco/orinoco_pci.h b/drivers/net/wireless/intersil/orinoco/orinoco_pci.h index 43f5b9f5a0b0..43f5b9f5a0b0 100644 --- a/drivers/net/wireless/orinoco/orinoco_pci.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco_pci.h diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c index 221352027779..221352027779 100644 --- a/drivers/net/wireless/orinoco/orinoco_plx.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c index 20ce569b8a43..20ce569b8a43 100644 --- a/drivers/net/wireless/orinoco/orinoco_tmd.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index f2cd513d54b2..f2cd513d54b2 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c index 2c66166add70..2c66166add70 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/intersil/orinoco/scan.c diff --git a/drivers/net/wireless/orinoco/scan.h b/drivers/net/wireless/intersil/orinoco/scan.h index 27281fb0a6dc..27281fb0a6dc 100644 --- a/drivers/net/wireless/orinoco/scan.h +++ b/drivers/net/wireless/intersil/orinoco/scan.h diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c index b60048c95e0a..b60048c95e0a 100644 --- a/drivers/net/wireless/orinoco/spectrum_cs.c +++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c index 1d4dae422106..1d4dae422106 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/intersil/orinoco/wext.c diff --git a/drivers/net/wireless/orinoco/wext.h b/drivers/net/wireless/intersil/orinoco/wext.h index 1479f4e26dde..1479f4e26dde 100644 --- a/drivers/net/wireless/orinoco/wext.h +++ b/drivers/net/wireless/intersil/orinoco/wext.h diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/intersil/p54/Kconfig index cdafb8c73e82..cdafb8c73e82 100644 --- a/drivers/net/wireless/p54/Kconfig +++ b/drivers/net/wireless/intersil/p54/Kconfig diff --git a/drivers/net/wireless/p54/Makefile b/drivers/net/wireless/intersil/p54/Makefile index b542e68f1781..b542e68f1781 100644 --- a/drivers/net/wireless/p54/Makefile +++ b/drivers/net/wireless/intersil/p54/Makefile diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/intersil/p54/eeprom.c index 2fe713eda7ad..2fe713eda7ad 100644 --- 
a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/intersil/p54/eeprom.c diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/intersil/p54/eeprom.h index 20ebe39a3f4e..20ebe39a3f4e 100644 --- a/drivers/net/wireless/p54/eeprom.h +++ b/drivers/net/wireless/intersil/p54/eeprom.h diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c index 257a9eadd595..257a9eadd595 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/intersil/p54/fwio.c diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/intersil/p54/led.c index 9a8fedd3c0f5..9a8fedd3c0f5 100644 --- a/drivers/net/wireless/p54/led.c +++ b/drivers/net/wireless/intersil/p54/led.c diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/intersil/p54/lmac.h index de1d46bf97df..de1d46bf97df 100644 --- a/drivers/net/wireless/p54/lmac.h +++ b/drivers/net/wireless/intersil/p54/lmac.h diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index 7805864e76f9..7805864e76f9 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h index 40b401ed6845..40b401ed6845 100644 --- a/drivers/net/wireless/p54/p54.h +++ b/drivers/net/wireless/intersil/p54/p54.h diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c index 27a49068d32d..27a49068d32d 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/intersil/p54/p54pci.c diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/intersil/p54/p54pci.h index 68405c142f97..68405c142f97 100644 --- a/drivers/net/wireless/p54/p54pci.h +++ b/drivers/net/wireless/intersil/p54/p54pci.h diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index 7ab2f43ab425..7ab2f43ab425 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/intersil/p54/p54spi.h index dfaa62aaeb07..dfaa62aaeb07 100644 --- a/drivers/net/wireless/p54/p54spi.h +++ b/drivers/net/wireless/intersil/p54/p54spi.h diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/intersil/p54/p54spi_eeprom.h index 0b7bfb0adcf2..0b7bfb0adcf2 100644 --- a/drivers/net/wireless/p54/p54spi_eeprom.h +++ b/drivers/net/wireless/intersil/p54/p54spi_eeprom.h diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index 043bd1c23c19..043bd1c23c19 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/intersil/p54/p54usb.h index a5f5f0fea3bd..a5f5f0fea3bd 100644 --- a/drivers/net/wireless/p54/p54usb.h +++ b/drivers/net/wireless/intersil/p54/p54usb.h diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 24e5ff9a9272..24e5ff9a9272 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c diff --git a/drivers/net/wireless/prism54/Makefile b/drivers/net/wireless/intersil/prism54/Makefile index fad305c76737..fad305c76737 100644 --- a/drivers/net/wireless/prism54/Makefile +++ b/drivers/net/wireless/intersil/prism54/Makefile diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c index 333c1a2f882e..333c1a2f882e 100644 --- 
a/drivers/net/wireless/prism54/isl_38xx.c +++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/intersil/prism54/isl_38xx.h index 547ab885610b..547ab885610b 100644 --- a/drivers/net/wireless/prism54/isl_38xx.h +++ b/drivers/net/wireless/intersil/prism54/isl_38xx.h diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c index ecbb0546cf3e..48e8a978a832 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c @@ -2036,7 +2036,7 @@ format_event(islpci_private *priv, char *dest, const char *str, mlme->address, (error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ") : ""), mlme->code); - BUG_ON(n > IW_CUSTOM_MAX); + WARN_ON(n >= IW_CUSTOM_MAX); *length = n; } diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/intersil/prism54/isl_ioctl.h index 842a2549facc..842a2549facc 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.h +++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.h diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/intersil/prism54/isl_oid.h index 83fec557997e..83fec557997e 100644 --- a/drivers/net/wireless/prism54/isl_oid.h +++ b/drivers/net/wireless/intersil/prism54/isl_oid.h diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c index 931cf440ff18..84a42012aeae 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c @@ -707,7 +707,9 @@ islpci_alloc_memory(islpci_private *priv) pci_map_single(priv->pdev, (void *) skb->data, MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE); - if (!priv->pci_map_rx_address[counter]) { + if (pci_dma_mapping_error(priv->pdev, + priv->pci_map_rx_address[counter])) { + priv->pci_map_rx_address[counter] = 0; /* error mapping the buffer to device accessible memory address */ printk(KERN_ERR "failed to map skb DMA'able\n"); diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/intersil/prism54/islpci_dev.h index f6f088e05fe4..f6f088e05fe4 100644 --- a/drivers/net/wireless/prism54/islpci_dev.h +++ b/drivers/net/wireless/intersil/prism54/islpci_dev.h diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c index 674658f2e6ef..d83f6332019e 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c @@ -190,7 +190,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) pci_map_address = pci_map_single(priv->pdev, (void *) skb->data, skb->len, PCI_DMA_TODEVICE); - if (unlikely(pci_map_address == 0)) { + if (pci_dma_mapping_error(priv->pdev, pci_map_address)) { printk(KERN_WARNING "%s: cannot map buffer to PCI\n", ndev->name); goto drop_free; @@ -448,7 +448,8 @@ islpci_eth_receive(islpci_private *priv) pci_map_single(priv->pdev, (void *) skb->data, MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE); - if (unlikely(!priv->pci_map_rx_address[index])) { + if (pci_dma_mapping_error(priv->pdev, + priv->pci_map_rx_address[index])) { /* error mapping the buffer to device accessible memory address */ DEBUG(SHOW_ERROR_MESSAGES, "Error mapping DMA address\n"); diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/intersil/prism54/islpci_eth.h index 80f50f1bc6f2..80f50f1bc6f2 100644 --- a/drivers/net/wireless/prism54/islpci_eth.h +++ 
b/drivers/net/wireless/intersil/prism54/islpci_eth.h diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c index 300c846ea087..300c846ea087 100644 --- a/drivers/net/wireless/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/intersil/prism54/islpci_mgt.c index 0de14dfa68cc..53d7a1705e8e 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/intersil/prism54/islpci_mgt.c @@ -130,7 +130,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev) buf->pci_addr = pci_map_single(priv->pdev, buf->mem, MGMT_FRAME_SIZE, PCI_DMA_FROMDEVICE); - if (!buf->pci_addr) { + if (pci_dma_mapping_error(priv->pdev, buf->pci_addr)) { printk(KERN_WARNING "Failed to make memory DMA'able.\n"); return -ENOMEM; @@ -217,7 +217,7 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid, err = -ENOMEM; buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len, PCI_DMA_TODEVICE); - if (!buf.pci_addr) { + if (pci_dma_mapping_error(priv->pdev, buf.pci_addr)) { printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n", ndev->name); goto error_free; diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/intersil/prism54/islpci_mgt.h index 700c434c8803..700c434c8803 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.h +++ b/drivers/net/wireless/intersil/prism54/islpci_mgt.h diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/intersil/prism54/oid_mgt.c index 3a8d2dbcfecd..6528ed5b9b1d 100644 --- a/drivers/net/wireless/prism54/oid_mgt.c +++ b/drivers/net/wireless/intersil/prism54/oid_mgt.c @@ -424,7 +424,7 @@ mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data) void *cache, *_data = data; u32 oid; - BUG_ON(OID_NUM_LAST <= n); + BUG_ON(n >= OID_NUM_LAST); BUG_ON(extra > isl_oid[n].range); if (!priv->mib) @@ -485,7 +485,7 @@ mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len int dlen; u32 oid; - BUG_ON(OID_NUM_LAST <= n); + BUG_ON(n >= OID_NUM_LAST); dlen = isl_oid[n].size; oid = isl_oid[n].oid; @@ -524,7 +524,7 @@ mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data, void *cache, *_res = NULL; u32 oid; - BUG_ON(OID_NUM_LAST <= n); + BUG_ON(n >= OID_NUM_LAST); BUG_ON(extra > isl_oid[n].range); res->ptr = NULL; @@ -626,7 +626,7 @@ mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n) void mgt_set(islpci_private *priv, enum oid_num_t n, void *data) { - BUG_ON(OID_NUM_LAST <= n); + BUG_ON(n >= OID_NUM_LAST); BUG_ON(priv->mib[n] == NULL); memcpy(priv->mib[n], data, isl_oid[n].size); @@ -636,7 +636,7 @@ mgt_set(islpci_private *priv, enum oid_num_t n, void *data) void mgt_get(islpci_private *priv, enum oid_num_t n, void *res) { - BUG_ON(OID_NUM_LAST <= n); + BUG_ON(n >= OID_NUM_LAST); BUG_ON(priv->mib[n] == NULL); BUG_ON(res == NULL); diff --git a/drivers/net/wireless/prism54/oid_mgt.h b/drivers/net/wireless/intersil/prism54/oid_mgt.h index cf5141df8474..cf5141df8474 100644 --- a/drivers/net/wireless/prism54/oid_mgt.h +++ b/drivers/net/wireless/intersil/prism54/oid_mgt.h diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/intersil/prism54/prismcompat.h index bc1401eb4b9d..bc1401eb4b9d 100644 --- a/drivers/net/wireless/prism54/prismcompat.h +++ b/drivers/net/wireless/intersil/prism54/prismcompat.h diff --git a/drivers/net/wireless/mac80211_hwsim.c 
b/drivers/net/wireless/mac80211_hwsim.c index c00a7daaa4bc..c32889a1e39c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -495,6 +495,9 @@ struct mac80211_hwsim_data { const struct ieee80211_regdomain *regd; struct ieee80211_channel *tmp_chan; + struct ieee80211_channel *roc_chan; + u32 roc_duration; + struct delayed_work roc_start; struct delayed_work roc_done; struct delayed_work hw_scan; struct cfg80211_scan_request *hw_scan_request; @@ -514,6 +517,7 @@ struct mac80211_hwsim_data { bool ps_poll_pending; struct dentry *debugfs; + uintptr_t pending_cookie; struct sk_buff_head pending; /* packets pending */ /* * Only radios in the same group can communicate together (the @@ -810,6 +814,9 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); + if (WARN_ON(!txrate)) + return; + if (!netif_running(hwsim_mon)) return; @@ -960,6 +967,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, unsigned int hwsim_flags = 0; int i; struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES]; + uintptr_t cookie; if (data->ps != PS_DISABLED) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); @@ -1018,7 +1026,10 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; /* We create a cookie to identify this skb */ - if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb)) + data->pending_cookie++; + cookie = data->pending_cookie; + info->rate_driver_data[0] = (void *)cookie; + if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, cookie)) goto nla_put_failure; genlmsg_end(skb, msg_head); @@ -1247,6 +1258,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; bool ack; @@ -1292,6 +1304,22 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, ARRAY_SIZE(txi->control.rates)); txi->rate_driver_data[0] = channel; + + if (skb->len >= 24 + 8 && + ieee80211_is_probe_resp(hdr->frame_control)) { + /* fake header transmission time */ + struct ieee80211_mgmt *mgmt; + struct ieee80211_rate *txrate; + u64 ts; + + mgmt = (struct ieee80211_mgmt *)skb->data; + txrate = ieee80211_get_tx_rate(hw, txi); + ts = mac80211_hwsim_get_tsf_raw(); + mgmt->u.probe_resp.timestamp = + cpu_to_le64(ts + data->tsf_offset + + 24 * 8 * 10 / txrate->bitrate); + } + mac80211_hwsim_monitor_rx(hw, skb, channel); /* wmediumd mode check */ @@ -1871,7 +1899,8 @@ static void hw_scan_work(struct work_struct *work) req->channels[hwsim->scan_chan_idx]->center_freq); hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx]; - if (hwsim->tmp_chan->flags & IEEE80211_CHAN_NO_IR || + if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR | + IEEE80211_CHAN_RADAR) || !req->n_ssids) { dwell = 120; } else { @@ -1987,6 +2016,23 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, mutex_unlock(&hwsim->mutex); } +static void hw_roc_start(struct work_struct *work) +{ + struct mac80211_hwsim_data *hwsim = + container_of(work, struct mac80211_hwsim_data, roc_start.work); + + mutex_lock(&hwsim->mutex); + + wiphy_debug(hwsim->hw->wiphy, "hwsim ROC begins\n"); + hwsim->tmp_chan = hwsim->roc_chan; + ieee80211_ready_on_channel(hwsim->hw); + + ieee80211_queue_delayed_work(hwsim->hw, &hwsim->roc_done, + 
msecs_to_jiffies(hwsim->roc_duration)); + + mutex_unlock(&hwsim->mutex); +} + static void hw_roc_done(struct work_struct *work) { struct mac80211_hwsim_data *hwsim = @@ -2014,16 +2060,14 @@ static int mac80211_hwsim_roc(struct ieee80211_hw *hw, return -EBUSY; } - hwsim->tmp_chan = chan; + hwsim->roc_chan = chan; + hwsim->roc_duration = duration; mutex_unlock(&hwsim->mutex); wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n", chan->center_freq, duration); + ieee80211_queue_delayed_work(hw, &hwsim->roc_start, HZ/50); - ieee80211_ready_on_channel(hw); - - ieee80211_queue_delayed_work(hw, &hwsim->roc_done, - msecs_to_jiffies(duration)); return 0; } @@ -2031,6 +2075,7 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *hwsim = hw->priv; + cancel_delayed_work_sync(&hwsim->roc_start); cancel_delayed_work_sync(&hwsim->roc_done); mutex_lock(&hwsim->mutex); @@ -2375,6 +2420,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb); } + INIT_DELAYED_WORK(&data->roc_start, hw_roc_start); INIT_DELAYED_WORK(&data->roc_done, hw_roc_done); INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work); @@ -2411,6 +2457,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); @@ -2710,7 +2757,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, struct mac80211_hwsim_data *data2; struct ieee80211_tx_info *txi; struct hwsim_tx_rate *tx_attempts; - unsigned long ret_skb_ptr; + u64 ret_skb_cookie; struct sk_buff *skb, *tmp; const u8 *src; unsigned int hwsim_flags; @@ -2728,7 +2775,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); - ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]); + ret_skb_cookie = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]); data2 = get_hwsim_data_ref_from_addr(src); if (!data2) @@ -2736,7 +2783,12 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, /* look for the skb matching the cookie passed back from user */ skb_queue_walk_safe(&data2->pending, skb, tmp) { - if ((unsigned long)skb == ret_skb_ptr) { + u64 skb_cookie; + + txi = IEEE80211_SKB_CB(skb); + skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0]; + + if (skb_cookie == ret_skb_cookie) { skb_unlink(skb, &data2->pending); found = true; break; @@ -2827,10 +2879,25 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, /* A frame is received from user space */ memset(&rx_status, 0, sizeof(rx_status)); - /* TODO: Check ATTR_FREQ if it exists, and maybe throw away off-channel - * packets? 
- */ - rx_status.freq = data2->channel->center_freq; + if (info->attrs[HWSIM_ATTR_FREQ]) { + /* throw away off-channel packets, but allow both the temporary + * ("hw" scan/remain-on-channel) and regular channel, since the + * internal datapath also allows this + */ + mutex_lock(&data2->mutex); + rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]); + + if (rx_status.freq != data2->channel->center_freq && + (!data2->tmp_chan || + rx_status.freq != data2->tmp_chan->center_freq)) { + mutex_unlock(&data2->mutex); + goto out; + } + mutex_unlock(&data2->mutex); + } else { + rx_status.freq = data2->channel->center_freq; + } + rx_status.band = data2->channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); diff --git a/drivers/net/wireless/marvell/Kconfig b/drivers/net/wireless/marvell/Kconfig new file mode 100644 index 000000000000..4938c7ec0009 --- /dev/null +++ b/drivers/net/wireless/marvell/Kconfig @@ -0,0 +1,27 @@ +config WLAN_VENDOR_MARVELL + bool "Marvell devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_MARVELL + +source "drivers/net/wireless/marvell/libertas/Kconfig" +source "drivers/net/wireless/marvell/libertas_tf/Kconfig" +source "drivers/net/wireless/marvell/mwifiex/Kconfig" + +config MWL8K + tristate "Marvell 88W8xxx PCI/PCIe Wireless support" + depends on MAC80211 && PCI + ---help--- + This driver supports Marvell TOPDOG 802.11 wireless cards. + + To compile this driver as a module, choose M here: the module + will be called mwl8k. If unsure, say N. 
+ +endif # WLAN_VENDOR_MARVELL diff --git a/drivers/net/wireless/marvell/Makefile b/drivers/net/wireless/marvell/Makefile new file mode 100644 index 000000000000..1b0a7d2bc8e6 --- /dev/null +++ b/drivers/net/wireless/marvell/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_LIBERTAS) += libertas/ + +obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/ +obj-$(CONFIG_MWIFIEX) += mwifiex/ + +obj-$(CONFIG_MWL8K) += mwl8k.o diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig index e6268ceacbf1..e6268ceacbf1 100644 --- a/drivers/net/wireless/libertas/Kconfig +++ b/drivers/net/wireless/marvell/libertas/Kconfig diff --git a/drivers/net/wireless/libertas/LICENSE b/drivers/net/wireless/marvell/libertas/LICENSE index 8862742213b9..8862742213b9 100644 --- a/drivers/net/wireless/libertas/LICENSE +++ b/drivers/net/wireless/marvell/libertas/LICENSE diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/marvell/libertas/Makefile index eac72f7bd341..eac72f7bd341 100644 --- a/drivers/net/wireless/libertas/Makefile +++ b/drivers/net/wireless/marvell/libertas/Makefile diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/marvell/libertas/README index 1a554a685e91..1a554a685e91 100644 --- a/drivers/net/wireless/libertas/README +++ b/drivers/net/wireless/marvell/libertas/README diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 8317afd065b4..86955c416b30 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -1108,7 +1108,7 @@ static int lbs_associate(struct lbs_private *priv, size_t len, resp_ie_len; int status; int ret; - u8 *pos = &(cmd->iebuf[0]); + u8 *pos; u8 *tmp; lbs_deb_enter(LBS_DEB_CFG80211); @@ -1117,6 +1117,7 @@ static int lbs_associate(struct lbs_private *priv, ret = -ENOMEM; goto done; } + pos = &cmd->iebuf[0]; /* * cmd 50 00 diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/marvell/libertas/cfg.h index acccc2922401..acccc2922401 100644 --- a/drivers/net/wireless/libertas/cfg.h +++ b/drivers/net/wireless/marvell/libertas/cfg.h diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 0387a5b380c8..0387a5b380c8 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h index 0c5444b02c64..0c5444b02c64 100644 --- a/drivers/net/wireless/libertas/cmd.h +++ b/drivers/net/wireless/marvell/libertas/cmd.h diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index e5442e8956f7..e5442e8956f7 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c index 26cbf1dcc662..faed1823c58e 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/marvell/libertas/debugfs.c @@ -56,19 +56,15 @@ static ssize_t lbs_sleepparams_write(struct file *file, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t buf_size, ret; + ssize_t ret; struct sleep_params sp; int p1, p2, p3, p4, p5, p6; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(user_buf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = 
min(count, len - 1); - if (copy_from_user(buf, user_buf, buf_size)) { - ret = -EFAULT; - goto out_unlock; - } ret = sscanf(buf, "%d %d %d %d %d %d", &p1, &p2, &p3, &p4, &p5, &p6); if (ret != 6) { ret = -EINVAL; @@ -88,7 +84,7 @@ static ssize_t lbs_sleepparams_write(struct file *file, ret = -EINVAL; out_unlock: - free_page(addr); + kfree(buf); return ret; } @@ -125,18 +121,14 @@ static ssize_t lbs_host_sleep_write(struct file *file, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t buf_size, ret; + ssize_t ret; int host_sleep; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(user_buf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, user_buf, buf_size)) { - ret = -EFAULT; - goto out_unlock; - } ret = sscanf(buf, "%d", &host_sleep); if (ret != 1) { ret = -EINVAL; @@ -162,7 +154,7 @@ static ssize_t lbs_host_sleep_write(struct file *file, ret = count; out_unlock: - free_page(addr); + kfree(buf); return ret; } @@ -281,21 +273,15 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, struct cmd_ds_802_11_subscribe_event *events; struct mrvl_ie_thresholds *tlv; struct lbs_private *priv = file->private_data; - ssize_t buf_size; int value, freq, new_mask; uint16_t curr_mask; char *buf; int ret; - buf = (char *)get_zeroed_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - ret = -EFAULT; - goto out_page; - } ret = sscanf(buf, "%d %d %d", &value, &freq, &new_mask); if (ret != 3) { ret = -EINVAL; @@ -343,7 +329,7 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, out_events: kfree(events); out_page: - free_page((unsigned long)buf); + kfree(buf); return ret; } @@ -472,22 +458,15 @@ static ssize_t lbs_rdmac_write(struct file *file, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } priv->mac_offset = simple_strtoul(buf, NULL, 16); - res = count; -out_unlock: - free_page(addr); - return res; + kfree(buf); + return count; } static ssize_t lbs_wrmac_write(struct file *file, @@ -496,18 +475,14 @@ static ssize_t lbs_wrmac_write(struct file *file, { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; + ssize_t res; u32 offset, value; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } res = sscanf(buf, "%x %x", &offset, &value); if (res != 2) { res = -EFAULT; @@ -520,7 +495,7 @@ static ssize_t lbs_wrmac_write(struct file *file, if (!res) res = count; out_unlock: - free_page(addr); + kfree(buf); return res; } @@ -554,22 +529,16 @@ static ssize_t lbs_rdbbp_write(struct file *file, size_t count, loff_t 
*ppos) { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } priv->bbp_offset = simple_strtoul(buf, NULL, 16); - res = count; -out_unlock: - free_page(addr); - return res; + kfree(buf); + + return count; } static ssize_t lbs_wrbbp_write(struct file *file, @@ -578,18 +547,14 @@ static ssize_t lbs_wrbbp_write(struct file *file, { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; + ssize_t res; u32 offset, value; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } res = sscanf(buf, "%x %x", &offset, &value); if (res != 2) { res = -EFAULT; @@ -602,7 +567,7 @@ static ssize_t lbs_wrbbp_write(struct file *file, if (!res) res = count; out_unlock: - free_page(addr); + kfree(buf); return res; } @@ -636,22 +601,15 @@ static ssize_t lbs_rdrf_write(struct file *file, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } priv->rf_offset = simple_strtoul(buf, NULL, 16); - res = count; -out_unlock: - free_page(addr); - return res; + kfree(buf); + return count; } static ssize_t lbs_wrrf_write(struct file *file, @@ -660,18 +618,14 @@ static ssize_t lbs_wrrf_write(struct file *file, { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; + ssize_t res; u32 offset, value; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } res = sscanf(buf, "%x %x", &offset, &value); if (res != 2) { res = -EFAULT; @@ -684,7 +638,7 @@ static ssize_t lbs_wrrf_write(struct file *file, if (!res) res = count; out_unlock: - free_page(addr); + kfree(buf); return res; } @@ -915,16 +869,9 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, if (cnt == 0) return 0; - pdata = kmalloc(cnt + 1, GFP_KERNEL); - if (pdata == NULL) - return 0; - - if (copy_from_user(pdata, buf, cnt)) { - lbs_deb_debugfs("Copy from user failed\n"); - kfree(pdata); - return 0; - } - pdata[cnt] = '\0'; + pdata = memdup_user_nul(buf, cnt); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); p0 = pdata; for (i = 0; i < num_of_items; i++) { diff --git a/drivers/net/wireless/libertas/debugfs.h b/drivers/net/wireless/marvell/libertas/debugfs.h index f2b9c7ffe0fd..f2b9c7ffe0fd 100644 --- a/drivers/net/wireless/libertas/debugfs.h +++ b/drivers/net/wireless/marvell/libertas/debugfs.h diff --git 
a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h index 84a3aa7ac570..84a3aa7ac570 100644 --- a/drivers/net/wireless/libertas/decl.h +++ b/drivers/net/wireless/marvell/libertas/decl.h diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/marvell/libertas/defs.h index 407784aca627..407784aca627 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/marvell/libertas/defs.h diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index 6bd1608992b0..6bd1608992b0 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c index f955b2d66ed6..f955b2d66ed6 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/marvell/libertas/ethtool.c diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/marvell/libertas/firmware.c index 51b92b5df119..51b92b5df119 100644 --- a/drivers/net/wireless/libertas/firmware.c +++ b/drivers/net/wireless/marvell/libertas/firmware.c diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h index 96726f79a1dd..96726f79a1dd 100644 --- a/drivers/net/wireless/libertas/host.h +++ b/drivers/net/wireless/marvell/libertas/host.h diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c index f499efc6abcf..f499efc6abcf 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/marvell/libertas/if_cs.c diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 33ceda296c9c..68fd3a9779bd 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -228,7 +228,7 @@ static int if_sdio_handle_cmd(struct if_sdio_card *card, memcpy(priv->resp_buf[i], buffer, size); lbs_notify_command_response(priv, i); - spin_unlock_irqrestore(&card->priv->driver_lock, flags); + spin_unlock_irqrestore(&priv->driver_lock, flags); ret = 0; diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/marvell/libertas/if_sdio.h index 62fda3592f67..62fda3592f67 100644 --- a/drivers/net/wireless/libertas/if_sdio.h +++ b/drivers/net/wireless/marvell/libertas/if_sdio.h diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index 82c0796377aa..82c0796377aa 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/marvell/libertas/if_spi.h index e450e31fd11d..e450e31fd11d 100644 --- a/drivers/net/wireless/libertas/if_spi.h +++ b/drivers/net/wireless/marvell/libertas/if_spi.h diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index dff08a2896a3..dff08a2896a3 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/marvell/libertas/if_usb.h index 6e42eac331de..6e42eac331de 100644 --- a/drivers/net/wireless/libertas/if_usb.h +++ b/drivers/net/wireless/marvell/libertas/if_usb.h diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index 8079560f4965..8079560f4965 100644 --- a/drivers/net/wireless/libertas/main.c +++ 
b/drivers/net/wireless/marvell/libertas/main.c diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c index d0c881dd5846..d0c881dd5846 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/marvell/libertas/mesh.c diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h index 6603f341c874..6603f341c874 100644 --- a/drivers/net/wireless/libertas/mesh.h +++ b/drivers/net/wireless/marvell/libertas/mesh.h diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/marvell/libertas/radiotap.h index b3c8ea6d610e..b3c8ea6d610e 100644 --- a/drivers/net/wireless/libertas/radiotap.h +++ b/drivers/net/wireless/marvell/libertas/radiotap.h diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c index e446fed7b345..e446fed7b345 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/marvell/libertas/rx.c diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/marvell/libertas/tx.c index c025f9c18282..c025f9c18282 100644 --- a/drivers/net/wireless/libertas/tx.c +++ b/drivers/net/wireless/marvell/libertas/tx.c diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/marvell/libertas/types.h index cf1d9b047ee6..cf1d9b047ee6 100644 --- a/drivers/net/wireless/libertas/types.h +++ b/drivers/net/wireless/marvell/libertas/types.h diff --git a/drivers/net/wireless/marvell/libertas_tf/Kconfig b/drivers/net/wireless/marvell/libertas_tf/Kconfig new file mode 100644 index 000000000000..b5557af90048 --- /dev/null +++ b/drivers/net/wireless/marvell/libertas_tf/Kconfig @@ -0,0 +1,18 @@ +config LIBERTAS_THINFIRM + tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware" + depends on MAC80211 + select FW_LOADER + ---help--- + A library for Marvell Libertas 8xxx devices using thinfirm. + +config LIBERTAS_THINFIRM_DEBUG + bool "Enable full debugging output in the Libertas thin firmware module." + depends on LIBERTAS_THINFIRM + ---help--- + Debugging support. + +config LIBERTAS_THINFIRM_USB + tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware" + depends on LIBERTAS_THINFIRM && USB + ---help--- + A driver for Marvell Libertas 8388 USB devices using thinfirm. 
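The libertas debugfs hunks above (and the matching mwifiex debugfs hunks further down in this diff) all apply the same conversion: the open-coded get_zeroed_page() + copy_from_user() sequence is replaced by memdup_user_nul(), which allocates a kernel buffer, copies the user data, NUL-terminates it, and reports failure as an ERR_PTR(), so the handlers no longer need separate -ENOMEM/-EFAULT paths or free_page() cleanup. A minimal sketch of the resulting handler shape, assuming a hypothetical handler name and parsed value (not taken from either driver):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t example_debugfs_write(struct file *file,
				     const char __user *ubuf,
				     size_t count, loff_t *ppos)
{
	unsigned long val;
	char *buf;
	int ret;

	/* One call allocates, copies from userspace and NUL-terminates;
	 * on failure it returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT).
	 * The copy is capped at one page only to mirror the handlers above.
	 */
	buf = memdup_user_nul(ubuf, min_t(size_t, count, PAGE_SIZE - 1));
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = kstrtoul(buf, 0, &val);
	kfree(buf);	/* kfree(), not free_page(), pairs with memdup_user_nul() */
	if (ret)
		return ret;

	/* ... apply 'val' to the device here (driver-specific) ... */

	return count;
}

The same shape is what the lbs_*_write() hunks above and the mwifiex_regrdwr_write()/mwifiex_hscfg_write() hunks below converge on; only the sscanf()/kstrto*() parsing and the per-driver action differ.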
diff --git a/drivers/net/wireless/libertas_tf/Makefile b/drivers/net/wireless/marvell/libertas_tf/Makefile index ff5544d6ac9d..ff5544d6ac9d 100644 --- a/drivers/net/wireless/libertas_tf/Makefile +++ b/drivers/net/wireless/marvell/libertas_tf/Makefile diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c index 909ac3685010..909ac3685010 100644 --- a/drivers/net/wireless/libertas_tf/cmd.c +++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h index 4bd3dc5adf7c..4bd3dc5adf7c 100644 --- a/drivers/net/wireless/libertas_tf/deb_defs.h +++ b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 799a2efe5793..799a2efe5793 100644 --- a/drivers/net/wireless/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c diff --git a/drivers/net/wireless/libertas_tf/if_usb.h b/drivers/net/wireless/marvell/libertas_tf/if_usb.h index 6fa5b3f59efe..6fa5b3f59efe 100644 --- a/drivers/net/wireless/libertas_tf/if_usb.h +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.h diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h index ad77b92d0b41..ad77b92d0b41 100644 --- a/drivers/net/wireless/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index a47f0acc099a..a47f0acc099a 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/marvell/mwifiex/11ac.c index 59d23fb2365f..59d23fb2365f 100644 --- a/drivers/net/wireless/mwifiex/11ac.c +++ b/drivers/net/wireless/marvell/mwifiex/11ac.c diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/marvell/mwifiex/11ac.h index 1ca92c7a8a4a..1ca92c7a8a4a 100644 --- a/drivers/net/wireless/mwifiex/11ac.h +++ b/drivers/net/wireless/marvell/mwifiex/11ac.h diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index 71a1b580796f..71a1b580796f 100644 --- a/drivers/net/wireless/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index c174e79e6df2..c174e79e6df2 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h index afdd58aa90de..afdd58aa90de 100644 --- a/drivers/net/wireless/mwifiex/11n.h +++ b/drivers/net/wireless/marvell/mwifiex/11n.h diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c index aa498e0d2204..1efef3b8273d 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c @@ -203,8 +203,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_aggr->priority = skb_src->priority; skb_aggr->tstamp = skb_src->tstamp; - skb_aggr->tstamp = ktime_get_real(); - do { /* Check if AMSDU can accommodate this MSDU */ if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h index 
0cd2a3eb6c17..0cd2a3eb6c17 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.h +++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index b3970a8c9e48..09578c6cde59 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -48,7 +48,17 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, priv->wdev.iftype, 0, false); while (!skb_queue_empty(&list)) { + struct rx_packet_hdr *rx_hdr; + rx_skb = __skb_dequeue(&list); + rx_hdr = (struct rx_packet_hdr *)rx_skb->data; + if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && + ntohs(rx_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) { + mwifiex_process_tdls_action_frame(priv, + (u8 *)rx_hdr, + skb->len); + } + ret = mwifiex_recv_packet(priv, rx_skb); if (ret == -1) mwifiex_dbg(priv->adapter, ERROR, diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h index 63ecea89b4ab..63ecea89b4ab 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.h +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig index 279167ddd293..279167ddd293 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/marvell/mwifiex/Kconfig diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile index fdfd9bf15ed4..fdfd9bf15ed4 100644 --- a/drivers/net/wireless/mwifiex/Makefile +++ b/drivers/net/wireless/marvell/mwifiex/Makefile diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/marvell/mwifiex/README index 2f0f9b5609d0..2f0f9b5609d0 100644 --- a/drivers/net/wireless/mwifiex/README +++ b/drivers/net/wireless/marvell/mwifiex/README diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4073116e6e9f..e7adef72c05f 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -26,12 +26,10 @@ module_param(reg_alpha2, charp, 0); static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { { - .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | + .max = 3, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_CLIENT), - }, - { - .max = 1, .types = BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_AP), }, }; @@ -827,18 +825,26 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: + priv->bss_num = mwifiex_get_unused_bss_num(adapter, + MWIFIEX_BSS_TYPE_STA); priv->bss_role = MWIFIEX_BSS_ROLE_STA; priv->bss_type = MWIFIEX_BSS_TYPE_STA; break; case NL80211_IFTYPE_P2P_CLIENT: + priv->bss_num = mwifiex_get_unused_bss_num(adapter, + MWIFIEX_BSS_TYPE_P2P); priv->bss_role = MWIFIEX_BSS_ROLE_STA; priv->bss_type = MWIFIEX_BSS_TYPE_P2P; break; case NL80211_IFTYPE_P2P_GO: + priv->bss_num = mwifiex_get_unused_bss_num(adapter, + MWIFIEX_BSS_TYPE_P2P); priv->bss_role = MWIFIEX_BSS_ROLE_UAP; priv->bss_type = MWIFIEX_BSS_TYPE_P2P; break; case NL80211_IFTYPE_AP: + priv->bss_num = mwifiex_get_unused_bss_num(adapter, + MWIFIEX_BSS_TYPE_UAP); priv->bss_type = MWIFIEX_BSS_TYPE_UAP; priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; @@ -1533,6 +1539,7 @@ static const u32 mwifiex_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP104, 
WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_SMS4, WLAN_CIPHER_SUITE_AES_CMAC, }; @@ -1701,6 +1708,11 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, u8 deauth_mac[ETH_ALEN]; unsigned long flags; + if (!priv->bss_started && priv->wdev.cac_started) { + mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__); + mwifiex_abort_cac(priv); + } + if (list_empty(&priv->sta_list) || !priv->bss_started) return 0; @@ -2608,7 +2620,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, return ERR_PTR(-EINVAL); } - priv = mwifiex_get_unused_priv(adapter); + priv = mwifiex_get_unused_priv_by_bss_type( + adapter, MWIFIEX_BSS_TYPE_STA); if (!priv) { mwifiex_dbg(adapter, ERROR, "could not get free private struct\n"); @@ -2627,7 +2640,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; priv->bss_priority = 0; priv->bss_role = MWIFIEX_BSS_ROLE_STA; - priv->bss_num = adapter->curr_iface_comb.sta_intf; break; case NL80211_IFTYPE_AP: @@ -2638,7 +2650,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, return ERR_PTR(-EINVAL); } - priv = mwifiex_get_unused_priv(adapter); + priv = mwifiex_get_unused_priv_by_bss_type( + adapter, MWIFIEX_BSS_TYPE_UAP); if (!priv) { mwifiex_dbg(adapter, ERROR, "could not get free private struct\n"); @@ -2653,7 +2666,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->bss_priority = 0; priv->bss_role = MWIFIEX_BSS_ROLE_UAP; priv->bss_started = 0; - priv->bss_num = adapter->curr_iface_comb.uap_intf; priv->bss_mode = type; break; @@ -2665,7 +2677,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, return ERR_PTR(-EINVAL); } - priv = mwifiex_get_unused_priv(adapter); + priv = mwifiex_get_unused_priv_by_bss_type( + adapter, MWIFIEX_BSS_TYPE_P2P); if (!priv) { mwifiex_dbg(adapter, ERROR, "could not get free private struct\n"); @@ -2689,7 +2702,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->bss_priority = MWIFIEX_BSS_ROLE_STA; priv->bss_role = MWIFIEX_BSS_ROLE_STA; priv->bss_started = 0; - priv->bss_num = adapter->curr_iface_comb.p2p_intf; if (mwifiex_cfg80211_init_p2p_client(priv)) { memset(&priv->wdev, 0, sizeof(priv->wdev)); @@ -2914,6 +2926,12 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq, dont_care_byte = true; } + /* wildcard bytes record as the offset + * before the valid byte + */ + if (!valid_byte_cnt && !dont_care_byte) + pat->pkt_offset++; + if (valid_byte_cnt > max_byte_seq) return false; } @@ -3141,8 +3159,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, memset(&hs_cfg, 0, sizeof(hs_cfg)); hs_cfg.is_invoke_hostcmd = false; hs_cfg.conditions = HS_CFG_COND_MAC_EVENT; - hs_cfg.gpio = HS_CFG_GPIO_DEF; - hs_cfg.gap = HS_CFG_GAP_DEF; + hs_cfg.gpio = adapter->hs_cfg.gpio; + hs_cfg.gap = adapter->hs_cfg.gap; ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hs_cfg); if (ret) { @@ -3802,6 +3820,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->cipher_suites = mwifiex_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites); + if (adapter->region_code) + wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS | + REGULATORY_COUNTRY_IE_IGNORE; + ether_addr_copy(wiphy->perm_addr, adapter->perm_addr); wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | @@ -3862,11 +3884,15 @@ int 
mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) "driver hint alpha2: %2.2s\n", reg_alpha2); regulatory_hint(wiphy, reg_alpha2); } else { - country_code = mwifiex_11d_code_2_region(adapter->region_code); - if (country_code) - mwifiex_dbg(adapter, WARN, - "ignoring F/W country code %2.2s\n", - country_code); + if (adapter->region_code == 0x00) { + mwifiex_dbg(adapter, WARN, "Ignore world regulatory domain\n"); + } else { + country_code = + mwifiex_11d_code_2_region(adapter->region_code); + if (country_code && + regulatory_hint(wiphy, country_code)) + mwifiex_dbg(priv->adapter, ERROR, "regulatory_hint() failed\n"); + } } mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/marvell/mwifiex/cfg80211.h index 908367857d58..908367857d58 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.h +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.h diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index 3ddb8ec676ed..09fae27140f7 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -66,8 +66,8 @@ static u8 supported_rates_bg[BG_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x0c, 0x12, 0x16, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, 0 }; -u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30, - 0x32, 0x40, 0x41, 0xff }; +u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x00, 0x10, 0x20, 0x30, + 0x31, 0x32, 0x40, 0x41, 0x50 }; static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 }; @@ -168,7 +168,7 @@ struct region_code_mapping { static struct region_code_mapping region_code_mapping_t[] = { { 0x10, "US " }, /* US FCC */ { 0x20, "CA " }, /* IC Canada */ - { 0x30, "EU " }, /* ETSI */ + { 0x30, "FR " }, /* France */ { 0x31, "ES " }, /* Spain */ { 0x32, "FR " }, /* France */ { 0x40, "JP " }, /* Japan */ diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 45ae38e32621..cb25aa7e90db 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1637,9 +1637,9 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, if (adapter->region_code == region_code_index[i]) break; - /* If it's unidentified region code, use the default (USA) */ + /* If it's unidentified region code, use the default (world) */ if (i >= MWIFIEX_MAX_REGION_CODE) { - adapter->region_code = 0x10; + adapter->region_code = 0x00; mwifiex_dbg(adapter, WARN, "cmd: unknown region code, use default (USA)\n"); } diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 9824d8dd2b44..0b9c580af988 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -447,20 +447,13 @@ static ssize_t mwifiex_regrdwr_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *) addr; - size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1); + char *buf; int ret; u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX; - if (!buf) - return -ENOMEM; - - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); sscanf(buf, "%u %x %x", &reg_type, &reg_offset, &reg_value); @@ -474,7 +467,7 @@ mwifiex_regrdwr_write(struct file *file, ret = count; } done: - free_page(addr); +
kfree(buf); return ret; } @@ -572,17 +565,11 @@ mwifiex_debug_mask_write(struct file *file, const char __user *ubuf, int ret; unsigned long debug_mask; struct mwifiex_private *priv = (void *)file->private_data; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (void *)addr; - size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); + char *buf; - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); if (kstrtoul(buf, 0, &debug_mask)) { ret = -EINVAL; @@ -592,7 +579,7 @@ mwifiex_debug_mask_write(struct file *file, const char __user *ubuf, priv->adapter->debug_mask = debug_mask; ret = count; done: - free_page(addr); + kfree(buf); return ret; } @@ -609,17 +596,11 @@ mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count, struct mwifiex_ds_mem_rw mem_rw; u16 cmd_action; struct mwifiex_private *priv = (void *)file->private_data; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (void *)addr; - size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); + char *buf; - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value); if (ret != 3) { @@ -645,7 +626,7 @@ mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count, ret = count; done: - free_page(addr); + kfree(buf); return ret; } @@ -686,20 +667,13 @@ static ssize_t mwifiex_rdeeprom_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *) addr; - size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1); + char *buf; int ret = 0; int offset = -1, bytes = -1; - if (!buf) - return -ENOMEM; - - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); sscanf(buf, "%d %d", &offset, &bytes); @@ -712,7 +686,7 @@ mwifiex_rdeeprom_write(struct file *file, ret = count; } done: - free_page(addr); + kfree(buf); return ret; } @@ -771,21 +745,15 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct mwifiex_private *priv = (void *)file->private_data; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1); + char *buf; int ret, arg_num; struct mwifiex_ds_hs_cfg hscfg; int conditions = HS_CFG_COND_DEF; u32 gpio = HS_CFG_GPIO_DEF, gap = HS_CFG_GAP_DEF; - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); arg_num = sscanf(buf, "%d %x %x", &conditions, &gpio, &gap); @@ -823,7 +791,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, priv->adapter->hs_enabling = false; ret = count; done: - free_page(addr); + kfree(buf); return ret; } @@ -906,6 +874,34 @@ mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf, return count; } +static ssize_t +mwifiex_reset_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv 
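The debugfs write handlers above all replace the open-coded get_zeroed_page() / copy_from_user() sequence with memdup_user_nul(), which copies the user buffer into a freshly allocated, NUL-terminated kernel string (or returns an ERR_PTR on failure), so the sscanf()/kstrto*() parsing that follows is safe. A minimal sketch of the idiom outside this driver; the handler name and the parsed value are placeholders:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int val;
	int err;

	/* Returns a kmalloc'ed, NUL-terminated copy or an ERR_PTR. */
	buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = kstrtouint(buf, 0, &val);
	kfree(buf);
	if (err)
		return err;

	/* ... act on val ... */
	return count;
}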
= file->private_data; + struct mwifiex_adapter *adapter = priv->adapter; + char cmd; + bool result; + + if (copy_from_user(&cmd, ubuf, sizeof(cmd))) + return -EFAULT; + + if (strtobool(&cmd, &result)) + return -EINVAL; + + if (!result) + return -EINVAL; + + if (adapter->if_ops.card_reset) { + dev_info(adapter->dev, "Resetting per request\n"); + adapter->hw_status = MWIFIEX_HW_STATUS_RESET; + mwifiex_cancel_all_pending_cmd(adapter); + adapter->if_ops.card_reset(adapter); + } + + return count; +} + #define MWIFIEX_DFS_ADD_FILE(name) do { \ if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \ priv, &mwifiex_dfs_##name##_fops)) \ @@ -943,6 +939,7 @@ MWIFIEX_DFS_FILE_OPS(hscfg); MWIFIEX_DFS_FILE_OPS(histogram); MWIFIEX_DFS_FILE_OPS(debug_mask); MWIFIEX_DFS_FILE_OPS(timeshare_coex); +MWIFIEX_DFS_FILE_WRITE_OPS(reset); /* * This function creates the debug FS directory structure and the files. @@ -970,6 +967,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(histogram); MWIFIEX_DFS_ADD_FILE(debug_mask); MWIFIEX_DFS_ADD_FILE(timeshare_coex); + MWIFIEX_DFS_ADD_FILE(reset); } /* diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index 098e1f14dc9a..d9c15cd36f12 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -111,9 +111,9 @@ /* Rate index for OFDM 0 */ #define MWIFIEX_RATE_INDEX_OFDM0 4 -#define MWIFIEX_MAX_STA_NUM 1 -#define MWIFIEX_MAX_UAP_NUM 1 -#define MWIFIEX_MAX_P2P_NUM 1 +#define MWIFIEX_MAX_STA_NUM 3 +#define MWIFIEX_MAX_UAP_NUM 3 +#define MWIFIEX_MAX_P2P_NUM 3 #define MWIFIEX_A_BAND_START_FREQ 5000 diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/marvell/mwifiex/ethtool.c index 58400c69ab26..58400c69ab26 100644 --- a/drivers/net/wireless/mwifiex/ethtool.c +++ b/drivers/net/wireless/marvell/mwifiex/ethtool.c diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 1e1e81a0a8d4..ced7af2be29a 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -537,7 +537,7 @@ enum P2P_MODES { #define EVENT_GET_BSS_TYPE(event_cause) \ (((event_cause) >> 24) & 0x00ff) -#define MWIFIEX_MAX_PATTERN_LEN 20 +#define MWIFIEX_MAX_PATTERN_LEN 40 #define MWIFIEX_MAX_OFFSET_LEN 100 #define STACK_NBYTES 100 #define TYPE_DNUM 1 @@ -1092,9 +1092,15 @@ struct host_cmd_ds_802_11_ad_hoc_start { u8 data_rate[HOSTCMD_SUPPORTED_RATES]; } __packed; -struct host_cmd_ds_802_11_ad_hoc_result { +struct host_cmd_ds_802_11_ad_hoc_start_result { u8 pad[3]; u8 bssid[ETH_ALEN]; + u8 pad2[2]; + u8 result; +} __packed; + +struct host_cmd_ds_802_11_ad_hoc_join_result { + u8 result; } __packed; struct adhoc_bss_desc { @@ -2124,7 +2130,8 @@ struct host_cmd_ds_command { struct host_cmd_ds_802_11_associate_rsp associate_rsp; struct host_cmd_ds_802_11_deauthenticate deauth; struct host_cmd_ds_802_11_ad_hoc_start adhoc_start; - struct host_cmd_ds_802_11_ad_hoc_result adhoc_result; + struct host_cmd_ds_802_11_ad_hoc_start_result start_result; + struct host_cmd_ds_802_11_ad_hoc_join_result join_result; struct host_cmd_ds_802_11_ad_hoc_join adhoc_join; struct host_cmd_ds_802_11d_domain_info domain_info; struct host_cmd_ds_802_11d_domain_info_rsp domain_info_resp; diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index abf52d25b981..c488c3068abc 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -140,7 +140,7 @@ 
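The new reset debugfs entry added above parses a boolean from userspace and, only when it reads true, cancels pending commands and invokes the interface's card_reset() hook; assuming the usual mwifiex debugfs layout it would be exercised with something like "echo 1 > /sys/kernel/debug/mwifiex/mlan0/reset" (the interface name is just an example). A rough sketch of the same parse-and-trigger pattern using strtobool(); the function name and the reset hook are placeholders:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t reset_trigger_write(struct file *file, const char __user *ubuf,
				   size_t count, loff_t *ppos)
{
	char kbuf[8] = "";
	bool do_reset;

	if (!count || count >= sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, ubuf, count))
		return -EFAULT;
	/* strtobool() looks at the first character: "0"/"1"/"y"/"n". */
	if (strtobool(kbuf, &do_reset) || !do_reset)
		return -EINVAL;

	/* ... kick the device-specific reset path here ... */
	return count;
}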
mwifiex_update_autoindex_ies(struct mwifiex_private *priv, if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, - UAP_CUSTOM_IE_I, ie_list, false); + UAP_CUSTOM_IE_I, ie_list, true); return 0; } diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index de74a7773fb6..6f7876ec31b7 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -95,7 +95,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON | HostCmd_ACT_MAC_ETHERNETII_ENABLE; - priv->beacon_period = 100; /* beacon interval */ ; + priv->beacon_period = 100; /* beacon interval */ priv->attempted_bss_desc = NULL; memset(&priv->curr_bss_params, 0, sizeof(priv->curr_bss_params)); priv->listen_interval = MWIFIEX_DEFAULT_LISTEN_INTERVAL; diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 4f0174c64946..4f0174c64946 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index 3cda1f956f0b..cc09a81dbf6a 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -1247,20 +1247,26 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, { int ret = 0; struct mwifiex_adapter *adapter = priv->adapter; - struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result; + struct host_cmd_ds_802_11_ad_hoc_start_result *start_result = + &resp->params.start_result; + struct host_cmd_ds_802_11_ad_hoc_join_result *join_result = + &resp->params.join_result; struct mwifiex_bssdescriptor *bss_desc; - u16 reason_code; + u16 cmd = le16_to_cpu(resp->command); + u8 result; - adhoc_result = &resp->params.adhoc_result; + if (cmd == HostCmd_CMD_802_11_AD_HOC_START) + result = start_result->result; + else + result = join_result->result; bss_desc = priv->attempted_bss_desc; /* Join result code 0 --> SUCCESS */ - reason_code = le16_to_cpu(resp->result); - if (reason_code) { + if (result) { mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n"); if (priv->media_connected) - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, result); memset(&priv->curr_bss_params.bss_descriptor, 0x00, sizeof(struct mwifiex_bssdescriptor)); @@ -1278,7 +1284,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, /* Update the created network descriptor with the new BSSID */ memcpy(bss_desc->mac_address, - adhoc_result->bssid, ETH_ALEN); + start_result->bssid, ETH_ALEN); priv->adhoc_state = ADHOC_STARTED; } else { diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 969ca1e1f3e9..79c16de8743e 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -763,7 +763,7 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, spin_lock_irqsave(&priv->ack_status_lock, flags); id = idr_alloc(&priv->ack_status_frames, orig_skb, - 1, 0xff, GFP_ATOMIC); + 1, 0x10, GFP_ATOMIC); spin_unlock_irqrestore(&priv->ack_status_lock, flags); if (id >= 0) { diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 3959f1c97f4e..2f7f478ce04b 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -84,7 +84,7 @@ enum { 
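On the idr_alloc() change in main.c above: the third and fourth arguments of idr_alloc() are the start (inclusive) and end (exclusive) of the ID range, so narrowing it from [1, 0xff) to [1, 0x10) limits the driver to fifteen outstanding TX-status cookies at a time. A small self-contained sketch of the call; the IDR and helper names are hypothetical:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(cookie_idr);

/* Store @ptr and return an ID in [1, 0x10), or a negative errno
 * (-ENOSPC once all fifteen slots are taken). */
static int cookie_alloc(void *ptr)
{
	return idr_alloc(&cookie_idr, ptr, 1, 0x10, GFP_ATOMIC);
}

static void *cookie_release(int id)
{
	void *ptr = idr_find(&cookie_idr, id);

	idr_remove(&cookie_idr, id);
	return ptr;
}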
#define MWIFIEX_KEY_BUFFER_SIZE 16 #define MWIFIEX_DEFAULT_LISTEN_INTERVAL 10 -#define MWIFIEX_MAX_REGION_CODE 7 +#define MWIFIEX_MAX_REGION_CODE 9 #define DEFAULT_BCN_AVG_FACTOR 8 #define DEFAULT_DATA_AVG_FACTOR 8 @@ -564,14 +564,14 @@ struct mwifiex_private { struct mwifiex_wep_key wep_key[NUM_WEP_KEYS]; u16 wep_key_curr_index; u8 wpa_ie[256]; - u8 wpa_ie_len; + u16 wpa_ie_len; u8 wpa_is_gtk_set; struct host_cmd_ds_802_11_key_material aes_key; struct host_cmd_ds_802_11_key_material_v2 aes_key_v2; u8 wapi_ie[256]; - u8 wapi_ie_len; + u16 wapi_ie_len; u8 *wps_ie; - u8 wps_ie_len; + u16 wps_ie_len; u8 wmm_required; u8 wmm_enabled; u8 wmm_qosinfo; @@ -1273,20 +1273,46 @@ mwifiex_get_priv(struct mwifiex_adapter *adapter, } /* + * This function checks available bss_num when adding new interface or + * changing interface type. + */ +static inline u8 +mwifiex_get_unused_bss_num(struct mwifiex_adapter *adapter, u8 bss_type) +{ + u8 i, j; + int index[MWIFIEX_MAX_BSS_NUM]; + + memset(index, 0, sizeof(index)); + for (i = 0; i < adapter->priv_num; i++) + if (adapter->priv[i]) { + if (adapter->priv[i]->bss_type == bss_type && + !(adapter->priv[i]->bss_mode == + NL80211_IFTYPE_UNSPECIFIED)) { + index[adapter->priv[i]->bss_num] = 1; + } + } + for (j = 0; j < MWIFIEX_MAX_BSS_NUM; j++) + if (!index[j]) + return j; + return -1; +} + +/* * This function returns the first available unused private structure pointer. */ static inline struct mwifiex_private * -mwifiex_get_unused_priv(struct mwifiex_adapter *adapter) +mwifiex_get_unused_priv_by_bss_type(struct mwifiex_adapter *adapter, + u8 bss_type) { - int i; + u8 i; - for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - if (adapter->priv[i]->bss_mode == - NL80211_IFTYPE_UNSPECIFIED) - break; + for (i = 0; i < adapter->priv_num; i++) + if (adapter->priv[i]->bss_mode == + NL80211_IFTYPE_UNSPECIFIED) { + adapter->priv[i]->bss_num = + mwifiex_get_unused_bss_num(adapter, bss_type); + break; } - } return ((i < adapter->priv_num) ? adapter->priv[i] : NULL); } diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 21192b6f9c64..6d0dc40e20e5 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2129,14 +2129,14 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) struct mwifiex_adapter *adapter; if (!pdev) { - pr_debug("info: %s: pdev is NULL\n", (u8 *)pdev); + pr_err("info: %s: pdev is NULL\n", __func__); goto exit; } card = pci_get_drvdata(pdev); if (!card || !card->adapter) { - pr_debug("info: %s: card=%p adapter=%p\n", __func__, card, - card ? card->adapter : NULL); + pr_err("info: %s: card=%p adapter=%p\n", __func__, card, + card ? 
card->adapter : NULL); goto exit; } adapter = card->adapter; @@ -2473,50 +2473,44 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) pci_set_master(pdev); - mwifiex_dbg(adapter, INFO, - "try set_consistent_dma_mask(32)\n"); + pr_notice("try set_consistent_dma_mask(32)\n"); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - mwifiex_dbg(adapter, ERROR, - "set_dma_mask(32) failed\n"); + pr_err("set_dma_mask(32) failed\n"); goto err_set_dma_mask; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - mwifiex_dbg(adapter, ERROR, - "set_consistent_dma_mask(64) failed\n"); + pr_err("set_consistent_dma_mask(64) failed\n"); goto err_set_dma_mask; } ret = pci_request_region(pdev, 0, DRV_NAME); if (ret) { - mwifiex_dbg(adapter, ERROR, - "req_reg(0) error\n"); + pr_err("req_reg(0) error\n"); goto err_req_region0; } card->pci_mmap = pci_iomap(pdev, 0, 0); if (!card->pci_mmap) { - mwifiex_dbg(adapter, ERROR, "iomap(0) error\n"); + pr_err("iomap(0) error\n"); ret = -EIO; goto err_iomap0; } ret = pci_request_region(pdev, 2, DRV_NAME); if (ret) { - mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n"); + pr_err("req_reg(2) error\n"); goto err_req_region2; } card->pci_mmap1 = pci_iomap(pdev, 2, 0); if (!card->pci_mmap1) { - mwifiex_dbg(adapter, ERROR, - "iomap(2) error\n"); + pr_err("iomap(2) error\n"); ret = -EIO; goto err_iomap2; } - mwifiex_dbg(adapter, INFO, - "PCI memory map Virt0: %p PCI memory map Virt2: %p\n", - card->pci_mmap, card->pci_mmap1); + pr_notice("PCI memory map Virt0: %p PCI memory map Virt2: %p\n", + card->pci_mmap, card->pci_mmap1); card->cmdrsp_buf = NULL; ret = mwifiex_pcie_create_txbd_ring(adapter); @@ -2599,6 +2593,30 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) kfree(card); } +static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) +{ + int ret; + struct pcie_service_card *card = adapter->card; + struct pci_dev *pdev = card->dev; + + if (pci_enable_msi(pdev) != 0) + pci_disable_msi(pdev); + else + card->msi_enable = 1; + + mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable); + + ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, + "MRVL_PCIE", pdev); + if (ret) { + pr_err("request_irq failed: ret=%d\n", ret); + adapter->card = NULL; + return -1; + } + + return 0; +} + /* * This function registers the PCIE device. 
* @@ -2606,23 +2624,16 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) */ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) { - int ret; struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; /* save adapter pointer in card */ card->adapter = adapter; + adapter->dev = &pdev->dev; - ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, - "MRVL_PCIE", pdev); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "request_irq failed: ret=%d\n", ret); - adapter->card = NULL; + if (mwifiex_pcie_request_irq(adapter)) return -1; - } - adapter->dev = &pdev->dev; adapter->tx_buf_size = card->pcie.tx_buf_size; adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 48e549c3b285..6fc28737b576 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -210,17 +210,17 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG, .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG, .tx_rdptr = 0xC1A4, - .tx_wrptr = 0xC1A8, - .rx_rdptr = 0xC1A8, + .tx_wrptr = 0xC174, + .rx_rdptr = 0xC174, .rx_wrptr = 0xC1A4, .evt_rdptr = PCIE_SCRATCH_10_REG, .evt_wrptr = PCIE_SCRATCH_11_REG, .drv_rdy = PCIE_SCRATCH_12_REG, .tx_start_ptr = 16, .tx_mask = 0x0FFF0000, - .tx_wrap_mask = 0x01FF0000, + .tx_wrap_mask = 0x1FFF0000, .rx_mask = 0x00000FFF, - .rx_wrap_mask = 0x000001FF, + .rx_wrap_mask = 0x00001FFF, .tx_rollover_ind = BIT(28), .rx_rollover_ind = BIT(12), .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND, @@ -326,6 +326,7 @@ struct pcie_service_card { dma_addr_t sleep_cookie_pbase; void __iomem *pci_mmap; void __iomem *pci_mmap1; + int msi_enable; }; static inline int @@ -342,6 +343,7 @@ mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr) return 1; break; case PCIE_DEVICE_ID_MARVELL_88W8897: + case PCIE_DEVICE_ID_MARVELL_88W8997: if (((card->txbd_wrptr & reg->tx_mask) == (rdptr & reg->tx_mask)) && ((card->txbd_wrptr & reg->tx_rollover_ind) == diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index c20017ced566..c20017ced566 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 78a8474e1a3d..4c8cae682c89 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -796,8 +796,8 @@ mwifiex_sdio_interrupt(struct sdio_func *func) card = sdio_get_drvdata(func); if (!card || !card->adapter) { - pr_debug("int: func=%p card=%p adapter=%p\n", - func, card, card ? card->adapter : NULL); + pr_err("int: func=%p card=%p adapter=%p\n", + func, card, card ? 
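The IRQ setup factored into mwifiex_pcie_request_irq() above tries MSI first and quietly falls back to the legacy interrupt when pci_enable_msi() fails. A generic sketch of that pattern, not the driver's exact code (which passes IRQF_SHARED in both cases); the function and device names are placeholders:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_request_irq(struct pci_dev *pdev, irq_handler_t handler,
			       void *ctx)
{
	bool msi = !pci_enable_msi(pdev);	/* returns 0 on success */
	int ret;

	/* Legacy INTx lines may be shared; a dedicated MSI vector is not. */
	ret = request_irq(pdev->irq, handler, msi ? 0 : IRQF_SHARED,
			  "example", ctx);
	if (ret && msi)
		pci_disable_msi(pdev);
	return ret;
}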
card->adapter : NULL); return; } adapter = card->adapter; @@ -2053,8 +2053,19 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) /* Allocate skb pointer buffers */ card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) * card->mp_agg_pkt_limit, GFP_KERNEL); + if (!card->mpa_rx.skb_arr) { + kfree(card->mp_regs); + return -ENOMEM; + } + card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) * card->mp_agg_pkt_limit, GFP_KERNEL); + if (!card->mpa_rx.len_arr) { + kfree(card->mp_regs); + kfree(card->mpa_rx.skb_arr); + return -ENOMEM; + } + ret = mwifiex_alloc_sdio_mpa_buffers(adapter, card->mp_tx_agg_buf_size, card->mp_rx_agg_buf_size); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index b9fbc5cf6262..b9fbc5cf6262 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index e486867a4c67..e486867a4c67 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 9ac7aa2431b4..9ac7aa2431b4 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index ff3ee9dfbbd5..ff3ee9dfbbd5 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index a6c8a4f7bfe9..6a4fc5d183cf 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -272,7 +272,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, priv->scan_block = false; if (bss) { - mwifiex_process_country_ie(priv, bss); + if (adapter->region_code == 0x00) + mwifiex_process_country_ie(priv, bss); /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), @@ -758,7 +759,7 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, return -1; } memcpy(priv->wpa_ie, ie_data_ptr, ie_len); - priv->wpa_ie_len = (u8) ie_len; + priv->wpa_ie_len = ie_len; mwifiex_dbg(priv->adapter, CMD, "cmd: Set Wpa_ie_len=%d IE=%#x\n", priv->wpa_ie_len, priv->wpa_ie[0]); @@ -923,9 +924,8 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, if (encrypt_key->key_disable) { memset(&priv->wep_key[index], 0, sizeof(struct mwifiex_wep_key)); - if (wep_key->key_length) - goto done; - } + goto done; + } if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) enc_key = encrypt_key; @@ -1293,6 +1293,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, struct ieee_types_vendor_header *pvendor_ie; const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; + u16 unparsed_len = ie_len; + int find_wpa_ie = 0; /* If the passed length is zero, reset the buffer */ if (!ie_len) { @@ -1304,40 +1306,69 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, return -1; } pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; - /* Test to see if it is a WPA IE, if not, then it is a gen IE */ - if (((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) && - (!memcmp(pvendor_ie->oui, 
wpa_oui, sizeof(wpa_oui)))) || - (pvendor_ie->element_id == WLAN_EID_RSN)) { - /* IE is a WPA/WPA2 IE so call set_wpa function */ - ret = mwifiex_set_wpa_ie_helper(priv, ie_data_ptr, ie_len); - priv->wps.session_enable = false; + while (pvendor_ie) { + if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) { + /* Test to see if it is a WPA IE, if not, then it is a + * gen IE + */ + if (!memcmp(pvendor_ie->oui, wpa_oui, + sizeof(wpa_oui))) { + find_wpa_ie = 1; + break; + } - return ret; - } else if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { + /* Test to see if it is a WPS IE, if so, enable + * wps session flag + */ + if (!memcmp(pvendor_ie->oui, wps_oui, + sizeof(wps_oui))) { + priv->wps.session_enable = true; + mwifiex_dbg(priv->adapter, MSG, + "info: WPS Session Enabled.\n"); + ret = mwifiex_set_wps_ie(priv, + (u8 *)pvendor_ie, + unparsed_len); + } + } + + if (pvendor_ie->element_id == WLAN_EID_RSN) { + find_wpa_ie = 1; + break; + } + + if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { /* IE is a WAPI IE so call set_wapi function */ - ret = mwifiex_set_wapi_ie(priv, ie_data_ptr, ie_len); + ret = mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, + unparsed_len); + return ret; + } + unparsed_len -= (pvendor_ie->len + + sizeof(struct ieee_types_header)); + + if (unparsed_len <= sizeof(struct ieee_types_header)) + pvendor_ie = NULL; + else + pvendor_ie = (struct ieee_types_vendor_header *) + (((u8 *)pvendor_ie) + pvendor_ie->len + + sizeof(struct ieee_types_header)); + } + + if (find_wpa_ie) { + /* IE is a WPA/WPA2 IE so call set_wpa function */ + ret = mwifiex_set_wpa_ie_helper(priv, (u8 *)pvendor_ie, + unparsed_len); + priv->wps.session_enable = false; return ret; } + /* * Verify that the passed length is not larger than the * available space remaining in the buffer */ if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { - /* Test to see if it is a WPS IE, if so, enable - * wps session flag - */ - pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; - if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) && - (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) { - priv->wps.session_enable = true; - mwifiex_dbg(priv->adapter, INFO, - "info: WPS Session Enabled.\n"); - ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len); - } - /* Append the passed data to the end of the genIeBuffer */ memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr, diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index d4d4cb1ce95b..00fcbda09349 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -215,7 +215,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, if (rx_pkt_type == PKT_TYPE_MGMT) { ret = mwifiex_process_mgmt_packet(priv, skb); if (ret) - mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed"); + mwifiex_dbg(adapter, DATA, "Rx of mgmt packet failed"); dev_kfree_skb_any(skb); return ret; } diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c index f6683ea6bd5d..f6683ea6bd5d 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 9275f9c3f869..9275f9c3f869 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c diff --git a/drivers/net/wireless/mwifiex/txrx.c 
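The reworked mwifiex_set_gen_ie_helper() above now iterates over every information element in the supplied buffer, advancing by each element's length field, instead of classifying only the first element. As a generic illustration of that kind of IE walk (IEEE 802.11 elements are a 1-byte ID, a 1-byte length, then the payload); the function name is hypothetical and this is not the driver's parser:

#include <linux/types.h>

/* Visit each information element in ies[0..len); returns the number
 * of well-formed elements found. */
static int walk_ies(const u8 *ies, size_t len)
{
	size_t off = 0;
	int n = 0;

	while (len - off >= 2 && len - off >= 2 + ies[off + 1]) {
		u8 id = ies[off];
		u8 elen = ies[off + 1];

		/* ies[off + 2] .. ies[off + 1 + elen] is the payload;
		 * dispatch on 'id' here (WLAN_EID_VENDOR_SPECIFIC, ...). */
		(void)id;
		n++;
		off += 2 + elen;
	}
	return n;
}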
b/drivers/net/wireless/marvell/mwifiex/txrx.c index bf6182b646a5..bf6182b646a5 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 759a6ada5b0f..e791166d90c4 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -848,9 +848,9 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, - UAP_BSS_PARAMS_I, bss_cfg, false)) { + UAP_BSS_PARAMS_I, bss_cfg, true)) { mwifiex_dbg(priv->adapter, ERROR, - "Failed to set the SSID\n"); + "Failed to set AP configuration\n"); return -1; } @@ -865,7 +865,7 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, } if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START, - HostCmd_ACT_GEN_SET, 0, NULL, false)) { + HostCmd_ACT_GEN_SET, 0, NULL, true)) { mwifiex_dbg(priv->adapter, ERROR, "Failed to start the BSS\n"); return -1; diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index 86ff54296f39..86ff54296f39 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 74d5d7238633..52f7981a8afc 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -310,8 +310,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, if (rx_pkt_type == PKT_TYPE_MGMT) { ret = mwifiex_process_mgmt_packet(priv, skb); if (ret) - mwifiex_dbg(adapter, ERROR, - "Rx of mgmt packet failed"); + mwifiex_dbg(adapter, DATA, "Rx of mgmt packet failed"); dev_kfree_skb_any(skb); return ret; } diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index e43aff932360..e43aff932360 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h index b4e9246bbcdc..b4e9246bbcdc 100644 --- a/drivers/net/wireless/mwifiex/usb.h +++ b/drivers/net/wireless/marvell/mwifiex/usb.h diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 0cec8a64473e..0cec8a64473e 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h index b541d66c01eb..b541d66c01eb 100644 --- a/drivers/net/wireless/mwifiex/util.h +++ b/drivers/net/wireless/marvell/mwifiex/util.h diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index acccd6734e3b..acccd6734e3b 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h index 38f09762bd2f..38f09762bd2f 100644 --- a/drivers/net/wireless/mwifiex/wmm.h +++ b/drivers/net/wireless/marvell/mwifiex/wmm.h diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 30e3aaae32e2..30e3aaae32e2 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig index cba300c6b5da..28843fed750a 
100644 --- a/drivers/net/wireless/mediatek/Kconfig +++ b/drivers/net/wireless/mediatek/Kconfig @@ -1,10 +1,14 @@ -menuconfig WL_MEDIATEK - bool "Mediatek Wireless LAN support" +config WLAN_VENDOR_MEDIATEK + bool "MediaTek devices" + default y ---help--- - Enable community drivers for MediaTek WiFi devices. - Those drivers make use of the Linux mac80211 stack. + If you have a wireless card belonging to this class, say Y. + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. -if WL_MEDIATEK +if WLAN_VENDOR_MEDIATEK source "drivers/net/wireless/mediatek/mt7601u/Kconfig" -endif # WL_MEDIATEK +endif # WLAN_VENDOR_MEDIATEK diff --git a/drivers/net/wireless/ralink/Kconfig b/drivers/net/wireless/ralink/Kconfig new file mode 100644 index 000000000000..41dbf3130e2b --- /dev/null +++ b/drivers/net/wireless/ralink/Kconfig @@ -0,0 +1,16 @@ +config WLAN_VENDOR_RALINK + bool "Ralink devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_RALINK + +source "drivers/net/wireless/ralink/rt2x00/Kconfig" + +endif # WLAN_VENDOR_RALINK diff --git a/drivers/net/wireless/ralink/Makefile b/drivers/net/wireless/ralink/Makefile new file mode 100644 index 000000000000..f84c0a2e4f4d --- /dev/null +++ b/drivers/net/wireless/ralink/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_RT2X00) += rt2x00/ diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig index de62f5dcb62f..de62f5dcb62f 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/ralink/rt2x00/Kconfig diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/ralink/rt2x00/Makefile index 24a66015a495..24a66015a495 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/ralink/rt2x00/Makefile diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 9a3966cd6fbe..9a3966cd6fbe 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h index 0fd3a9d01a60..0fd3a9d01a60 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 1a6740b4d396..1a6740b4d396 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h index 573e87bcc553..573e87bcc553 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index b50d873145d5..d26018f30b7d 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -229,7 +229,10 @@ static void _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 
*value) { - rt2500usb_register_read(rt2x00dev, offset, (u16 *)value); + u16 tmp; + + rt2500usb_register_read(rt2x00dev, offset, &tmp); + *value = tmp; } static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h index 78cc035b2d17..78cc035b2d17 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 95c1d7c0a2f3..95c1d7c0a2f3 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 9733b31a780d..9733b31a780d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 440790b92b19..440790b92b19 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index de4790b41be7..de4790b41be7 100644 --- a/drivers/net/wireless/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h index b63312ce3f27..b63312ce3f27 100644 --- a/drivers/net/wireless/rt2x00/rt2800mmio.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 0af22573a2eb..0af22573a2eb 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/ralink/rt2x00/rt2800pci.h index 9dfef4607d6b..9dfef4607d6b 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.h diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index a985a5a7945e..a985a5a7945e 100644 --- a/drivers/net/wireless/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index bf9afbf46c1b..bf9afbf46c1b 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/ralink/rt2x00/rt2800usb.h index ea7cac095997..ea7cac095997 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.h diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 3282ddb766f4..3282ddb766f4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c index 7e8bb1198ae9..7e8bb1198ae9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c index a2fd05ba25ca..a2fd05ba25ca 100644 --- a/drivers/net/wireless/rt2x00/rt2x00crypto.c +++ 
b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 90fdb02b55e7..90fdb02b55e7 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.h index e65712c235bd..e65712c235bd 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.h diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 5639ed816813..5639ed816813 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/ralink/rt2x00/rt2x00dump.h index 4c0e01b5d515..4c0e01b5d515 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dump.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dump.h diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/ralink/rt2x00/rt2x00firmware.c index 5813300f68a2..5813300f68a2 100644 --- a/drivers/net/wireless/rt2x00/rt2x00firmware.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00firmware.c diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/ralink/rt2x00/rt2x00leds.c index c681d04b506c..c681d04b506c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00leds.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00leds.c diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/ralink/rt2x00/rt2x00leds.h index b2c5269570da..b2c5269570da 100644 --- a/drivers/net/wireless/rt2x00/rt2x00leds.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00leds.h diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h index fb7c349ccc9c..fb7c349ccc9c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c index 017188e5a736..017188e5a736 100644 --- a/drivers/net/wireless/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 3c26ee65a415..3c26ee65a415 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c index f0178fd4fe5f..f0178fd4fe5f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h index 701c3127efb9..701c3127efb9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mmio.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.c index d93db4b0371b..eb6dbcd4fddf 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.c @@ -149,6 +149,7 @@ exit_free_device: ieee80211_free_hw(hw); exit_release_regions: + pci_clear_mwi(pci_dev); pci_release_regions(pci_dev); exit_disable_device: @@ -173,6 +174,7 @@ void rt2x00pci_remove(struct pci_dev *pci_dev) /* * Free the PCI device data. 
*/ + pci_clear_mwi(pci_dev); pci_disable_device(pci_dev); pci_release_regions(pci_dev); } diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h index bc0ca5f58f38..bc0ca5f58f38 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 68b620b2462f..68b620b2462f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h index 2233b911a1d7..2233b911a1d7 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/ralink/rt2x00/rt2x00reg.h index 3cc541d13d67..3cc541d13d67 100644 --- a/drivers/net/wireless/rt2x00/rt2x00reg.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00reg.h diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index 69a0cdadb07f..69a0cdadb07f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h index 9948d355e9a4..9948d355e9a4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00soc.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 7627af6098eb..7627af6098eb 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.h index 569363da00a2..569363da00a2 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.h diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index c0e730ea1b69..c0e730ea1b69 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h index 1442075a8382..1442075a8382 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 7081e13b4fd6..7081e13b4fd6 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/ralink/rt2x00/rt73usb.h index 4a4f235466d1..4a4f235466d1 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.h +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.h diff --git a/drivers/net/wireless/realtek/Kconfig b/drivers/net/wireless/realtek/Kconfig new file mode 100644 index 000000000000..8a8ba2003964 --- /dev/null +++ b/drivers/net/wireless/realtek/Kconfig @@ -0,0 +1,18 @@ +config WLAN_VENDOR_REALTEK + bool "Realtek devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. 
If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_REALTEK + +source "drivers/net/wireless/realtek/rtl818x/Kconfig" +source "drivers/net/wireless/realtek/rtlwifi/Kconfig" +source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig" + +endif # WLAN_VENDOR_REALTEK diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 53261d6f8578..451456835f87 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -3356,9 +3356,8 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) "Dot11 channel / HsMode(HsChnl)", wifi_dot11_chnl, bt_hson, wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x ", - "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], - coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); @@ -3409,17 +3408,9 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x ", + "\r\n %-35s = %7ph(%d)", GLBtInfoSrc8192e2Ant[i], - coex_sta->bt_info_c2h[i][0], - coex_sta->bt_info_c2h[i][1], - coex_sta->bt_info_c2h[i][2], - coex_sta->bt_info_c2h[i][3]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "%02x %02x %02x(%d)", - coex_sta->bt_info_c2h[i][4], - coex_sta->bt_info_c2h[i][5], - coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } } @@ -3453,10 +3444,8 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para[0], - coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], - coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + "\r\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, ps_tdma_case, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index c4acd403e5f6..7e239d3cea26 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -2457,10 +2457,9 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) "Dot11 channel / HsChnl(HsMode)", wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], - coex_dm->wifi_chnl_info[2]); + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); @@ -2525,15 +2524,9 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) { if 
(coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", + "\r\n %-35s = %7ph(%d)", GLBtInfoSrc8723b1Ant[i], - coex_sta->bt_info_c2h[i][0], - coex_sta->bt_info_c2h[i][1], - coex_sta->bt_info_c2h[i][2], - coex_sta->bt_info_c2h[i][3], - coex_sta->bt_info_c2h[i][4], - coex_sta->bt_info_c2h[i][5], - coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } } @@ -2569,10 +2562,8 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) pstdmacase = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para[0], - coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], - coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + "\r\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, pstdmacase, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ", diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index f2b9d11adc9e..c43ab59a690a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -3215,9 +3215,8 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) "Dot11 channel / HsChnl(HsMode)", wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x ", - "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], - coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); @@ -3259,16 +3258,9 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x " - "%02x %02x %02x %02x(%d)", + "\r\n %-35s = %7ph(%d)", glbt_info_src_8723b_2ant[i], - coex_sta->bt_info_c2h[i][0], - coex_sta->bt_info_c2h[i][1], - coex_sta->bt_info_c2h[i][2], - coex_sta->bt_info_c2h[i][3], - coex_sta->bt_info_c2h[i][4], - coex_sta->bt_info_c2h[i][5], - coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } } @@ -3296,10 +3288,8 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para[0], - coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], - coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + "\r\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, ps_tdma_case, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index b72e5377bdbc..9cecf174a37d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -2302,10 +2302,9 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist 
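The RT_TRACE conversions in these btcoexist files rely on the kernel's %ph printk extension, which prints a small buffer as space-separated hex bytes and so replaces long runs of individual %02x arguments; the byte count comes from a literal field width (e.g. %5ph) or from %*ph with an int argument, capped at 64 bytes. A tiny illustration with sample values:

#include <linux/printk.h>
#include <linux/types.h>

static void dump_tdma_example(void)
{
	u8 tdma_para[5] = { 0x51, 0x2c, 0x03, 0x10, 0x50 };	/* sample data */

	/* prints: "PS TDMA = 51 2c 03 10 50" */
	pr_debug("PS TDMA = %5ph\n", tdma_para);

	/* same output, with the length supplied at run time */
	pr_debug("PS TDMA = %*ph\n", (int)sizeof(tdma_para), tdma_para);
}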
*btcoexist) wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x ", + "\r\n %-35s = %3ph ", "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], - coex_dm->wifi_chnl_info[2]); + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); @@ -2366,15 +2365,9 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", + "\r\n %-35s = %7ph(%d)", glbt_info_src_8821a_1ant[i], - coex_sta->bt_info_c2h[i][0], - coex_sta->bt_info_c2h[i][1], - coex_sta->bt_info_c2h[i][2], - coex_sta->bt_info_c2h[i][3], - coex_sta->bt_info_c2h[i][4], - coex_sta->bt_info_c2h[i][5], - coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } } @@ -2412,13 +2405,9 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "\r\n %-35s = %5ph case-%d (auto:%d)", "PS TDMA", - coex_dm->ps_tdma_para[0], - coex_dm->ps_tdma_para[1], - coex_dm->ps_tdma_para[2], - coex_dm->ps_tdma_para[3], - coex_dm->ps_tdma_para[4], + coex_dm->ps_tdma_para, ps_tdma_case, coex_dm->auto_tdma_adjust); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index cf819f02ed23..044d914291c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -3393,10 +3393,9 @@ ex_halbtc8821a2ant_display_coex_info( wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x ", + "\r\n %-35s = %3ph ", "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], - coex_dm->wifi_chnl_info[2]); + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); @@ -3454,15 +3453,9 @@ ex_halbtc8821a2ant_display_coex_info( for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", + "\r\n %-35s = %7ph(%d)", glbt_info_src_8821a_2ant[i], - coex_sta->bt_info_c2h[i][0], - coex_sta->bt_info_c2h[i][1], - coex_sta->bt_info_c2h[i][2], - coex_sta->bt_info_c2h[i][3], - coex_sta->bt_info_c2h[i][4], - coex_sta->bt_info_c2h[i][5], - coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } } @@ -3494,11 +3487,9 @@ ex_halbtc8821a2ant_display_coex_info( if (!btcoexist->manual_control) { ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", + "\r\n %-35s = %5ph case-%d", "PS TDMA", - coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1], - coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3], - coex_dm->ps_tdma_para[4], ps_tdma_case); + coex_dm->ps_tdma_para, ps_tdma_case); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c 
b/drivers/net/wireless/realtek/rtlwifi/core.c index c925a4dff599..4ae421ef30d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1833,8 +1833,7 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pskb = __skb_dequeue(&ring->queue); - if (pskb) - kfree_skb(pskb); + kfree_skb(pskb); /*this is wrong, fill_tx_cmddesc needs update*/ pdesc = &ring->desc[0]; diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index f46c9d7f6528..7f471bff435c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) hw_queue); if (rx_remained_cnt == 0) return; - + buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[ + rtlpci->rx_ring[rxring_idx].idx]; + pdesc = (struct rtl_rx_desc *)skb->data; } else { /* rx descriptor */ pdesc = &rtlpci->rx_ring[rxring_idx].desc[ rtlpci->rx_ring[rxring_idx].idx]; @@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) new_skb = dev_alloc_skb(rtlpci->rxbuffersize); if (unlikely(!new_skb)) goto no_new; - if (rtlpriv->use_new_trx_flow) { - buffer_desc = - &rtlpci->rx_ring[rxring_idx].buffer_desc - [rtlpci->rx_ring[rxring_idx].idx]; - /*means rx wifi info*/ - pdesc = (struct rtl_rx_desc *)skb->data; - } memset(&rx_status , 0 , sizeof(rx_status)); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, (u8 *)pdesc, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 11344121c55e..47e32cb0ec1a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) u8 tid; rtl8188ee_bt_reg_init(hw); - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; @@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; + rtlpriv->cfg->mod_params->disable_watchdog = + rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); if (!rtlpriv->psc.inactiveps) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index de6cb6c3a48c..4780bdc63b2b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; if (!rtlpriv->psc.inactiveps) pr_info("rtl8192ce: Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index fd4a5353d216..7c6f7f0d18c6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index b19d0398215f..c6e09a19de1a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index e1fd27c888bf..31baca41ac2f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; if (!rtlpriv->psc.inactiveps) pr_info("Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) @@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 3859b3e3d158..ff49a8c0ff61 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = 
rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; + rtlpriv->cfg->mod_params->disable_watchdog = + rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; @@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .debug = DBG_EMERG, + .msi_support = false, + .disable_watchdog = false, }; static struct rtl_hal_cfg rtl8723e_hal_cfg = { @@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444); module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index d091f1d5f91e..a78eaeda0008 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); rtl8723be_bt_reg_init(hw); - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; @@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; + rtlpriv->cfg->mod_params->disable_watchdog = + rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; @@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, + .msi_support = false, + .disable_watchdog = false, + .debug = DBG_EMERG, }; static struct rtl_hal_cfg rtl8723be_hal_cfg = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index a2f5e89bedfe..6e518625edbe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -318,9 +318,7 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); - if (pskb) - kfree_skb(pskb); - + 
kfree_skb(pskb); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pdesc = &ring->desc[0]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 142bdff4ed60..4159f9b14db6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -95,8 +95,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); rtl8821ae_bt_reg_init(hw); - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; @@ -168,12 +166,15 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear; + rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; + rtlpriv->cfg->mod_params->sw_crypto = + rtlpriv->cfg->mod_params->sw_crypto; + rtlpriv->cfg->mod_params->disable_watchdog = + rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2721cf89fb16..aac1ed3f7bb4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw, ieee80211_rx(hw, skb); else dev_kfree_skb_any(skb); + } else { + dev_kfree_skb_any(skb); } } diff --git a/drivers/net/wireless/rsi/Kconfig b/drivers/net/wireless/rsi/Kconfig index 35245f994c10..7c5e4ca4e3d0 100644 --- a/drivers/net/wireless/rsi/Kconfig +++ b/drivers/net/wireless/rsi/Kconfig @@ -1,3 +1,16 @@ +config WLAN_VENDOR_RSI + bool "Redpine Signals Inc devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_RSI + config RSI_91X tristate "Redpine Signals Inc 91x WLAN driver support" depends on MAC80211 @@ -28,3 +41,5 @@ config RSI_USB ---help--- This option enables the USB bus support in rsi drivers. Select M (recommended), if you have a RSI 1x1 wireless module. 
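The rtlwifi btcoexist hunks above replace long runs of "%02x" arguments with the kernel's %ph printk extension, which dumps a small fixed-size buffer as space-separated hex bytes (e.g. "%5ph" prints five bytes). As a minimal standalone sketch of the resulting output format, with hypothetical names and values, since %ph itself only exists inside the kernel's vsnprintf:

#include <stdio.h>
#include <stddef.h>

/* prints 'len' bytes as "xx xx xx", roughly what the kernel's "%*ph" emits */
static void demo_print_hex(const char *label, const unsigned char *buf, size_t len)
{
	size_t i;

	printf("%-35s =", label);
	for (i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	printf("\n");
}

int main(void)
{
	unsigned char ps_tdma_para[5] = { 0x51, 0x3a, 0x03, 0x10, 0x50 };

	/* equivalent in spirit to: RT_TRACE(..., "%-35s = %5ph", "PS TDMA", para) */
	demo_print_hex("PS TDMA", ps_tdma_para, sizeof(ps_tdma_para));
	return 0;
}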
+ +endif # WLAN_VENDOR_RSI diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 8d110fd9eba1..e43b59d5b53b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1023,7 +1023,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) return -ENOMEM; } - selected_rates = kmalloc(2 * RSI_TBL_SZ, GFP_KERNEL); + selected_rates = kzalloc(2 * RSI_TBL_SZ, GFP_KERNEL); if (!selected_rates) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", __func__); @@ -1032,7 +1032,6 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) } memset(skb->data, 0, sizeof(struct rsi_auto_rate)); - memset(selected_rates, 0, 2 * RSI_TBL_SZ); auto_rate = (struct rsi_auto_rate *)skb->data; @@ -1227,7 +1226,7 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); mgmt_frame->desc_word[1] = cpu_to_le16(BLOCK_HW_QUEUE); - if (block_event == true) { + if (block_event) { rsi_dbg(INFO_ZONE, "blocking the data qs\n"); mgmt_frame->desc_word[4] = cpu_to_le16(0xf); } else { diff --git a/drivers/net/wireless/st/Kconfig b/drivers/net/wireless/st/Kconfig new file mode 100644 index 000000000000..969b4f6e53b5 --- /dev/null +++ b/drivers/net/wireless/st/Kconfig @@ -0,0 +1,16 @@ +config WLAN_VENDOR_ST + bool "STMicroelectronics devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_ST + +source "drivers/net/wireless/st/cw1200/Kconfig" + +endif # WLAN_VENDOR_ST diff --git a/drivers/net/wireless/st/Makefile b/drivers/net/wireless/st/Makefile new file mode 100644 index 000000000000..a60d6350ba46 --- /dev/null +++ b/drivers/net/wireless/st/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_CW1200) += cw1200/ diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/st/cw1200/Kconfig index 0880742eab17..0880742eab17 100644 --- a/drivers/net/wireless/cw1200/Kconfig +++ b/drivers/net/wireless/st/cw1200/Kconfig diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/st/cw1200/Makefile index b086aac6547a..b086aac6547a 100644 --- a/drivers/net/wireless/cw1200/Makefile +++ b/drivers/net/wireless/st/cw1200/Makefile diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c index 92d299aa257c..92d299aa257c 100644 --- a/drivers/net/wireless/cw1200/bh.c +++ b/drivers/net/wireless/st/cw1200/bh.c diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/st/cw1200/bh.h index af6a4853728f..af6a4853728f 100644 --- a/drivers/net/wireless/cw1200/bh.h +++ b/drivers/net/wireless/st/cw1200/bh.h diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/st/cw1200/cw1200.h index 1ad7d3602520..1ad7d3602520 100644 --- a/drivers/net/wireless/cw1200/cw1200.h +++ b/drivers/net/wireless/st/cw1200/cw1200.h diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c index d3acc85932a5..d3acc85932a5 100644 --- a/drivers/net/wireless/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c index a740083634d8..a740083634d8 100644 --- 
a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c index 34f97c31eecf..34f97c31eecf 100644 --- a/drivers/net/wireless/cw1200/debug.c +++ b/drivers/net/wireless/st/cw1200/debug.c diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/st/cw1200/debug.h index b525aba53bfc..b525aba53bfc 100644 --- a/drivers/net/wireless/cw1200/debug.h +++ b/drivers/net/wireless/st/cw1200/debug.h diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c index 30e7646d04af..30e7646d04af 100644 --- a/drivers/net/wireless/cw1200/fwio.c +++ b/drivers/net/wireless/st/cw1200/fwio.c diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/st/cw1200/fwio.h index ea3099362cdf..ea3099362cdf 100644 --- a/drivers/net/wireless/cw1200/fwio.h +++ b/drivers/net/wireless/st/cw1200/fwio.h diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/st/cw1200/hwbus.h index 8b2fc831c3de..8b2fc831c3de 100644 --- a/drivers/net/wireless/cw1200/hwbus.h +++ b/drivers/net/wireless/st/cw1200/hwbus.h diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/st/cw1200/hwio.c index ff230b7aeedd..ff230b7aeedd 100644 --- a/drivers/net/wireless/cw1200/hwio.c +++ b/drivers/net/wireless/st/cw1200/hwio.c diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/st/cw1200/hwio.h index ddf52669dc5b..ddf52669dc5b 100644 --- a/drivers/net/wireless/cw1200/hwio.h +++ b/drivers/net/wireless/st/cw1200/hwio.h diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index 0e51e27d2e3f..0e51e27d2e3f 100644 --- a/drivers/net/wireless/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c index d2202ae92bdd..d2202ae92bdd 100644 --- a/drivers/net/wireless/cw1200/pm.c +++ b/drivers/net/wireless/st/cw1200/pm.c diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/st/cw1200/pm.h index 3ed90ff22bb8..3ed90ff22bb8 100644 --- a/drivers/net/wireless/cw1200/pm.h +++ b/drivers/net/wireless/st/cw1200/pm.h diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c index 0ba5ef9b3e7b..0ba5ef9b3e7b 100644 --- a/drivers/net/wireless/cw1200/queue.c +++ b/drivers/net/wireless/st/cw1200/queue.c diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/st/cw1200/queue.h index 119f9c79c14e..119f9c79c14e 100644 --- a/drivers/net/wireless/cw1200/queue.h +++ b/drivers/net/wireless/st/cw1200/queue.h diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index bff81b8d4164..bff81b8d4164 100644 --- a/drivers/net/wireless/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/st/cw1200/scan.h index cc75459e5784..cc75459e5784 100644 --- a/drivers/net/wireless/cw1200/scan.h +++ b/drivers/net/wireless/st/cw1200/scan.h diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index 95a7fdb3cc1c..06321c799c90 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -873,12 +873,6 @@ int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value) else val32 = 0; /* disabled */ - if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { - /* device is down, can _not_ set threshold */ - ret = -ENODEV; - goto out; - } - if 
(priv->rts_threshold == value) goto out; diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h index bebb3379017f..bebb3379017f 100644 --- a/drivers/net/wireless/cw1200/sta.h +++ b/drivers/net/wireless/st/cw1200/sta.h diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index d28bd49cb5fd..d28bd49cb5fd 100644 --- a/drivers/net/wireless/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/st/cw1200/txrx.h index 492a4e14213b..492a4e14213b 100644 --- a/drivers/net/wireless/cw1200/txrx.h +++ b/drivers/net/wireless/st/cw1200/txrx.h diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c index 9e0ca3048657..9e0ca3048657 100644 --- a/drivers/net/wireless/cw1200/wsm.c +++ b/drivers/net/wireless/st/cw1200/wsm.c diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h index 48086e849515..48086e849515 100644 --- a/drivers/net/wireless/cw1200/wsm.h +++ b/drivers/net/wireless/st/cw1200/wsm.h diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig index cbe1e7fef61b..92fbd6597e34 100644 --- a/drivers/net/wireless/ti/Kconfig +++ b/drivers/net/wireless/ti/Kconfig @@ -1,11 +1,15 @@ -menuconfig WL_TI - bool "TI Wireless LAN support" +config WLAN_VENDOR_TI + bool "Texas Instrument devices" + default y ---help--- - This section contains support for all the wireless drivers - for Texas Instruments WLAN chips, such as wl1251 and the wl12xx - family. + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. -if WL_TI +if WLAN_VENDOR_TI source "drivers/net/wireless/ti/wl1251/Kconfig" source "drivers/net/wireless/ti/wl12xx/Kconfig" source "drivers/net/wireless/ti/wl18xx/Kconfig" @@ -21,4 +25,4 @@ config WILINK_PLATFORM_DATA Small platform data bit needed to pass data to the sdio modules. 
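Further up, the rsi_send_auto_rate_request() change folds a kmalloc() followed by an explicit memset() into a single kzalloc(). A standalone sketch of the same simplification using the C library's calloc(), with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_TBL_SZ 40			/* hypothetical stand-in for RSI_TBL_SZ */

int main(void)
{
	unsigned short *rates;

	/* before: rates = malloc(2 * DEMO_TBL_SZ); memset(rates, 0, 2 * DEMO_TBL_SZ); */
	rates = calloc(2, DEMO_TBL_SZ);	/* after: one call, memory already zeroed */
	if (!rates)
		return 1;

	printf("first entry: %u\n", rates[0]);
	free(rates);
	return 0;
}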
-endif # WL_TI +endif # WLAN_VENDOR_TI diff --git a/drivers/net/wireless/ti/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig index 477a206c098e..7142ccf3a425 100644 --- a/drivers/net/wireless/ti/wl1251/Kconfig +++ b/drivers/net/wireless/ti/wl1251/Kconfig @@ -1,4 +1,4 @@ -menuconfig WL1251 +config WL1251 tristate "TI wl1251 driver support" depends on MAC80211 select FW_LOADER diff --git a/drivers/net/wireless/ti/wl12xx/conf.h b/drivers/net/wireless/ti/wl12xx/conf.h index 75e29897a0f5..a606ba9ef041 100644 --- a/drivers/net/wireless/ti/wl12xx/conf.h +++ b/drivers/net/wireless/ti/wl12xx/conf.h @@ -47,4 +47,237 @@ struct wl12xx_priv_conf { struct conf_memory_settings mem_wl127x; }; +enum wl12xx_sg_params { + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT master basic rate + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_ACL_BT_MASTER_MIN_BR = 0, + WL12XX_CONF_SG_ACL_BT_MASTER_MAX_BR, + + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT slave basic rate + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_ACL_BT_SLAVE_MIN_BR, + WL12XX_CONF_SG_ACL_BT_SLAVE_MAX_BR, + + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT master EDR + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_ACL_BT_MASTER_MIN_EDR, + WL12XX_CONF_SG_ACL_BT_MASTER_MAX_EDR, + + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT slave EDR + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_ACL_BT_SLAVE_MIN_EDR, + WL12XX_CONF_SG_ACL_BT_SLAVE_MAX_EDR, + + /* + * The maximum time WLAN can gain the antenna + * in WLAN PSM / BT master/slave BR + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_ACL_WLAN_PS_MASTER_BR, + WL12XX_CONF_SG_ACL_WLAN_PS_SLAVE_BR, + + /* + * The maximum time WLAN can gain the antenna + * in WLAN PSM / BT master/slave EDR + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_ACL_WLAN_PS_MASTER_EDR, + WL12XX_CONF_SG_ACL_WLAN_PS_SLAVE_EDR, + + /* TODO: explain these values */ + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR, + WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR, + + WL12XX_CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR, + WL12XX_CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR, + WL12XX_CONF_SG_ACL_PASSIVE_SCAN_BT_BR, + WL12XX_CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR, + WL12XX_CONF_SG_ACL_PASSIVE_SCAN_BT_EDR, + WL12XX_CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR, + + /* + * Compensation percentage of probe requests when scan initiated + * during BT voice/ACL link. 
+ * + * Range: 0 - 255 (%) + */ + WL12XX_CONF_SG_AUTO_SCAN_PROBE_REQ, + + /* + * Compensation percentage of probe requests when active scan initiated + * during BT voice + * + * Range: 0 - 255 (%) + */ + WL12XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3, + + /* + * Compensation percentage of WLAN active scan window if initiated + * during BT A2DP + * + * Range: 0 - 1000 (%) + */ + WL12XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP, + + /* + * Compensation percentage of WLAN passive scan window if initiated + * during BT A2DP BR + * + * Range: 0 - 1000 (%) + */ + WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_A2DP_BR, + + /* + * Compensation percentage of WLAN passive scan window if initiated + * during BT A2DP EDR + * + * Range: 0 - 1000 (%) + */ + WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_A2DP_EDR, + + /* + * Compensation percentage of WLAN passive scan window if initiated + * during BT voice + * + * Range: 0 - 1000 (%) + */ + WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_HV3, + + /* TODO: explain these values */ + WL12XX_CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN, + WL12XX_CONF_SG_BCN_HV3_COLL_THR_IN_PASSIVE_SCAN, + WL12XX_CONF_SG_TX_RX_PROTECT_BW_IN_PASSIVE_SCAN, + + /* + * Defines whether the SG will force WLAN host to enter/exit PSM + * + * Range: 1 - SG can force, 0 - host handles PSM + */ + WL12XX_CONF_SG_STA_FORCE_PS_IN_BT_SCO, + + /* + * Defines antenna configuration (single/dual antenna) + * + * Range: 0 - single antenna, 1 - dual antenna + */ + WL12XX_CONF_SG_ANTENNA_CONFIGURATION, + + /* + * The threshold (percent) of max consecutive beacon misses before + * increasing priority of beacon reception. + * + * Range: 0 - 100 (%) + */ + WL12XX_CONF_SG_BEACON_MISS_PERCENT, + + /* + * Protection time of the DHCP procedure. + * + * Range: 0 - 100000 (ms) + */ + WL12XX_CONF_SG_DHCP_TIME, + + /* + * RX guard time before the beginning of a new BT voice frame during + * which no new WLAN trigger frame is transmitted. + * + * Range: 0 - 100000 (us) + */ + WL12XX_CONF_SG_RXT, + + /* + * TX guard time before the beginning of a new BT voice frame during + * which no new WLAN frame is transmitted. + * + * Range: 0 - 100000 (us) + */ + WL12XX_CONF_SG_TXT, + + /* + * Enable adaptive RXT/TXT algorithm. If disabled, the host values + * will be utilized. 
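The per-chip enum above pairs with the designated-initializer tables in wl12xx/main.c and wl18xx/main.c further below, where each slot is filled by name rather than by position. A minimal standalone sketch of that pattern, with hypothetical names:

#include <stdio.h>

enum demo_sg_params {			/* hypothetical, mirrors wl12xx_sg_params */
	DEMO_SG_ACL_BT_MASTER_MIN_BR = 0,
	DEMO_SG_ACL_BT_MASTER_MAX_BR,
	DEMO_SG_DHCP_TIME,
	DEMO_SG_PARAMS_MAX
};

/* designated initializers keep the table readable and order-independent */
static const unsigned int demo_params[DEMO_SG_PARAMS_MAX] = {
	[DEMO_SG_ACL_BT_MASTER_MIN_BR] = 10,
	[DEMO_SG_ACL_BT_MASTER_MAX_BR] = 180,
	[DEMO_SG_DHCP_TIME]            = 5000,
};

int main(void)
{
	printf("DHCP protection time: %u ms\n", demo_params[DEMO_SG_DHCP_TIME]);
	return 0;
}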
+ * + * Range: 0 - disable, 1 - enable + */ + WL12XX_CONF_SG_ADAPTIVE_RXT_TXT, + + /* TODO: explain this value */ + WL12XX_CONF_SG_GENERAL_USAGE_BIT_MAP, + + /* + * Number of consecutive BT voice frames not interrupted by WLAN + * + * Range: 0 - 100 + */ + WL12XX_CONF_SG_HV3_MAX_SERVED, + + /* + * The used WLAN legacy service period during active BT ACL link + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_PS_POLL_TIMEOUT, + + /* + * The used WLAN UPSD service period during active BT ACL link + * + * Range: 0 - 255 (ms) + */ + WL12XX_CONF_SG_UPSD_TIMEOUT, + + WL12XX_CONF_SG_CONSECUTIVE_CTS_THRESHOLD, + WL12XX_CONF_SG_STA_RX_WINDOW_AFTER_DTIM, + WL12XX_CONF_SG_STA_CONNECTION_PROTECTION_TIME, + + /* AP params */ + WL12XX_CONF_AP_BEACON_MISS_TX, + WL12XX_CONF_AP_RX_WINDOW_AFTER_BEACON, + WL12XX_CONF_AP_BEACON_WINDOW_INTERVAL, + WL12XX_CONF_AP_CONNECTION_PROTECTION_TIME, + WL12XX_CONF_AP_BT_ACL_VAL_BT_SERVE_TIME, + WL12XX_CONF_AP_BT_ACL_VAL_WL_SERVE_TIME, + + /* CTS Diluting params */ + WL12XX_CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH, + WL12XX_CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER, + + WL12XX_CONF_SG_TEMP_PARAM_1, + WL12XX_CONF_SG_TEMP_PARAM_2, + WL12XX_CONF_SG_TEMP_PARAM_3, + WL12XX_CONF_SG_TEMP_PARAM_4, + WL12XX_CONF_SG_TEMP_PARAM_5, + WL12XX_CONF_SG_TEMP_PARAM_6, + WL12XX_CONF_SG_TEMP_PARAM_7, + WL12XX_CONF_SG_TEMP_PARAM_8, + WL12XX_CONF_SG_TEMP_PARAM_9, + WL12XX_CONF_SG_TEMP_PARAM_10, + + WL12XX_CONF_SG_PARAMS_MAX, + WL12XX_CONF_SG_PARAMS_ALL = 0xff +}; + #endif /* __WL12XX_CONF_H__ */ diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index af0fe2e17151..a0d6cccc56f3 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -39,6 +39,7 @@ #include "scan.h" #include "event.h" #include "debugfs.h" +#include "conf.h" static char *fref_param; static char *tcxo_param; @@ -46,69 +47,69 @@ static char *tcxo_param; static struct wlcore_conf wl12xx_conf = { .sg = { .params = { - [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10, - [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180, - [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10, - [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180, - [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10, - [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80, - [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10, - [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80, - [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8, - [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8, - [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20, - [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50, - [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10, - [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20, - [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75, - [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15, - [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27, - [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17, + [WL12XX_CONF_SG_ACL_BT_MASTER_MIN_BR] = 10, + [WL12XX_CONF_SG_ACL_BT_MASTER_MAX_BR] = 180, + [WL12XX_CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10, + [WL12XX_CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180, + [WL12XX_CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10, + [WL12XX_CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80, + [WL12XX_CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10, + [WL12XX_CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80, + [WL12XX_CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8, + [WL12XX_CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8, 
+ [WL12XX_CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20, + [WL12XX_CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28, + [WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50, + [WL12XX_CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10, + [WL12XX_CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20, + [WL12XX_CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75, + [WL12XX_CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15, + [WL12XX_CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27, + [WL12XX_CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17, /* active scan params */ - [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, - [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, - [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, + [WL12XX_CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, + [WL12XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, + [WL12XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, /* passive scan params */ - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800, - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200, - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, + [WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_A2DP_BR] = 800, + [WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_A2DP_EDR] = 200, + [WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_HV3] = 200, /* passive scan in dual antenna params */ - [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, - [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0, - [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0, + [WL12XX_CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, + [WL12XX_CONF_SG_BCN_HV3_COLL_THR_IN_PASSIVE_SCAN] = 0, + [WL12XX_CONF_SG_TX_RX_PROTECT_BW_IN_PASSIVE_SCAN] = 0, /* general params */ - [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, - [CONF_SG_ANTENNA_CONFIGURATION] = 0, - [CONF_SG_BEACON_MISS_PERCENT] = 60, - [CONF_SG_DHCP_TIME] = 5000, - [CONF_SG_RXT] = 1200, - [CONF_SG_TXT] = 1000, - [CONF_SG_ADAPTIVE_RXT_TXT] = 1, - [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, - [CONF_SG_HV3_MAX_SERVED] = 6, - [CONF_SG_PS_POLL_TIMEOUT] = 10, - [CONF_SG_UPSD_TIMEOUT] = 10, - [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, - [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5, - [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30, + [WL12XX_CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, + [WL12XX_CONF_SG_ANTENNA_CONFIGURATION] = 0, + [WL12XX_CONF_SG_BEACON_MISS_PERCENT] = 60, + [WL12XX_CONF_SG_DHCP_TIME] = 5000, + [WL12XX_CONF_SG_RXT] = 1200, + [WL12XX_CONF_SG_TXT] = 1000, + [WL12XX_CONF_SG_ADAPTIVE_RXT_TXT] = 1, + [WL12XX_CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, + [WL12XX_CONF_SG_HV3_MAX_SERVED] = 6, + [WL12XX_CONF_SG_PS_POLL_TIMEOUT] = 10, + [WL12XX_CONF_SG_UPSD_TIMEOUT] = 10, + [WL12XX_CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, + [WL12XX_CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5, + [WL12XX_CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30, /* AP params */ - [CONF_AP_BEACON_MISS_TX] = 3, - [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10, - [CONF_AP_BEACON_WINDOW_INTERVAL] = 2, - [CONF_AP_CONNECTION_PROTECTION_TIME] = 0, - [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25, - [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25, + [WL12XX_CONF_AP_BEACON_MISS_TX] = 3, + [WL12XX_CONF_AP_RX_WINDOW_AFTER_BEACON] = 10, + [WL12XX_CONF_AP_BEACON_WINDOW_INTERVAL] = 2, + [WL12XX_CONF_AP_CONNECTION_PROTECTION_TIME] = 0, + [WL12XX_CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25, + [WL12XX_CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 
25, /* CTS Diluting params */ - [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, - [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, + [WL12XX_CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, + [WL12XX_CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, }, .state = CONF_SG_PROTECTIVE, }, @@ -1809,6 +1810,7 @@ static int wl12xx_setup(struct wl1271 *wl) BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS); BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS); + BUILD_BUG_ON(WL12XX_CONF_SG_PARAMS_MAX > WLCORE_CONF_SG_PARAMS_MAX); wl->rtable = wl12xx_rtable; wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS; diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h index 71f1ec448ba5..7aa880f14ccb 100644 --- a/drivers/net/wireless/ti/wl18xx/conf.h +++ b/drivers/net/wireless/ti/wl18xx/conf.h @@ -139,4 +139,94 @@ struct wl18xx_priv_conf { struct conf_ap_sleep_settings ap_sleep; } __packed; +enum wl18xx_sg_params { + WL18XX_CONF_SG_PARAM_0 = 0, + + /* Configuration Parameters */ + WL18XX_CONF_SG_ANTENNA_CONFIGURATION, + WL18XX_CONF_SG_ZIGBEE_COEX, + WL18XX_CONF_SG_TIME_SYNC, + + WL18XX_CONF_SG_PARAM_4, + WL18XX_CONF_SG_PARAM_5, + WL18XX_CONF_SG_PARAM_6, + WL18XX_CONF_SG_PARAM_7, + WL18XX_CONF_SG_PARAM_8, + WL18XX_CONF_SG_PARAM_9, + WL18XX_CONF_SG_PARAM_10, + WL18XX_CONF_SG_PARAM_11, + WL18XX_CONF_SG_PARAM_12, + WL18XX_CONF_SG_PARAM_13, + WL18XX_CONF_SG_PARAM_14, + WL18XX_CONF_SG_PARAM_15, + WL18XX_CONF_SG_PARAM_16, + WL18XX_CONF_SG_PARAM_17, + WL18XX_CONF_SG_PARAM_18, + WL18XX_CONF_SG_PARAM_19, + WL18XX_CONF_SG_PARAM_20, + WL18XX_CONF_SG_PARAM_21, + WL18XX_CONF_SG_PARAM_22, + WL18XX_CONF_SG_PARAM_23, + WL18XX_CONF_SG_PARAM_24, + WL18XX_CONF_SG_PARAM_25, + + /* Active Scan Parameters */ + WL18XX_CONF_SG_AUTO_SCAN_PROBE_REQ, + WL18XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3, + + WL18XX_CONF_SG_PARAM_28, + + /* Passive Scan Parameters */ + WL18XX_CONF_SG_PARAM_29, + WL18XX_CONF_SG_PARAM_30, + WL18XX_CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3, + + /* Passive Scan in Dual Antenna Parameters */ + WL18XX_CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN, + WL18XX_CONF_SG_BEACON_HV3_COLL_TH_IN_PASSIVE_SCAN, + WL18XX_CONF_SG_TX_RX_PROTECT_BW_IN_PASSIVE_SCAN, + + /* General Parameters */ + WL18XX_CONF_SG_STA_FORCE_PS_IN_BT_SCO, + WL18XX_CONF_SG_PARAM_36, + WL18XX_CONF_SG_BEACON_MISS_PERCENT, + WL18XX_CONF_SG_PARAM_38, + WL18XX_CONF_SG_RXT, + WL18XX_CONF_SG_UNUSED, + WL18XX_CONF_SG_ADAPTIVE_RXT_TXT, + WL18XX_CONF_SG_GENERAL_USAGE_BIT_MAP, + WL18XX_CONF_SG_HV3_MAX_SERVED, + WL18XX_CONF_SG_PARAM_44, + WL18XX_CONF_SG_PARAM_45, + WL18XX_CONF_SG_CONSECUTIVE_CTS_THRESHOLD, + WL18XX_CONF_SG_GEMINI_PARAM_47, + WL18XX_CONF_SG_STA_CONNECTION_PROTECTION_TIME, + + /* AP Parameters */ + WL18XX_CONF_SG_AP_BEACON_MISS_TX, + WL18XX_CONF_SG_PARAM_50, + WL18XX_CONF_SG_AP_BEACON_WINDOW_INTERVAL, + WL18XX_CONF_SG_AP_CONNECTION_PROTECTION_TIME, + WL18XX_CONF_SG_PARAM_53, + WL18XX_CONF_SG_PARAM_54, + + /* CTS Diluting Parameters */ + WL18XX_CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH, + WL18XX_CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER, + + WL18XX_CONF_SG_TEMP_PARAM_1, + WL18XX_CONF_SG_TEMP_PARAM_2, + WL18XX_CONF_SG_TEMP_PARAM_3, + WL18XX_CONF_SG_TEMP_PARAM_4, + WL18XX_CONF_SG_TEMP_PARAM_5, + WL18XX_CONF_SG_TEMP_PARAM_6, + WL18XX_CONF_SG_TEMP_PARAM_7, + WL18XX_CONF_SG_TEMP_PARAM_8, + WL18XX_CONF_SG_TEMP_PARAM_9, + WL18XX_CONF_SG_TEMP_PARAM_10, + + WL18XX_CONF_SG_PARAMS_MAX, + WL18XX_CONF_SG_PARAMS_ALL = 0xff +}; + #endif /* __WL18XX_CONF_H__ */ diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index 
09c7e098f460..719907a0a2c2 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -205,6 +205,8 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) mbox->sc_ssid, mbox->sc_pwd_len, mbox->sc_pwd); + if (vector & FW_LOGGER_INDICATION) + wlcore_event_fw_logger(wl); return 0; } diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h index f3d4f13379cb..070de1274694 100644 --- a/drivers/net/wireless/ti/wl18xx/event.h +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -41,6 +41,7 @@ enum { SMART_CONFIG_SYNC_EVENT_ID = BIT(22), SMART_CONFIG_DECODE_EVENT_ID = BIT(23), TIME_SYNC_EVENT_ID = BIT(24), + FW_LOGGER_INDICATION = BIT(25), }; enum wl18xx_radar_types { diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 50cce42089a5..1bf26cc7374e 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -177,69 +177,80 @@ enum wl18xx_hw_rates { static struct wlcore_conf wl18xx_conf = { .sg = { .params = { - [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10, - [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180, - [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10, - [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180, - [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10, - [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80, - [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10, - [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80, - [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8, - [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8, - [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20, - [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50, - [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10, - [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20, - [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75, - [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15, - [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27, - [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17, - /* active scan params */ - [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, - [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, - [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, - /* passive scan params */ - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800, - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200, - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, - /* passive scan in dual antenna params */ - [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, - [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0, - [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0, - /* general params */ - [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, - [CONF_SG_ANTENNA_CONFIGURATION] = 0, - [CONF_SG_BEACON_MISS_PERCENT] = 60, - [CONF_SG_DHCP_TIME] = 5000, - [CONF_SG_RXT] = 1200, - [CONF_SG_TXT] = 1000, - [CONF_SG_ADAPTIVE_RXT_TXT] = 1, - [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, - [CONF_SG_HV3_MAX_SERVED] = 6, - [CONF_SG_PS_POLL_TIMEOUT] = 10, - [CONF_SG_UPSD_TIMEOUT] = 10, - [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, - [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5, - [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30, - /* AP params */ - [CONF_AP_BEACON_MISS_TX] = 3, - [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10, - [CONF_AP_BEACON_WINDOW_INTERVAL] = 2, - [CONF_AP_CONNECTION_PROTECTION_TIME] = 0, - [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25, - [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25, - /* CTS Diluting 
params */ - [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, - [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, + [WL18XX_CONF_SG_PARAM_0] = 0, + /* Configuartion Parameters */ + [WL18XX_CONF_SG_ANTENNA_CONFIGURATION] = 0, + [WL18XX_CONF_SG_ZIGBEE_COEX] = 0, + [WL18XX_CONF_SG_TIME_SYNC] = 0, + [WL18XX_CONF_SG_PARAM_4] = 0, + [WL18XX_CONF_SG_PARAM_5] = 0, + [WL18XX_CONF_SG_PARAM_6] = 0, + [WL18XX_CONF_SG_PARAM_7] = 0, + [WL18XX_CONF_SG_PARAM_8] = 0, + [WL18XX_CONF_SG_PARAM_9] = 0, + [WL18XX_CONF_SG_PARAM_10] = 0, + [WL18XX_CONF_SG_PARAM_11] = 0, + [WL18XX_CONF_SG_PARAM_12] = 0, + [WL18XX_CONF_SG_PARAM_13] = 0, + [WL18XX_CONF_SG_PARAM_14] = 0, + [WL18XX_CONF_SG_PARAM_15] = 0, + [WL18XX_CONF_SG_PARAM_16] = 0, + [WL18XX_CONF_SG_PARAM_17] = 0, + [WL18XX_CONF_SG_PARAM_18] = 0, + [WL18XX_CONF_SG_PARAM_19] = 0, + [WL18XX_CONF_SG_PARAM_20] = 0, + [WL18XX_CONF_SG_PARAM_21] = 0, + [WL18XX_CONF_SG_PARAM_22] = 0, + [WL18XX_CONF_SG_PARAM_23] = 0, + [WL18XX_CONF_SG_PARAM_24] = 0, + [WL18XX_CONF_SG_PARAM_25] = 0, + /* Active Scan Parameters */ + [WL18XX_CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, + [WL18XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, + [WL18XX_CONF_SG_PARAM_28] = 0, + /* Passive Scan Parameters */ + [WL18XX_CONF_SG_PARAM_29] = 0, + [WL18XX_CONF_SG_PARAM_30] = 0, + [WL18XX_CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, + /* Passive Scan in Dual Antenna Parameters */ + [WL18XX_CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, + [WL18XX_CONF_SG_BEACON_HV3_COLL_TH_IN_PASSIVE_SCAN] = 0, + [WL18XX_CONF_SG_TX_RX_PROTECT_BW_IN_PASSIVE_SCAN] = 0, + /* General Parameters */ + [WL18XX_CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, + [WL18XX_CONF_SG_PARAM_36] = 0, + [WL18XX_CONF_SG_BEACON_MISS_PERCENT] = 60, + [WL18XX_CONF_SG_PARAM_38] = 0, + [WL18XX_CONF_SG_RXT] = 1200, + [WL18XX_CONF_SG_UNUSED] = 0, + [WL18XX_CONF_SG_ADAPTIVE_RXT_TXT] = 1, + [WL18XX_CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, + [WL18XX_CONF_SG_HV3_MAX_SERVED] = 6, + [WL18XX_CONF_SG_PARAM_44] = 0, + [WL18XX_CONF_SG_PARAM_45] = 0, + [WL18XX_CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, + [WL18XX_CONF_SG_GEMINI_PARAM_47] = 0, + [WL18XX_CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 0, + /* AP Parameters */ + [WL18XX_CONF_SG_AP_BEACON_MISS_TX] = 3, + [WL18XX_CONF_SG_PARAM_50] = 0, + [WL18XX_CONF_SG_AP_BEACON_WINDOW_INTERVAL] = 2, + [WL18XX_CONF_SG_AP_CONNECTION_PROTECTION_TIME] = 30, + [WL18XX_CONF_SG_PARAM_53] = 0, + [WL18XX_CONF_SG_PARAM_54] = 0, + /* CTS Diluting Parameters */ + [WL18XX_CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, + [WL18XX_CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_1] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_2] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_3] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_4] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_5] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_6] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_7] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_8] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_9] = 0, + [WL18XX_CONF_SG_TEMP_PARAM_10] = 0, }, .state = CONF_SG_PROTECTIVE, }, @@ -461,7 +472,7 @@ static struct wlcore_conf wl18xx_conf = { }, .fwlog = { .mode = WL12XX_FWLOG_CONTINUOUS, - .mem_blocks = 2, + .mem_blocks = 0, .severity = 0, .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED, .output = WL12XX_FWLOG_OUTPUT_DBG_PINS, @@ -584,7 +595,7 @@ static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = { .mem = { .start = 0x00A00000, .size = 0x00012000 }, .reg = { .start = 0x00807000, .size = 0x00005000 }, .mem2 = { .start = 0x00800000, .size = 0x0000B000 }, - .mem3 = { .start = 0x00000000, .size = 0x00000000 }, + .mem3 = { .start = 0x00401594, .size = 
0x00001020 }, }, [PART_DOWN] = { .mem = { .start = 0x00000000, .size = 0x00014000 }, @@ -602,7 +613,7 @@ static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = { .mem = { .start = 0x00800000, .size = 0x000050FC }, .reg = { .start = 0x00B00404, .size = 0x00001000 }, .mem2 = { .start = 0x00C00000, .size = 0x00000400 }, - .mem3 = { .start = 0x00000000, .size = 0x00000000 }, + .mem3 = { .start = 0x00401594, .size = 0x00001020 }, }, [PART_PHY_INIT] = { .mem = { .start = WL18XX_PHY_INIT_MEM_ADDR, @@ -1029,7 +1040,8 @@ static int wl18xx_boot(struct wl1271 *wl) DFS_CHANNELS_CONFIG_COMPLETE_EVENT | SMART_CONFIG_SYNC_EVENT_ID | SMART_CONFIG_DECODE_EVENT_ID | - TIME_SYNC_EVENT_ID; + TIME_SYNC_EVENT_ID | + FW_LOGGER_INDICATION; wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID; @@ -1895,6 +1907,7 @@ static int wl18xx_setup(struct wl1271 *wl) BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS); BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS); + BUILD_BUG_ON(WL18XX_CONF_SG_PARAMS_MAX > WLCORE_CONF_SG_PARAMS_MAX); wl->rtable = wl18xx_rtable; wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS; diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig index 7c099542b214..969c9d79bfc8 100644 --- a/drivers/net/wireless/ti/wlcore/Kconfig +++ b/drivers/net/wireless/ti/wlcore/Kconfig @@ -1,6 +1,6 @@ config WLCORE tristate "TI wlcore support" - depends on WL_TI && MAC80211 + depends on MAC80211 select FW_LOADER ---help--- This module contains the main code for TI WLAN chips. It abstracts diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index f28fa3b5029d..26cc23f32241 100644 --- a/drivers/net/wireless/ti/wlcore/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -534,9 +534,9 @@ int wl12xx_acx_sg_cfg(struct wl1271 *wl) } /* BT-WLAN coext parameters */ - for (i = 0; i < CONF_SG_PARAMS_MAX; i++) + for (i = 0; i < WLCORE_CONF_SG_PARAMS_MAX; i++) param->params[i] = cpu_to_le32(c->params[i]); - param->param_idx = CONF_SG_PARAMS_ALL; + param->param_idx = WLCORE_CONF_SG_PARAMS_ALL; ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h index 954d57ec98f4..0d61fae88dcb 100644 --- a/drivers/net/wireless/ti/wlcore/acx.h +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -300,7 +300,7 @@ struct acx_bt_wlan_coex { struct acx_bt_wlan_coex_param { struct acx_header header; - __le32 params[CONF_SG_PARAMS_MAX]; + __le32 params[WLCORE_CONF_SG_PARAMS_MAX]; u8 param_idx; u8 padding[3]; } __packed; diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index 8dc46c0a489a..e28e2f2303ce 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -626,7 +626,6 @@ struct wl12xx_cmd_remove_peer { */ enum wl12xx_fwlogger_log_mode { WL12XX_FWLOG_CONTINUOUS, - WL12XX_FWLOG_ON_DEMAND }; /* Include/exclude timestamps from the log messages */ diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h index 52a9d1b14020..44d898fe0afc 100644 --- a/drivers/net/wireless/ti/wlcore/conf.h +++ b/drivers/net/wireless/ti/wlcore/conf.h @@ -110,242 +110,11 @@ enum { CONF_SG_OPPORTUNISTIC }; -enum { - /* - * Configure the min and max time BT gains the antenna - * in WLAN / BT master basic rate - * - * Range: 0 - 255 (ms) - */ - CONF_SG_ACL_BT_MASTER_MIN_BR = 0, - CONF_SG_ACL_BT_MASTER_MAX_BR, - - /* - * Configure the min and max time BT gains the antenna - * in 
WLAN / BT slave basic rate - * - * Range: 0 - 255 (ms) - */ - CONF_SG_ACL_BT_SLAVE_MIN_BR, - CONF_SG_ACL_BT_SLAVE_MAX_BR, - - /* - * Configure the min and max time BT gains the antenna - * in WLAN / BT master EDR - * - * Range: 0 - 255 (ms) - */ - CONF_SG_ACL_BT_MASTER_MIN_EDR, - CONF_SG_ACL_BT_MASTER_MAX_EDR, - - /* - * Configure the min and max time BT gains the antenna - * in WLAN / BT slave EDR - * - * Range: 0 - 255 (ms) - */ - CONF_SG_ACL_BT_SLAVE_MIN_EDR, - CONF_SG_ACL_BT_SLAVE_MAX_EDR, - - /* - * The maximum time WLAN can gain the antenna - * in WLAN PSM / BT master/slave BR - * - * Range: 0 - 255 (ms) - */ - CONF_SG_ACL_WLAN_PS_MASTER_BR, - CONF_SG_ACL_WLAN_PS_SLAVE_BR, - - /* - * The maximum time WLAN can gain the antenna - * in WLAN PSM / BT master/slave EDR - * - * Range: 0 - 255 (ms) - */ - CONF_SG_ACL_WLAN_PS_MASTER_EDR, - CONF_SG_ACL_WLAN_PS_SLAVE_EDR, - - /* TODO: explain these values */ - CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR, - CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR, - CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR, - CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR, - CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR, - CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR, - CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR, - CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR, - - CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR, - CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR, - CONF_SG_ACL_PASSIVE_SCAN_BT_BR, - CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR, - CONF_SG_ACL_PASSIVE_SCAN_BT_EDR, - CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR, - - /* - * Compensation percentage of probe requests when scan initiated - * during BT voice/ACL link. - * - * Range: 0 - 255 (%) - */ - CONF_SG_AUTO_SCAN_PROBE_REQ, - - /* - * Compensation percentage of probe requests when active scan initiated - * during BT voice - * - * Range: 0 - 255 (%) - */ - CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3, - - /* - * Compensation percentage of WLAN active scan window if initiated - * during BT A2DP - * - * Range: 0 - 1000 (%) - */ - CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP, - - /* - * Compensation percentage of WLAN passive scan window if initiated - * during BT A2DP BR - * - * Range: 0 - 1000 (%) - */ - CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR, - - /* - * Compensation percentage of WLAN passive scan window if initiated - * during BT A2DP EDR - * - * Range: 0 - 1000 (%) - */ - CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR, - - /* - * Compensation percentage of WLAN passive scan window if initiated - * during BT voice - * - * Range: 0 - 1000 (%) - */ - CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3, - - /* TODO: explain these values */ - CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN, - CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN, - CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN, - - /* - * Defines whether the SG will force WLAN host to enter/exit PSM - * - * Range: 1 - SG can force, 0 - host handles PSM - */ - CONF_SG_STA_FORCE_PS_IN_BT_SCO, - - /* - * Defines antenna configuration (single/dual antenna) - * - * Range: 0 - single antenna, 1 - dual antenna - */ - CONF_SG_ANTENNA_CONFIGURATION, - - /* - * The threshold (percent) of max consecutive beacon misses before - * increasing priority of beacon reception. - * - * Range: 0 - 100 (%) - */ - CONF_SG_BEACON_MISS_PERCENT, - - /* - * Protection time of the DHCP procedure. - * - * Range: 0 - 100000 (ms) - */ - CONF_SG_DHCP_TIME, - - /* - * RX guard time before the beginning of a new BT voice frame during - * which no new WLAN trigger frame is transmitted. 
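The removed generic enum is replaced below by a fixed WLCORE_CONF_SG_PARAMS_MAX of 67 entries, and each chip's setup gains a BUILD_BUG_ON so its private parameter enum cannot outgrow the shared wlcore table. A standalone sketch of the same compile-time guard using C11 _Static_assert, with hypothetical names:

#include <stdio.h>

#define DEMO_WLCORE_SG_PARAMS_MAX 67	/* mirrors WLCORE_CONF_SG_PARAMS_MAX */

enum demo_chip_sg_params {		/* hypothetical per-chip index list */
	DEMO_CHIP_SG_FIRST = 0,
	DEMO_CHIP_SG_SECOND,
	DEMO_CHIP_SG_PARAMS_MAX
};

/* fails to compile if the chip enum ever outgrows the shared array, the same
 * idea as BUILD_BUG_ON(WL12XX_CONF_SG_PARAMS_MAX > WLCORE_CONF_SG_PARAMS_MAX) */
_Static_assert(DEMO_CHIP_SG_PARAMS_MAX <= DEMO_WLCORE_SG_PARAMS_MAX,
	       "per-chip SG parameter list larger than wlcore table");

int main(void)
{
	printf("chip params: %d of %d slots used\n",
	       DEMO_CHIP_SG_PARAMS_MAX, DEMO_WLCORE_SG_PARAMS_MAX);
	return 0;
}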
- * - * Range: 0 - 100000 (us) - */ - CONF_SG_RXT, - - /* - * TX guard time before the beginning of a new BT voice frame during - * which no new WLAN frame is transmitted. - * - * Range: 0 - 100000 (us) - */ - - CONF_SG_TXT, - - /* - * Enable adaptive RXT/TXT algorithm. If disabled, the host values - * will be utilized. - * - * Range: 0 - disable, 1 - enable - */ - CONF_SG_ADAPTIVE_RXT_TXT, - - /* TODO: explain this value */ - CONF_SG_GENERAL_USAGE_BIT_MAP, - - /* - * Number of consecutive BT voice frames not interrupted by WLAN - * - * Range: 0 - 100 - */ - CONF_SG_HV3_MAX_SERVED, - - /* - * The used WLAN legacy service period during active BT ACL link - * - * Range: 0 - 255 (ms) - */ - CONF_SG_PS_POLL_TIMEOUT, - - /* - * The used WLAN UPSD service period during active BT ACL link - * - * Range: 0 - 255 (ms) - */ - CONF_SG_UPSD_TIMEOUT, - - CONF_SG_CONSECUTIVE_CTS_THRESHOLD, - CONF_SG_STA_RX_WINDOW_AFTER_DTIM, - CONF_SG_STA_CONNECTION_PROTECTION_TIME, - - /* AP params */ - CONF_AP_BEACON_MISS_TX, - CONF_AP_RX_WINDOW_AFTER_BEACON, - CONF_AP_BEACON_WINDOW_INTERVAL, - CONF_AP_CONNECTION_PROTECTION_TIME, - CONF_AP_BT_ACL_VAL_BT_SERVE_TIME, - CONF_AP_BT_ACL_VAL_WL_SERVE_TIME, - - /* CTS Diluting params */ - CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH, - CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER, - - CONF_SG_TEMP_PARAM_1, - CONF_SG_TEMP_PARAM_2, - CONF_SG_TEMP_PARAM_3, - CONF_SG_TEMP_PARAM_4, - CONF_SG_TEMP_PARAM_5, - CONF_SG_TEMP_PARAM_6, - CONF_SG_TEMP_PARAM_7, - CONF_SG_TEMP_PARAM_8, - CONF_SG_TEMP_PARAM_9, - CONF_SG_TEMP_PARAM_10, - - CONF_SG_PARAMS_MAX, - CONF_SG_PARAMS_ALL = 0xff -}; +#define WLCORE_CONF_SG_PARAMS_MAX 67 +#define WLCORE_CONF_SG_PARAMS_ALL 0xff struct conf_sg_settings { - u32 params[CONF_SG_PARAMS_MAX]; + u32 params[WLCORE_CONF_SG_PARAMS_MAX]; u8 state; } __packed; diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index eb43f94a1597..7f672f6879d0 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -1205,26 +1205,11 @@ err_out: static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig) { - loff_t ret; - /* only requests of dword-aligned size and offset are supported */ if (offset % 4) return -EINVAL; - switch (orig) { - case SEEK_SET: - file->f_pos = offset; - ret = file->f_pos; - break; - case SEEK_CUR: - file->f_pos += offset; - ret = file->f_pos; - break; - default: - ret = -EINVAL; - } - - return ret; + return no_seek_end_llseek(file, offset, orig); } static const struct file_operations dev_mem_ops = { @@ -1234,6 +1219,65 @@ static const struct file_operations dev_mem_ops = { .llseek = dev_mem_seek, }; +static ssize_t fw_logger_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + return wl1271_format_buffer(user_buf, count, + ppos, "%d\n", + wl->conf.fwlog.output); +} + +static ssize_t fw_logger_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &value); + if (ret < 0) { + wl1271_warning("illegal value in fw_logger"); + return -EINVAL; + } + + if ((value > 2) || (value == 0)) { + wl1271_warning("fw_logger value must be 1-UART 2-SDIO"); + return -ERANGE; + } + + if (wl->conf.fwlog.output == 0) { + wl1271_warning("iligal opperation - fw logger disabled by default, please change mode via wlconf"); + return -EINVAL; + } + + 
mutex_lock(&wl->mutex); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) { + count = ret; + goto out; + } + + wl->conf.fwlog.output = value; + + ret = wl12xx_cmd_config_fwlog(wl); + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations fw_logger_ops = { + .open = simple_open, + .read = fw_logger_read, + .write = fw_logger_write, + .llseek = default_llseek, +}; + static int wl1271_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { @@ -1260,6 +1304,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(irq_timeout, rootdir); DEBUGFS_ADD(fw_stats_raw, rootdir); DEBUGFS_ADD(sleep_auth, rootdir); + DEBUGFS_ADD(fw_logger, rootdir); streaming = debugfs_create_dir("rx_streaming", rootdir); if (!streaming || IS_ERR(streaming)) diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index c42e78955e7b..c96405498bf4 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -28,6 +28,88 @@ #include "ps.h" #include "scan.h" #include "wl12xx_80211.h" +#include "hw_ops.h" + +#define WL18XX_LOGGER_SDIO_BUFF_MAX (0x1020) +#define WL18XX_DATA_RAM_BASE_ADDRESS (0x20000000) +#define WL18XX_LOGGER_SDIO_BUFF_ADDR (0x40159c) +#define WL18XX_LOGGER_BUFF_OFFSET (sizeof(struct fw_logger_information)) +#define WL18XX_LOGGER_READ_POINT_OFFSET (12) + +int wlcore_event_fw_logger(struct wl1271 *wl) +{ + u32 ret; + struct fw_logger_information fw_log; + u8 *buffer; + u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS; + u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR; + u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR + + WL18XX_LOGGER_BUFF_OFFSET; + u32 available_len; + u32 actual_len; + u32 clear_addr; + size_t len; + u32 start_loc; + + buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL); + if (!buffer) { + wl1271_error("Fail to allocate fw logger memory"); + fw_log.actual_buff_size = cpu_to_le32(0); + goto out; + } + + ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX, + false); + if (ret < 0) { + wl1271_error("Fail to read logger buffer, error_id = %d", + ret); + fw_log.actual_buff_size = cpu_to_le32(0); + goto free_out; + } + + memcpy(&fw_log, buffer, sizeof(fw_log)); + + if (le32_to_cpu(fw_log.actual_buff_size) == 0) + goto free_out; + + actual_len = le32_to_cpu(fw_log.actual_buff_size); + start_loc = (le32_to_cpu(fw_log.buff_read_ptr) - + internal_fw_addrbase) - addr; + end_buff_addr += le32_to_cpu(fw_log.max_buff_size); + available_len = end_buff_addr - + (le32_to_cpu(fw_log.buff_read_ptr) - + internal_fw_addrbase); + actual_len = min(actual_len, available_len); + len = actual_len; + + wl12xx_copy_fwlog(wl, &buffer[start_loc], len); + clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) + + internal_fw_addrbase; + + len = le32_to_cpu(fw_log.actual_buff_size) - len; + if (len) { + wl12xx_copy_fwlog(wl, + &buffer[WL18XX_LOGGER_BUFF_OFFSET], + len); + clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len + + internal_fw_addrbase; + } + + /* double check that clear address and write pointer are the same */ + if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) { + wl1271_error("Calculate of clear addr Clear = %x, write = %x", + clear_addr, le32_to_cpu(fw_log.buff_write_ptr)); + } + + /* indicate FW about Clear buffer */ + ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET, + fw_log.buff_write_ptr); +free_out: + kfree(buffer); +out: + return le32_to_cpu(fw_log.actual_buff_size); +} 
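wlcore_event_fw_logger() above drains the firmware log from a ring described by read and write pointers, issuing a second wl12xx_copy_fwlog() call when the valid data wraps past the end of the buffer. A minimal standalone sketch of that wrap-around drain, with hypothetical names and sizes:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* hypothetical stand-in for struct fw_logger_information: a byte ring
 * described by a read pointer, a write pointer and a fill count */
struct demo_log_ring {
	uint8_t  buf[16];
	uint32_t rd;		/* next byte to read  */
	uint32_t wr;		/* next byte to write */
	uint32_t used;		/* bytes available    */
};

/* copy out the available bytes, splitting into two chunks when the data
 * wraps, the same shape as the two wl12xx_copy_fwlog() calls above */
static size_t demo_drain(struct demo_log_ring *r, uint8_t *out, size_t outlen)
{
	size_t first, total = r->used < outlen ? r->used : outlen;

	first = sizeof(r->buf) - r->rd;
	if (first > total)
		first = total;
	memcpy(out, &r->buf[r->rd], first);
	memcpy(out + first, r->buf, total - first);

	r->rd = (r->rd + total) % sizeof(r->buf);
	r->used -= total;
	return total;
}

int main(void)
{
	struct demo_log_ring r = { .rd = 12, .wr = 4, .used = 8 };
	uint8_t out[16];

	memcpy(&r.buf[12], "WXYZ", 4);	/* tail of the ring      */
	memcpy(&r.buf[0], "abcd", 4);	/* wrapped head of data  */
	printf("drained %zu bytes: %.8s\n", demo_drain(&r, out, sizeof(out)), out);
	return 0;
}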
+EXPORT_SYMBOL_GPL(wlcore_event_fw_logger); void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr) { diff --git a/drivers/net/wireless/ti/wlcore/event.h b/drivers/net/wireless/ti/wlcore/event.h index acc7a59d3828..75e8e98da2fe 100644 --- a/drivers/net/wireless/ti/wlcore/event.h +++ b/drivers/net/wireless/ti/wlcore/event.h @@ -64,6 +64,14 @@ enum { #define NUM_OF_RSSI_SNR_TRIGGERS 8 +struct fw_logger_information { + __le32 max_buff_size; + __le32 actual_buff_size; + __le32 num_trace_drop; + __le32 buff_read_ptr; + __le32 buff_write_ptr; +} __packed; + struct wl1271; int wl1271_event_unmask(struct wl1271 *wl); @@ -84,4 +92,5 @@ void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap); void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap); void wlcore_event_roc_complete(struct wl1271 *wl); void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr); +int wlcore_event_fw_logger(struct wl1271 *wl); #endif diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c index 68e74eefd296..9ac118e727e9 100644 --- a/drivers/net/wireless/ti/wlcore/io.c +++ b/drivers/net/wireless/ti/wlcore/io.c @@ -175,12 +175,13 @@ int wlcore_set_partition(struct wl1271 *wl, if (ret < 0) goto out; - /* - * We don't need the size of the last partition, as it is - * automatically calculated based on the total memory size and - * the sizes of the previous partitions. - */ ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); + if (ret < 0) + goto out; + + ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size); + if (ret < 0) + goto out; out: return ret; diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h index 0305729d0986..6c257b54f415 100644 --- a/drivers/net/wireless/ti/wlcore/io.h +++ b/drivers/net/wireless/ti/wlcore/io.h @@ -36,8 +36,8 @@ #define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) #define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) #define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) -#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) - +#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) +#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28) #define HW_ACCESS_REGISTER_SIZE 4 #define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 @@ -207,19 +207,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg, static inline void wl1271_power_off(struct wl1271 *wl) { - int ret; + int ret = 0; if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags)) return; - ret = wl->if_ops->power(wl->dev, false); + if (wl->if_ops->power) + ret = wl->if_ops->power(wl->dev, false); if (!ret) clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); } static inline int wl1271_power_on(struct wl1271 *wl) { - int ret = wl->if_ops->power(wl->dev, true); + int ret = 0; + + if (wl->if_ops->power) + ret = wl->if_ops->power(wl->dev, true); if (ret == 0) set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ec7f6af3fab2..d1109c4f0f0d 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1,4 +1,3 @@ - /* * This file is part of wlcore * @@ -303,25 +302,11 @@ out: static void wlcore_adjust_conf(struct wl1271 *wl) { - /* Adjust settings according to optional module parameters */ - - /* Firmware Logger params */ - if (fwlog_mem_blocks != -1) { - if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS && - fwlog_mem_blocks <= 
CONF_FWLOG_MAX_MEM_BLOCKS) { - wl->conf.fwlog.mem_blocks = fwlog_mem_blocks; - } else { - wl1271_error( - "Illegal fwlog_mem_blocks=%d using default %d", - fwlog_mem_blocks, wl->conf.fwlog.mem_blocks); - } - } if (fwlog_param) { if (!strcmp(fwlog_param, "continuous")) { wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; - } else if (!strcmp(fwlog_param, "ondemand")) { - wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND; + wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST; } else if (!strcmp(fwlog_param, "dbgpins")) { wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; @@ -825,91 +810,32 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) static void wl12xx_read_fwlog_panic(struct wl1271 *wl) { - struct wlcore_partition_set part, old_part; - u32 addr; - u32 offset; - u32 end_of_log; - u8 *block; - int ret; + u32 end_of_log = 0; - if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) || - (wl->conf.fwlog.mem_blocks == 0)) + if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) return; wl1271_info("Reading FW panic log"); - block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL); - if (!block) - return; - /* * Make sure the chip is awake and the logger isn't active. * Do not send a stop fwlog command if the fw is hanged or if * dbgpins are used (due to some fw bug). */ if (wl1271_ps_elp_wakeup(wl)) - goto out; + return; if (!wl->watchdog_recovery && wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) wl12xx_cmd_stop_fwlog(wl); - /* Read the first memory block address */ - ret = wlcore_fw_status(wl, wl->fw_status); - if (ret < 0) - goto out; - - addr = wl->fw_status->log_start_addr; - if (!addr) - goto out; - - if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) { - offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor); - end_of_log = wl->fwlog_end; - } else { - offset = sizeof(addr); - end_of_log = addr; - } - - old_part = wl->curr_part; - memset(&part, 0, sizeof(part)); - /* Traverse the memory blocks linked list */ do { - part.mem.start = wlcore_hw_convert_hwaddr(wl, addr); - part.mem.size = PAGE_SIZE; - - ret = wlcore_set_partition(wl, &part); - if (ret < 0) { - wl1271_error("%s: set_partition start=0x%X size=%d", - __func__, part.mem.start, part.mem.size); - goto out; + end_of_log = wlcore_event_fw_logger(wl); + if (end_of_log == 0) { + msleep(100); + end_of_log = wlcore_event_fw_logger(wl); } - - memset(block, 0, wl->fw_mem_block_size); - ret = wlcore_read_hwaddr(wl, addr, block, - wl->fw_mem_block_size, false); - - if (ret < 0) - goto out; - - /* - * Memory blocks are linked to one another. The first 4 bytes - * of each memory block hold the hardware address of the next - * one. The last memory block points to the first one in - * on demand mode and is equal to 0x2000000 in continuous mode. 
- */ - addr = le32_to_cpup((__le32 *)block); - - if (!wl12xx_copy_fwlog(wl, block + offset, - wl->fw_mem_block_size - offset)) - break; - } while (addr && (addr != end_of_log)); - - wake_up_interruptible(&wl->fwlog_waitq); - -out: - kfree(block); - wlcore_set_partition(wl, &old_part); + } while (end_of_log != 0); } static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif, @@ -6291,7 +6217,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, wl->active_sta_count = 0; wl->active_link_count = 0; wl->fwlog_size = 0; - init_waitqueue_head(&wl->fwlog_waitq); /* The system link is always allocated */ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); @@ -6377,7 +6302,6 @@ int wlcore_free_hw(struct wl1271 *wl) /* Unblock any fwlog readers */ mutex_lock(&wl->mutex); wl->fwlog_size = -1; - wake_up_interruptible_all(&wl->fwlog_waitq); mutex_unlock(&wl->mutex); wlcore_sysfs_free(wl); @@ -6584,7 +6508,7 @@ MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); module_param_named(fwlog, fwlog_param, charp, 0); MODULE_PARM_DESC(fwlog, - "FW logger options: continuous, ondemand, dbgpins or disable"); + "FW logger options: continuous, dbgpins or disable"); module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks"); diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index 5b2927391d1c..34e7e938ede4 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -149,7 +149,6 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) { size_t len = length - sizeof(*desc); wl12xx_copy_fwlog(wl, data + sizeof(*desc), len); - wake_up_interruptible(&wl->fwlog_waitq); return 0; } diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 236b41090827..44f059f7f34e 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -73,7 +73,10 @@ */ #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) -#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) +/* Maximum number of SPI write chunks */ +#define WSPI_MAX_NUM_OF_CHUNKS \ + ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1) + struct wl12xx_spi_glue { struct device *dev; @@ -268,9 +271,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, void *buf, size_t len, bool fixed) { struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); - struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)]; + /* SPI write buffers - 2 for each chunk */ + struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; struct spi_message m; - u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; + u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */ u32 *cmd; u32 chunk_len; int i; diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index 24dd288d6809..a9218e5b0efc 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -119,32 +119,6 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, if (ret < 0) return -ERESTARTSYS; - /* Let only one thread read the log at a time, blocking others */ - while (wl->fwlog_size == 0) { - DEFINE_WAIT(wait); - - prepare_to_wait_exclusive(&wl->fwlog_waitq, - &wait, - TASK_INTERRUPTIBLE); - - if (wl->fwlog_size != 0) { - finish_wait(&wl->fwlog_waitq, &wait); - break; - } - - mutex_unlock(&wl->mutex); - - schedule(); - 
finish_wait(&wl->fwlog_waitq, &wait); - - if (signal_pending(current)) - return -ERESTARTSYS; - - ret = mutex_lock_interruptible(&wl->mutex); - if (ret < 0) - return -ERESTARTSYS; - } - /* Check if the fwlog is still valid */ if (wl->fwlog_size < 0) { mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 906be6aa4eb6..dda01b118c26 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -310,9 +310,6 @@ struct wl1271 { /* FW memory block size */ u32 fw_mem_block_size; - /* Sysfs FW log entry readers wait queue */ - wait_queue_head_t fwlog_waitq; - /* Hardware recovery work */ struct work_struct recovery_work; bool watchdog_recovery; diff --git a/drivers/net/wireless/zydas/Kconfig b/drivers/net/wireless/zydas/Kconfig new file mode 100644 index 000000000000..a58c0f65e376 --- /dev/null +++ b/drivers/net/wireless/zydas/Kconfig @@ -0,0 +1,35 @@ +config WLAN_VENDOR_ZYDAS + bool "ZyDAS devices" + default y + ---help--- + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_ZYDAS + +config USB_ZD1201 + tristate "USB ZD1201 based Wireless device support" + depends on CFG80211 && USB + select WIRELESS_EXT + select WEXT_PRIV + select FW_LOADER + ---help--- + Say Y if you want to use wireless LAN adapters based on the ZyDAS + ZD1201 chip. + + This driver makes the adapter appear as a normal Ethernet interface, + typically on wlan0. + + The zd1201 device requires external firmware to be loaded. + This can be found at http://linux-lc100020.sourceforge.net/ + + To compile this driver as a module, choose M here: the + module will be called zd1201. 
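The zd1201 entry above selects FW_LOADER because the adapter needs its firmware pushed to it at probe time. For reference, a hedged sketch of the generic firmware-loading pattern that FW_LOADER provides; the firmware file name and the upload helper are placeholders, this is not zd1201 code:

/*
 * Generic request_firmware() usage, as enabled by "select FW_LOADER" above.
 */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_upload(struct device *dev, const void *data, size_t size)
{
	dev_info(dev, "would push %zu firmware bytes to the device here\n", size);
	return 0;
}

static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* Looks the blob up under /lib/firmware (or via the usermode helper). */
	ret = request_firmware(&fw, "example-device.fw", dev);
	if (ret) {
		dev_err(dev, "firmware not available: %d\n", ret);
		return ret;
	}

	/* fw->data / fw->size describe the blob for as long as we hold it. */
	ret = example_upload(dev, fw->data, fw->size);

	release_firmware(fw);
	return ret;
}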
+ +source "drivers/net/wireless/zydas/zd1211rw/Kconfig" + +endif # WLAN_VENDOR_ZYDAS diff --git a/drivers/net/wireless/zydas/Makefile b/drivers/net/wireless/zydas/Makefile new file mode 100644 index 000000000000..679fbbf3a6cd --- /dev/null +++ b/drivers/net/wireless/zydas/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ZD1211RW) += zd1211rw/ + +obj-$(CONFIG_USB_ZD1201) += zd1201.o diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 6f5c793a7855..6f5c793a7855 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c diff --git a/drivers/net/wireless/zd1201.h b/drivers/net/wireless/zydas/zd1201.h index dd7ea1f35bef..dd7ea1f35bef 100644 --- a/drivers/net/wireless/zd1201.h +++ b/drivers/net/wireless/zydas/zd1201.h diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zydas/zd1211rw/Kconfig index 95920581860a..95920581860a 100644 --- a/drivers/net/wireless/zd1211rw/Kconfig +++ b/drivers/net/wireless/zydas/zd1211rw/Kconfig diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zydas/zd1211rw/Makefile index 5728a918e508..5728a918e508 100644 --- a/drivers/net/wireless/zd1211rw/Makefile +++ b/drivers/net/wireless/zydas/zd1211rw/Makefile diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c index 07b94eda9604..07b94eda9604 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zydas/zd1211rw/zd_chip.h index b03786c9f3aa..b03786c9f3aa 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.h diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zydas/zd1211rw/zd_def.h index 41bd755bc135..41bd755bc135 100644 --- a/drivers/net/wireless/zd1211rw/zd_def.h +++ b/drivers/net/wireless/zydas/zd1211rw/zd_def.h diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index e539d9b1b562..e539d9b1b562 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h index 5a484235308f..5a484235308f 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf.c index dc179c414518..dc179c414518 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf.c diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zydas/zd1211rw/zd_rf.h index 8f14e25e1041..8f14e25e1041 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf.h +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf.h diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_al2230.c index 99aed7d78952..99aed7d78952 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_al2230.c diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_al7230b.c index 5fea485be574..5fea485be574 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_al7230b.c diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c index a93f657a41c7..a93f657a41c7 
100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_uw2453.c index 61b924027356..61b924027356 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_uw2453.c diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index a912dc051111..a912dc051111 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zydas/zd1211rw/zd_usb.h index a9075f225178..a9075f225178 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.h +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.h diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e481f3710bd3..1049c34e7d43 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, struct netrx_pending_operations *npo) { struct xenvif_rx_meta *meta; - struct xen_netif_rx_request *req; + struct xen_netif_rx_request req; - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; meta->size = 0; - meta->id = req->id; + meta->id = req.id; npo->copy_off = 0; - npo->copy_gref = req->gref; + npo->copy_gref = req.gref; return meta; } @@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; int i; - struct xen_netif_rx_request *req; + struct xen_netif_rx_request req; struct xenvif_rx_meta *meta; unsigned char *data; int head = 1; @@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb, /* Set up a GSO prefix descriptor, if necessary */ if ((1 << gso_type) & vif->gso_prefix_mask) { - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; - meta->id = req->id; + meta->id = req.id; } - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; if ((1 << gso_type) & vif->gso_mask) { @@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb, } meta->size = 0; - meta->id = req->id; + meta->id = req.id; npo->copy_off = 0; - npo->copy_gref = req->gref; + npo->copy_gref = req.gref; data = skb->data; while (data < skb_tail_pointer(skb)) { @@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue) * Allow a burst big enough to transmit a jumbo packet of up to 128kB. * Otherwise the interface can seize up due to insufficient credit. */ - max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; - max_burst = min(max_burst, 131072UL); - max_burst = max(max_burst, queue->credit_bytes); + max_burst = max(131072UL, queue->credit_bytes); /* Take care that adding a new chunk of credit doesn't wrap to zero. 
*/ max_credit = queue->remaining_credit + queue->credit_bytes; @@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue, spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) break; - txp = RING_GET_REQUEST(&queue->tx, cons++); + RING_COPY_REQUEST(&queue->tx, cons++, txp); } while (1); queue->tx.req_cons = cons; } @@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue, if (drop_err) txp = &dropped_tx; - memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), - sizeof(*txp)); + RING_COPY_REQUEST(&queue->tx, cons + slots, txp); /* If the guest submitted a frame >= 64 KiB then * first->size overflowed and following slots will @@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue, return -EBADR; } - memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), - sizeof(extra)); + RING_COPY_REQUEST(&queue->tx, cons, &extra); if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { queue->tx.req_cons = ++cons; @@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, idx = queue->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ - memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); + RING_COPY_REQUEST(&queue->tx, idx, &txreq); /* Credit-based scheduling. */ if (txreq.size > queue->remaining_credit && diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 0d6003dee3af..7437c9dfd8fc 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -76,4 +76,5 @@ source "drivers/nfc/st21nfca/Kconfig" source "drivers/nfc/st-nci/Kconfig" source "drivers/nfc/nxp-nci/Kconfig" source "drivers/nfc/s3fwrn5/Kconfig" +source "drivers/nfc/st95hf/Kconfig" endmenu diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile index e3621416a48e..0a99e67daa10 100644 --- a/drivers/nfc/Makefile +++ b/drivers/nfc/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/ obj-$(CONFIG_NFC_ST_NCI) += st-nci/ obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/ obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5/ +obj-$(CONFIG_NFC_ST95HF) += st95hf/ diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 532db28145c7..5e797d5c38ed 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -298,6 +298,12 @@ static int fdp_nci_i2c_probe(struct i2c_client *client, return -ENODEV; } + /* Checking if we have an irq */ + if (client->irq <= 0) { + nfc_err(dev, "IRQ not present\n"); + return -ENODEV; + } + phy = devm_kzalloc(dev, sizeof(struct fdp_i2c_phy), GFP_KERNEL); if (!phy) @@ -307,12 +313,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client, phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; i2c_set_clientdata(client, phy); - /* Checking if we have an irq */ - if (client->irq <= 0) { - dev_err(dev, "IRQ not present\n"); - return -ENODEV; - } - r = request_threaded_irq(client->irq, NULL, fdp_nci_i2c_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, FDP_I2C_DRIVER_NAME, phy); diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c index daf352597ef8..918e8f2eac47 100644 --- a/drivers/nfc/microread/i2c.c +++ b/drivers/nfc/microread/i2c.c @@ -50,8 +50,6 @@ struct microread_i2c_phy { struct i2c_client *i2c_dev; struct nfc_hci_dev *hdev; - int irq; - int hard_fault; /* * < 0 if hardware error occured (e.g. i2c err) * and prevents normal operation. 
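The xen-netback hunks above replace RING_GET_REQUEST(), which hands back a pointer into the shared ring page, with RING_COPY_REQUEST(), which snapshots the slot into backend-local memory before anything inspects it. The usual motivation for this pattern is to avoid double-fetch races with a misbehaving frontend. A toy sketch of the difference, using a simplified stand-in type rather than the real ring macros:

/*
 * Toy illustration, not the Xen ring macros: "shared" stands in for a slot in
 * a ring page that the frontend can rewrite at any moment.
 */
#include <stdint.h>

struct toy_req {
	uint16_t id;
	uint32_t size;
};

#define TOY_MAX_SIZE 4096u

/* Racy double fetch: size may change between the check and the use. */
static uint32_t consume_unsafe(volatile struct toy_req *shared)
{
	if (shared->size > TOY_MAX_SIZE)	/* fetch #1 */
		return 0;
	return shared->size;			/* fetch #2: may differ now */
}

/* Snapshot first, then validate and use only the private copy; this is the
 * property RING_COPY_REQUEST() gives the backend. */
static uint32_t consume_safe(volatile struct toy_req *shared)
{
	struct toy_req local;

	local.id = shared->id;			/* one read of each field */
	local.size = shared->size;
	if (local.size > TOY_MAX_SIZE)
		return 0;
	return local.size;			/* later rewrites cannot matter */
}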
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c index 26ac9e5fa1ab..93aaca586858 100644 --- a/drivers/nfc/nfcsim.c +++ b/drivers/nfc/nfcsim.c @@ -32,6 +32,8 @@ #define NFCSIM_POLL_TARGET 2 #define NFCSIM_POLL_DUAL (NFCSIM_POLL_INITIATOR | NFCSIM_POLL_TARGET) +#define RX_DEFAULT_DELAY 5 + struct nfcsim { struct nfc_dev *nfc_dev; @@ -51,6 +53,8 @@ struct nfcsim { u8 initiator; + u32 rx_delay; + data_exchange_cb_t cb; void *cb_context; @@ -320,10 +324,9 @@ static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target, * If packet transmission occurs immediately between them, we have a * non-stop flow of several tens of thousands SYMM packets per second * and a burning cpu. - * - * TODO: Add support for a sysfs entry to control this delay. */ - queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); + queue_delayed_work(wq, &peer->recv_work, + msecs_to_jiffies(dev->rx_delay)); mutex_unlock(&peer->lock); @@ -461,6 +464,7 @@ static struct nfcsim *nfcsim_init_dev(void) if (rc) goto free_nfc_dev; + dev->rx_delay = RX_DEFAULT_DELAY; return dev; free_nfc_dev: diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index df4333c7ee0f..11520f472f98 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -52,7 +52,6 @@ struct nxp_nci_i2c_phy { unsigned int gpio_en; unsigned int gpio_fw; - unsigned int gpio_irq; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) @@ -85,7 +84,7 @@ static int nxp_nci_i2c_write(void *phy_id, struct sk_buff *skb) return phy->hard_fault; r = i2c_master_send(client, skb->data, skb->len); - if (r == -EREMOTEIO) { + if (r < 0) { /* Retry, chip was in standby */ usleep_range(110000, 120000); r = i2c_master_send(client, skb->data, skb->len); @@ -264,8 +263,6 @@ exit_irq_none: return IRQ_NONE; } -#ifdef CONFIG_OF - static int nxp_nci_i2c_parse_devtree(struct i2c_client *client) { struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client); @@ -294,48 +291,24 @@ static int nxp_nci_i2c_parse_devtree(struct i2c_client *client) } phy->gpio_fw = r; - r = irq_of_parse_and_map(pp, 0); - if (r < 0) { - nfc_err(&client->dev, "Unable to get irq, error: %d\n", r); - return r; - } - client->irq = r; - return 0; } -#else - -static int nxp_nci_i2c_parse_devtree(struct i2c_client *client) -{ - return -ENODEV; -} - -#endif - static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy) { struct i2c_client *client = phy->i2c_dev; - struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq; + struct gpio_desc *gpiod_en, *gpiod_fw; gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW); gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW); - gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN); - if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) { + if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw)) { nfc_err(&client->dev, "No GPIOs\n"); return -EINVAL; } - client->irq = gpiod_to_irq(gpiod_irq); - if (client->irq < 0) { - nfc_err(&client->dev, "No IRQ\n"); - return -EINVAL; - } - phy->gpio_en = desc_to_gpio(gpiod_en); phy->gpio_fw = desc_to_gpio(gpiod_fw); - phy->gpio_irq = desc_to_gpio(gpiod_irq); return 0; } @@ -374,7 +347,6 @@ static int nxp_nci_i2c_probe(struct i2c_client *client, } else if (pdata) { phy->gpio_en = pdata->gpio_en; phy->gpio_fw = pdata->gpio_fw; - client->irq = pdata->irq; } else if (ACPI_HANDLE(&client->dev)) { r = nxp_nci_i2c_acpi_config(phy); if (r < 0) diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index fa75c53f3fa5..76c318444304 100644 --- 
a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -166,7 +166,6 @@ struct pn544_i2c_phy { struct nfc_hci_dev *hdev; unsigned int gpio_en; - unsigned int gpio_irq; unsigned int gpio_fw; unsigned int en_polarity; @@ -879,9 +878,8 @@ static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client) { struct pn544_i2c_phy *phy = i2c_get_clientdata(client); const struct acpi_device_id *id; - struct gpio_desc *gpiod_en, *gpiod_irq, *gpiod_fw; + struct gpio_desc *gpiod_en, *gpiod_fw; struct device *dev; - int ret; if (!client) return -EINVAL; @@ -914,32 +912,9 @@ static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client) phy->gpio_fw = desc_to_gpio(gpiod_fw); - /* Get IRQ GPIO */ - gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0, - GPIOD_IN); - if (IS_ERR(gpiod_irq)) { - nfc_err(dev, "Unable to get IRQ GPIO\n"); - return -ENODEV; - } - - phy->gpio_irq = desc_to_gpio(gpiod_irq); - - /* Map the pin to an IRQ */ - ret = gpiod_to_irq(gpiod_irq); - if (ret < 0) { - nfc_err(dev, "Fail pin IRQ mapping\n"); - return ret; - } - - nfc_info(dev, "GPIO resource, no:%d irq:%d\n", - desc_to_gpio(gpiod_irq), ret); - client->irq = ret; - return 0; } -#ifdef CONFIG_OF - static int pn544_hci_i2c_of_request_resources(struct i2c_client *client) { struct pn544_i2c_phy *phy = i2c_get_clientdata(client); @@ -996,15 +971,6 @@ static int pn544_hci_i2c_of_request_resources(struct i2c_client *client) goto err_gpio_fw; } - /* IRQ */ - ret = irq_of_parse_and_map(pp, 0); - if (ret < 0) { - nfc_err(&client->dev, - "Unable to get irq, error: %d\n", ret); - goto err_gpio_fw; - } - client->irq = ret; - return 0; err_gpio_fw: @@ -1015,15 +981,6 @@ err_dt: return ret; } -#else - -static int pn544_hci_i2c_of_request_resources(struct i2c_client *client) -{ - return -ENODEV; -} - -#endif - static int pn544_hci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1076,7 +1033,6 @@ static int pn544_hci_i2c_probe(struct i2c_client *client, phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE); phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET); - phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ); /* Using ACPI */ } else if (ACPI_HANDLE(&client->dev)) { r = pn544_hci_i2c_acpi_request_resources(client); diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 0d866ca295e3..9d9c8d57a042 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -147,7 +147,7 @@ static struct nci_ops s3fwrn5_nci_ops = { }; int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev, - struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload) + const struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload) { struct s3fwrn5_info *info; int ret; diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c index c61d8a308da4..3ed0adf6479b 100644 --- a/drivers/nfc/s3fwrn5/i2c.c +++ b/drivers/nfc/s3fwrn5/i2c.c @@ -125,7 +125,7 @@ static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb) return 0; } -static struct s3fwrn5_phy_ops i2c_phy_ops = { +static const struct s3fwrn5_phy_ops i2c_phy_ops = { .set_wake = s3fwrn5_i2c_set_wake, .set_mode = s3fwrn5_i2c_set_mode, .get_mode = s3fwrn5_i2c_get_mode, diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h index 89210d4828b8..7d5e516036fb 100644 --- a/drivers/nfc/s3fwrn5/s3fwrn5.h +++ b/drivers/nfc/s3fwrn5/s3fwrn5.h @@ -44,7 +44,7 @@ struct s3fwrn5_info { void *phy_id; struct device *pdev; - struct s3fwrn5_phy_ops *phy_ops; + const struct s3fwrn5_phy_ops *phy_ops; unsigned int 
max_payload; struct s3fwrn5_fw_info fw_info; @@ -90,7 +90,7 @@ static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb) } int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev, - struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload); + const struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload); void s3fwrn5_remove(struct nci_dev *ndev); int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig index e7c6db9c5860..dc9b777d78f6 100644 --- a/drivers/nfc/st-nci/Kconfig +++ b/drivers/nfc/st-nci/Kconfig @@ -1,19 +1,14 @@ config NFC_ST_NCI - tristate "STMicroelectronics ST NCI NFC driver" - depends on NFC_NCI - default n + tristate ---help--- STMicroelectronics NFC NCI chips core driver. It implements the chipset NCI logic and hooks into the NFC kernel APIs. Physical layers will register against it. - To compile this driver as a module, choose m here. The module will - be called st-nci. - Say N if unsure. - config NFC_ST_NCI_I2C - tristate "NFC ST NCI i2c support" - depends on NFC_ST_NCI && I2C + tristate "STMicroelectronics ST NCI NFC driver (I2C)" + depends on NFC_NCI && I2C + select NFC_ST_NCI ---help--- This module adds support for an I2C interface to the STMicroelectronics NFC NCI chips familly. @@ -23,8 +18,9 @@ config NFC_ST_NCI_I2C Say N if unsure. config NFC_ST_NCI_SPI - tristate "NFC ST NCI spi support" - depends on NFC_ST_NCI && SPI + tristate "STMicroelectronics ST NCI NFC driver (SPI)" + depends on NFC_NCI && SPI + select NFC_ST_NCI ---help--- This module adds support for an SPI interface to the STMicroelectronics NFC NCI chips familly. diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index 15e3ce2d274c..8a56b5c6e4c4 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -20,8 +20,10 @@ #include <linux/module.h> #include <linux/i2c.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nfc.h> @@ -40,11 +42,7 @@ #define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c" -static struct i2c_device_id st_nci_i2c_id_table[] = { - {ST_NCI_DRIVER_NAME, 0}, - {} -}; -MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table); +#define ST_NCI_GPIO_NAME_RESET "clf_reset" struct st_nci_i2c_phy { struct i2c_client *i2c_dev; @@ -210,7 +208,43 @@ static struct nfc_phy_ops i2c_phy_ops = { .disable = st_nci_i2c_disable, }; -#ifdef CONFIG_OF +static int st_nci_i2c_acpi_request_resources(struct i2c_client *client) +{ + struct st_nci_i2c_phy *phy = i2c_get_clientdata(client); + const struct acpi_device_id *id; + struct gpio_desc *gpiod_reset; + struct device *dev; + + if (!client) + return -EINVAL; + + dev = &client->dev; + + /* Match the struct device against a given list of ACPI IDs */ + id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!id) + return -ENODEV; + + /* Get RESET GPIO from ACPI */ + gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1, + GPIOD_OUT_HIGH); + if (IS_ERR(gpiod_reset)) { + nfc_err(dev, "Unable to get RESET GPIO\n"); + return -ENODEV; + } + + phy->gpio_reset = desc_to_gpio(gpiod_reset); + + phy->irq_polarity = irq_get_trigger_type(client->irq); + + phy->se_status.is_ese_present = + device_property_present(dev, "ese-present"); + phy->se_status.is_uicc_present = + device_property_present(dev, "uicc-present"); + + return 0; +} + static int 
st_nci_i2c_of_request_resources(struct i2c_client *client) { struct st_nci_i2c_phy *phy = i2c_get_clientdata(client); @@ -232,7 +266,7 @@ static int st_nci_i2c_of_request_resources(struct i2c_client *client) /* GPIO request and configuration */ r = devm_gpio_request_one(&client->dev, gpio, - GPIOF_OUT_INIT_HIGH, "clf_reset"); + GPIOF_OUT_INIT_HIGH, ST_NCI_GPIO_NAME_RESET); if (r) { nfc_err(&client->dev, "Failed to request reset pin\n"); return r; @@ -248,12 +282,6 @@ static int st_nci_i2c_of_request_resources(struct i2c_client *client) return 0; } -#else -static int st_nci_i2c_of_request_resources(struct i2c_client *client) -{ - return -ENODEV; -} -#endif static int st_nci_i2c_request_resources(struct i2c_client *client) { @@ -272,7 +300,8 @@ static int st_nci_i2c_request_resources(struct i2c_client *client) phy->irq_polarity = pdata->irq_polarity; r = devm_gpio_request_one(&client->dev, - phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset"); + phy->gpio_reset, GPIOF_OUT_INIT_HIGH, + ST_NCI_GPIO_NAME_RESET); if (r) { pr_err("%s : reset gpio_request failed\n", __FILE__); return r; @@ -322,6 +351,12 @@ static int st_nci_i2c_probe(struct i2c_client *client, "Cannot get platform resources\n"); return r; } + } else if (ACPI_HANDLE(&client->dev)) { + r = st_nci_i2c_acpi_request_resources(client); + if (r) { + nfc_err(&client->dev, "Cannot get ACPI data\n"); + return r; + } } else { nfc_err(&client->dev, "st_nci platform resources not available\n"); @@ -358,7 +393,19 @@ static int st_nci_i2c_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_OF +static struct i2c_device_id st_nci_i2c_id_table[] = { + {ST_NCI_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table); + +static const struct acpi_device_id st_nci_i2c_acpi_match[] = { + {"SMO2101"}, + {"SMO2102"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, st_nci_i2c_acpi_match); + static const struct of_device_id of_st_nci_i2c_match[] = { { .compatible = "st,st21nfcb-i2c", }, { .compatible = "st,st21nfcb_i2c", }, @@ -366,19 +413,18 @@ static const struct of_device_id of_st_nci_i2c_match[] = { {} }; MODULE_DEVICE_TABLE(of, of_st_nci_i2c_match); -#endif static struct i2c_driver st_nci_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = ST_NCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_st_nci_i2c_match), + .acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match), }, .probe = st_nci_i2c_probe, .id_table = st_nci_i2c_id_table, .remove = st_nci_i2c_remove, }; - module_i2c_driver(st_nci_i2c_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c index 0884b11001ef..50880d747b02 100644 --- a/drivers/nfc/st-nci/ndlc.c +++ b/drivers/nfc/st-nci/ndlc.c @@ -20,7 +20,6 @@ #include <net/nfc/nci_core.h> #include "st-nci.h" -#include "ndlc.h" #define NDLC_TIMER_T1 100 #define NDLC_TIMER_T1_WAIT 400 diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index dbab722a0654..a53e5df803eb 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -331,7 +331,7 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, switch (event) { case ST_NCI_EVT_CONNECTIVITY: - + r = nfc_se_connectivity(ndev->nfc_dev, host); break; case ST_NCI_EVT_TRANSACTION: /* According to specification etsi 102 622 @@ -392,7 +392,6 @@ void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe, } EXPORT_SYMBOL_GPL(st_nci_hci_event_received); - void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb) { diff --git a/drivers/nfc/st-nci/spi.c 
b/drivers/nfc/st-nci/spi.c index d6519bb9dba5..821dfa950fa8 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -20,8 +20,10 @@ #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nfc.h> @@ -34,18 +36,14 @@ /* ndlc header */ #define ST_NCI_FRAME_HEADROOM 1 -#define ST_NCI_FRAME_TAILROOM 0 +#define ST_NCI_FRAME_TAILROOM 0 #define ST_NCI_SPI_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */ #define ST_NCI_SPI_MAX_SIZE 250 /* req 4.2.1 */ #define ST_NCI_SPI_DRIVER_NAME "st_nci_spi" -static struct spi_device_id st_nci_spi_id_table[] = { - {ST_NCI_SPI_DRIVER_NAME, 0}, - {} -}; -MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table); +#define ST_NCI_GPIO_NAME_RESET "clf_reset" struct st_nci_spi_phy { struct spi_device *spi_dev; @@ -225,7 +223,43 @@ static struct nfc_phy_ops spi_phy_ops = { .disable = st_nci_spi_disable, }; -#ifdef CONFIG_OF +static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev) +{ + struct st_nci_spi_phy *phy = spi_get_drvdata(spi_dev); + const struct acpi_device_id *id; + struct gpio_desc *gpiod_reset; + struct device *dev; + + if (!spi_dev) + return -EINVAL; + + dev = &spi_dev->dev; + + /* Match the struct device against a given list of ACPI IDs */ + id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!id) + return -ENODEV; + + /* Get RESET GPIO from ACPI */ + gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1, + GPIOD_OUT_HIGH); + if (IS_ERR(gpiod_reset)) { + nfc_err(dev, "Unable to get RESET GPIO\n"); + return -ENODEV; + } + + phy->gpio_reset = desc_to_gpio(gpiod_reset); + + phy->irq_polarity = irq_get_trigger_type(spi_dev->irq); + + phy->se_status.is_ese_present = + device_property_present(dev, "ese-present"); + phy->se_status.is_uicc_present = + device_property_present(dev, "uicc-present"); + + return 0; +} + static int st_nci_spi_of_request_resources(struct spi_device *dev) { struct st_nci_spi_phy *phy = spi_get_drvdata(dev); @@ -247,7 +281,7 @@ static int st_nci_spi_of_request_resources(struct spi_device *dev) /* GPIO request and configuration */ r = devm_gpio_request_one(&dev->dev, gpio, - GPIOF_OUT_INIT_HIGH, "clf_reset"); + GPIOF_OUT_INIT_HIGH, ST_NCI_GPIO_NAME_RESET); if (r) { nfc_err(&dev->dev, "Failed to request reset pin\n"); return r; @@ -263,12 +297,6 @@ static int st_nci_spi_of_request_resources(struct spi_device *dev) return 0; } -#else -static int st_nci_spi_of_request_resources(struct spi_device *dev) -{ - return -ENODEV; -} -#endif static int st_nci_spi_request_resources(struct spi_device *dev) { @@ -287,7 +315,8 @@ static int st_nci_spi_request_resources(struct spi_device *dev) phy->irq_polarity = pdata->irq_polarity; r = devm_gpio_request_one(&dev->dev, - phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset"); + phy->gpio_reset, GPIOF_OUT_INIT_HIGH, + ST_NCI_GPIO_NAME_RESET); if (r) { pr_err("%s : reset gpio_request failed\n", __FILE__); return r; @@ -338,6 +367,12 @@ static int st_nci_spi_probe(struct spi_device *dev) "Cannot get platform resources\n"); return r; } + } else if (ACPI_HANDLE(&dev->dev)) { + r = st_nci_spi_acpi_request_resources(dev); + if (r) { + nfc_err(&dev->dev, "Cannot get ACPI data\n"); + return r; + } } else { nfc_err(&dev->dev, "st_nci platform resources not available\n"); @@ -374,24 +409,34 @@ static int st_nci_spi_remove(struct spi_device *dev) return 0; } -#ifdef CONFIG_OF 
+static struct spi_device_id st_nci_spi_id_table[] = { + {ST_NCI_SPI_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table); + +static const struct acpi_device_id st_nci_spi_acpi_match[] = { + {"SMO2101", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, st_nci_spi_acpi_match); + static const struct of_device_id of_st_nci_spi_match[] = { { .compatible = "st,st21nfcb-spi", }, {} }; MODULE_DEVICE_TABLE(of, of_st_nci_spi_match); -#endif static struct spi_driver st_nci_spi_driver = { .driver = { .name = ST_NCI_SPI_DRIVER_NAME, .of_match_table = of_match_ptr(of_st_nci_spi_match), + .acpi_match_table = ACPI_PTR(st_nci_spi_acpi_match), }, .probe = st_nci_spi_probe, .id_table = st_nci_spi_id_table, .remove = st_nci_spi_remove, }; - module_spi_driver(st_nci_spi_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/st21nfca/Kconfig b/drivers/nfc/st21nfca/Kconfig index ee459f066ade..cc3bd5658901 100644 --- a/drivers/nfc/st21nfca/Kconfig +++ b/drivers/nfc/st21nfca/Kconfig @@ -1,20 +1,15 @@ config NFC_ST21NFCA - tristate "STMicroelectronics ST21NFCA NFC driver" - depends on NFC_HCI + tristate select CRC_CCITT - default n ---help--- STMicroelectronics ST21NFCA core driver. It implements the chipset HCI logic and hooks into the NFC kernel APIs. Physical layers will register against it. - To compile this driver as a module, choose m here. The module will - be called st21nfca. - Say N if unsure. - config NFC_ST21NFCA_I2C - tristate "NFC ST21NFCA i2c support" - depends on NFC_ST21NFCA && I2C && NFC_SHDLC + tristate "STMicroelectronics ST21NFCA NFC driver (I2C)" + depends on NFC_HCI && I2C && NFC_SHDLC + select NFC_ST21NFCA ---help--- This module adds support for the STMicroelectronics st21nfca i2c interface. Select this if your platform is using the i2c bus. 
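The st-nci and st21nfca changes above all follow the same recipe for ACPI enumeration: add an acpi_device_id table, wire it up through .acpi_match_table, and pull the reset/enable GPIO, IRQ trigger type and extra properties from the matched device instead of the OF data. A condensed sketch of that recipe for a hypothetical I2C client driver; the "XYZ0001" ID, the "reset" connection ID and the property name are placeholders, only the calls themselves match the hunks above:

/*
 * Condensed ACPI-probe recipe; IDs and names are placeholders.
 */
#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/property.h>

static int example_acpi_request_resources(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct gpio_desc *reset;

	/* Bail out unless the device really came from the ACPI namespace. */
	if (!acpi_match_device(dev->driver->acpi_match_table, dev))
		return -ENODEV;

	/* Pick the reset line out of the device's GpioIo resources. */
	reset = devm_gpiod_get_index(dev, "reset", 0, GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* Trigger type for client->irq comes from the Interrupt/GpioInt
	 * resource; extra knobs arrive as _DSD device properties. */
	if (device_property_present(dev, "example-option"))
		dev_info(dev, "example-option set, irq trigger %u\n",
			 irq_get_trigger_type(client->irq));

	return 0;
}

static const struct acpi_device_id example_acpi_match[] = {
	{ "XYZ0001", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_match);

/*
 * In the i2c_driver (or spi_driver) definition the table is wired up with
 *	.driver.acpi_match_table = ACPI_PTR(example_acpi_match),
 * next to the existing .of_match_table and .id_table entries.
 */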
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index a98da33e680a..1f44a151d206 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -21,8 +21,10 @@ #include <linux/module.h> #include <linux/i2c.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <linux/acpi.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -60,12 +62,7 @@ #define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c" -static struct i2c_device_id st21nfca_hci_i2c_id_table[] = { - {ST21NFCA_HCI_DRIVER_NAME, 0}, - {} -}; - -MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table); +#define ST21NFCA_GPIO_NAME_EN "clf_enable" struct st21nfca_i2c_phy { struct i2c_client *i2c_dev; @@ -167,7 +164,6 @@ static void st21nfca_hci_i2c_disable(void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; - pr_info("\n"); gpio_set_value(phy->gpio_ena, 0); phy->powered = 0; @@ -210,7 +206,6 @@ static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb) I2C_DUMP_SKB("st21nfca_hci_i2c_write", skb); - if (phy->hard_fault != 0) return phy->hard_fault; @@ -509,7 +504,41 @@ static struct nfc_phy_ops i2c_phy_ops = { .disable = st21nfca_hci_i2c_disable, }; -#ifdef CONFIG_OF +static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) +{ + struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); + const struct acpi_device_id *id; + struct gpio_desc *gpiod_ena; + struct device *dev; + + if (!client) + return -EINVAL; + + dev = &client->dev; + + /* Match the struct device against a given list of ACPI IDs */ + id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!id) + return -ENODEV; + + /* Get EN GPIO from ACPI */ + gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1, + GPIOD_OUT_LOW); + if (!IS_ERR(gpiod_ena)) + phy->gpio_ena = desc_to_gpio(gpiod_ena); + + phy->gpio_ena = desc_to_gpio(gpiod_ena); + + phy->irq_polarity = irq_get_trigger_type(client->irq); + + phy->se_status.is_ese_present = + device_property_present(dev, "ese-present"); + phy->se_status.is_uicc_present = + device_property_present(dev, "uicc-present"); + + return 0; +} + static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) { struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); @@ -530,7 +559,7 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) /* GPIO request and configuration */ r = devm_gpio_request_one(&client->dev, gpio, GPIOF_OUT_INIT_HIGH, - "clf_enable"); + ST21NFCA_GPIO_NAME_EN); if (r) { nfc_err(&client->dev, "Failed to request enable pin\n"); return r; @@ -547,12 +576,6 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) return 0; } -#else -static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) -{ - return -ENODEV; -} -#endif static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) { @@ -572,7 +595,8 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) if (phy->gpio_ena > 0) { r = devm_gpio_request_one(&client->dev, phy->gpio_ena, - GPIOF_OUT_INIT_HIGH, "clf_enable"); + GPIOF_OUT_INIT_HIGH, + ST21NFCA_GPIO_NAME_EN); if (r) { pr_err("%s : ena gpio_request failed\n", __FILE__); return r; @@ -628,6 +652,12 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, nfc_err(&client->dev, "Cannot get platform resources\n"); return r; } + } else if (ACPI_HANDLE(&client->dev)) { + r = st21nfca_hci_i2c_acpi_request_resources(client); + if (r) { + 
nfc_err(&client->dev, "Cannot get ACPI data\n"); + return r; + } } else { nfc_err(&client->dev, "st21nfca platform resources not available\n"); return -ENODEV; @@ -670,26 +700,36 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_OF +static struct i2c_device_id st21nfca_hci_i2c_id_table[] = { + {ST21NFCA_HCI_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table); + +static const struct acpi_device_id st21nfca_hci_i2c_acpi_match[] = { + {"SMO2100", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, st21nfca_hci_i2c_acpi_match); + static const struct of_device_id of_st21nfca_i2c_match[] = { { .compatible = "st,st21nfca-i2c", }, { .compatible = "st,st21nfca_i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match); -#endif static struct i2c_driver st21nfca_hci_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = ST21NFCA_HCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_st21nfca_i2c_match), + .acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match), }, .probe = st21nfca_hci_i2c_probe, .id_table = st21nfca_hci_i2c_id_table, .remove = st21nfca_hci_i2c_remove, }; - module_i2c_driver(st21nfca_hci_i2c_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index c79d99b24c96..bd56a16e4007 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -312,7 +312,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, switch (event) { case ST21NFCA_EVT_CONNECTIVITY: - break; + r = nfc_se_connectivity(hdev->ndev, host); + break; case ST21NFCA_EVT_TRANSACTION: /* * According to specification etsi 102 622 @@ -342,7 +343,7 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, transaction->aid_len + 4, transaction->params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); - break; + break; default: nfc_err(&hdev->ndev->dev, "Unexpected event on connectivity gate\n"); return 1; diff --git a/drivers/nfc/st95hf/Kconfig b/drivers/nfc/st95hf/Kconfig new file mode 100644 index 000000000000..224f266fdcb6 --- /dev/null +++ b/drivers/nfc/st95hf/Kconfig @@ -0,0 +1,10 @@ +config NFC_ST95HF + tristate "ST95HF NFC Transceiver driver" + depends on SPI && NFC_DIGITAL + help + This enables the ST NFC driver for ST95HF NFC transceiver. + This makes use of SPI framework to communicate with transceiver + and registered with NFC digital core to support Linux NFC framework. + + Say Y here to compile support for ST NFC transceiver ST95HF + linux driver into the kernel or say M to compile it as module. diff --git a/drivers/nfc/st95hf/Makefile b/drivers/nfc/st95hf/Makefile new file mode 100644 index 000000000000..00760b38ab7e --- /dev/null +++ b/drivers/nfc/st95hf/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for STMicroelectronics NFC transceiver ST95HF +# + +obj-$(CONFIG_NFC_ST95HF) += st95hf.o +st95hf-objs := spi.o core.o diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c new file mode 100644 index 000000000000..c2840e412962 --- /dev/null +++ b/drivers/nfc/st95hf/core.c @@ -0,0 +1,1273 @@ +/* + * -------------------------------------------------------------------- + * Driver for ST NFC Transceiver ST95HF + * -------------------------------------------------------------------- + * Copyright (C) 2015 STMicroelectronics Pvt. Ltd. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/nfc.h> +#include <linux/of_gpio.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> +#include <linux/wait.h> +#include <net/nfc/digital.h> +#include <net/nfc/nfc.h> + +#include "spi.h" + +/* supported protocols */ +#define ST95HF_SUPPORTED_PROT (NFC_PROTO_ISO14443_MASK | \ + NFC_PROTO_ISO14443_B_MASK | \ + NFC_PROTO_ISO15693_MASK) +/* driver capabilities */ +#define ST95HF_CAPABILITIES NFC_DIGITAL_DRV_CAPS_IN_CRC + +/* Command Send Interface */ +/* ST95HF_COMMAND_SEND CMD Ids */ +#define ECHO_CMD 0x55 +#define WRITE_REGISTER_CMD 0x9 +#define PROTOCOL_SELECT_CMD 0x2 +#define SEND_RECEIVE_CMD 0x4 + +/* Select protocol codes */ +#define ISO15693_PROTOCOL_CODE 0x1 +#define ISO14443A_PROTOCOL_CODE 0x2 +#define ISO14443B_PROTOCOL_CODE 0x3 + +/* + * head room len is 3 + * 1 byte for control byte + * 1 byte for cmd + * 1 byte for size + */ +#define ST95HF_HEADROOM_LEN 3 + +/* + * tailroom is 1 for ISO14443A + * and 0 for ISO14443B/ISO15693, + * hence the max value 1 should be + * taken. + */ +#define ST95HF_TAILROOM_LEN 1 + +/* Command Response interface */ +#define MAX_RESPONSE_BUFFER_SIZE 280 +#define ECHORESPONSE 0x55 +#define ST95HF_ERR_MASK 0xF +#define ST95HF_TIMEOUT_ERROR 0x87 +#define ST95HF_NFCA_CRC_ERR_MASK 0x20 +#define ST95HF_NFCB_CRC_ERR_MASK 0x01 + +/* ST95HF transmission flag values */ +#define TRFLAG_NFCA_SHORT_FRAME 0x07 +#define TRFLAG_NFCA_STD_FRAME 0x08 +#define TRFLAG_NFCA_STD_FRAME_CRC 0x28 + +/* Misc defs */ +#define HIGH 1 +#define LOW 0 +#define ISO14443A_RATS_REQ 0xE0 +#define RATS_TB1_PRESENT_MASK 0x20 +#define RATS_TA1_PRESENT_MASK 0x10 +#define TB1_FWI_MASK 0xF0 +#define WTX_REQ_FROM_TAG 0xF2 + +#define MAX_CMD_LEN 0x7 + +#define MAX_CMD_PARAMS 4 +struct cmd { + int cmd_len; + unsigned char cmd_id; + unsigned char no_cmd_params; + unsigned char cmd_params[MAX_CMD_PARAMS]; + enum req_type req; +}; + +struct param_list { + int param_offset; + int new_param_val; +}; + +/* + * List of top-level cmds to be used internally by the driver. + * All these commands are build on top of ST95HF basic commands + * such as SEND_RECEIVE_CMD, PROTOCOL_SELECT_CMD, etc. + * These top level cmds are used internally while implementing various ops of + * digital layer/driver probe or extending the digital framework layer for + * features that are not yet implemented there, for example, WTX cmd handling. 
+ */ +enum st95hf_cmd_list { + CMD_ECHO, + CMD_ISO14443A_CONFIG, + CMD_ISO14443A_DEMOGAIN, + CMD_ISO14443B_DEMOGAIN, + CMD_ISO14443A_PROTOCOL_SELECT, + CMD_ISO14443B_PROTOCOL_SELECT, + CMD_WTX_RESPONSE, + CMD_FIELD_OFF, + CMD_ISO15693_PROTOCOL_SELECT, +}; + +static const struct cmd cmd_array[] = { + [CMD_ECHO] = { + .cmd_len = 0x2, + .cmd_id = ECHO_CMD, + .no_cmd_params = 0, + .req = SYNC, + }, + [CMD_ISO14443A_CONFIG] = { + .cmd_len = 0x7, + .cmd_id = WRITE_REGISTER_CMD, + .no_cmd_params = 0x4, + .cmd_params = {0x3A, 0x00, 0x5A, 0x04}, + .req = SYNC, + }, + [CMD_ISO14443A_DEMOGAIN] = { + .cmd_len = 0x7, + .cmd_id = WRITE_REGISTER_CMD, + .no_cmd_params = 0x4, + .cmd_params = {0x68, 0x01, 0x01, 0xDF}, + .req = SYNC, + }, + [CMD_ISO14443B_DEMOGAIN] = { + .cmd_len = 0x7, + .cmd_id = WRITE_REGISTER_CMD, + .no_cmd_params = 0x4, + .cmd_params = {0x68, 0x01, 0x01, 0x51}, + .req = SYNC, + }, + [CMD_ISO14443A_PROTOCOL_SELECT] = { + .cmd_len = 0x7, + .cmd_id = PROTOCOL_SELECT_CMD, + .no_cmd_params = 0x4, + .cmd_params = {ISO14443A_PROTOCOL_CODE, 0x00, 0x01, 0xA0}, + .req = SYNC, + }, + [CMD_ISO14443B_PROTOCOL_SELECT] = { + .cmd_len = 0x7, + .cmd_id = PROTOCOL_SELECT_CMD, + .no_cmd_params = 0x4, + .cmd_params = {ISO14443B_PROTOCOL_CODE, 0x01, 0x03, 0xFF}, + .req = SYNC, + }, + [CMD_WTX_RESPONSE] = { + .cmd_len = 0x6, + .cmd_id = SEND_RECEIVE_CMD, + .no_cmd_params = 0x3, + .cmd_params = {0xF2, 0x00, TRFLAG_NFCA_STD_FRAME_CRC}, + .req = ASYNC, + }, + [CMD_FIELD_OFF] = { + .cmd_len = 0x5, + .cmd_id = PROTOCOL_SELECT_CMD, + .no_cmd_params = 0x2, + .cmd_params = {0x0, 0x0}, + .req = SYNC, + }, + [CMD_ISO15693_PROTOCOL_SELECT] = { + .cmd_len = 0x5, + .cmd_id = PROTOCOL_SELECT_CMD, + .no_cmd_params = 0x2, + .cmd_params = {ISO15693_PROTOCOL_CODE, 0x0D}, + .req = SYNC, + }, +}; + +/* st95_digital_cmd_complete_arg stores client context */ +struct st95_digital_cmd_complete_arg { + struct sk_buff *skb_resp; + nfc_digital_cmd_complete_t complete_cb; + void *cb_usrarg; + bool rats; +}; + +/* + * structure containing ST95HF driver specific data. + * @spicontext: structure containing information required + * for spi communication between st95hf and host. + * @ddev: nfc digital device object. + * @nfcdev: nfc device object. + * @enable_gpio: gpio used to enable st95hf transceiver. + * @complete_cb_arg: structure to store various context information + * that is passed from nfc requesting thread to the threaded ISR. + * @st95hf_supply: regulator "consumer" for NFC device. + * @sendrcv_trflag: last byte of frame send by sendrecv command + * of st95hf. This byte contains transmission flag info. + * @exchange_lock: semaphore used for signaling the st95hf_remove + * function that the last outstanding async nfc request is finished. + * @rm_lock: mutex for ensuring safe access of nfc digital object + * from threaded ISR. Usage of this mutex avoids any race between + * deletion of the object from st95hf_remove() and its access from + * the threaded ISR. + * @nfcdev_free: flag to have the state of nfc device object. + * [alive | died] + * @current_protocol: current nfc protocol. + * @current_rf_tech: current rf technology. + * @fwi: frame waiting index, received in reply of RATS according to + * digital protocol. 
+ */ +struct st95hf_context { + struct st95hf_spi_context spicontext; + struct nfc_digital_dev *ddev; + struct nfc_dev *nfcdev; + unsigned int enable_gpio; + struct st95_digital_cmd_complete_arg complete_cb_arg; + struct regulator *st95hf_supply; + unsigned char sendrcv_trflag; + struct semaphore exchange_lock; + struct mutex rm_lock; + bool nfcdev_free; + u8 current_protocol; + u8 current_rf_tech; + int fwi; +}; + +/* + * st95hf_send_recv_cmd() is for sending commands to ST95HF + * that are described in the cmd_array[]. It can optionally + * receive the response if the cmd request is of type + * SYNC. For that to happen caller must pass true to recv_res. + * For ASYNC request, recv_res is ignored and the + * function will never try to receive the response on behalf + * of the caller. + */ +static int st95hf_send_recv_cmd(struct st95hf_context *st95context, + enum st95hf_cmd_list cmd, + int no_modif, + struct param_list *list_array, + bool recv_res) +{ + unsigned char spi_cmd_buffer[MAX_CMD_LEN]; + int i, ret; + struct device *dev = &st95context->spicontext.spidev->dev; + + if (cmd_array[cmd].cmd_len > MAX_CMD_LEN) + return -EINVAL; + if (cmd_array[cmd].no_cmd_params < no_modif) + return -EINVAL; + if (no_modif && !list_array) + return -EINVAL; + + spi_cmd_buffer[0] = ST95HF_COMMAND_SEND; + spi_cmd_buffer[1] = cmd_array[cmd].cmd_id; + spi_cmd_buffer[2] = cmd_array[cmd].no_cmd_params; + + memcpy(&spi_cmd_buffer[3], cmd_array[cmd].cmd_params, + spi_cmd_buffer[2]); + + for (i = 0; i < no_modif; i++) { + if (list_array[i].param_offset >= cmd_array[cmd].no_cmd_params) + return -EINVAL; + spi_cmd_buffer[3 + list_array[i].param_offset] = + list_array[i].new_param_val; + } + + ret = st95hf_spi_send(&st95context->spicontext, + spi_cmd_buffer, + cmd_array[cmd].cmd_len, + cmd_array[cmd].req); + if (ret) { + dev_err(dev, "st95hf_spi_send failed with error %d\n", ret); + return ret; + } + + if (cmd_array[cmd].req == SYNC && recv_res) { + unsigned char st95hf_response_arr[2]; + + ret = st95hf_spi_recv_response(&st95context->spicontext, + st95hf_response_arr); + if (ret < 0) { + dev_err(dev, "spi error from st95hf_spi_recv_response(), err = 0x%x\n", + ret); + return ret; + } + + if (st95hf_response_arr[0]) { + dev_err(dev, "st95hf error from st95hf_spi_recv_response(), err = 0x%x\n", + st95hf_response_arr[0]); + return -EIO; + } + } + + return 0; +} + +static int st95hf_echo_command(struct st95hf_context *st95context) +{ + int result = 0; + unsigned char echo_response; + + result = st95hf_send_recv_cmd(st95context, CMD_ECHO, 0, NULL, false); + if (result) + return result; + + /* If control reached here, response can be taken */ + result = st95hf_spi_recv_echo_res(&st95context->spicontext, + &echo_response); + if (result) { + dev_err(&st95context->spicontext.spidev->dev, + "err: echo response receieve error = 0x%x\n", result); + return result; + } + + if (echo_response == ECHORESPONSE) + return 0; + + dev_err(&st95context->spicontext.spidev->dev, "err: echo res is 0x%x\n", + echo_response); + + return -EIO; +} + +static int secondary_configuration_type4a(struct st95hf_context *stcontext) +{ + int result = 0; + struct device *dev = &stcontext->nfcdev->dev; + + /* 14443A config setting after select protocol */ + result = st95hf_send_recv_cmd(stcontext, + CMD_ISO14443A_CONFIG, + 0, + NULL, + true); + if (result) { + dev_err(dev, "type a config cmd, err = 0x%x\n", result); + return result; + } + + /* 14443A demo gain setting */ + result = st95hf_send_recv_cmd(stcontext, + CMD_ISO14443A_DEMOGAIN, + 0, + NULL, 
+ true); + if (result) + dev_err(dev, "type a demogain cmd, err = 0x%x\n", result); + + return result; +} + +static int secondary_configuration_type4b(struct st95hf_context *stcontext) +{ + int result = 0; + struct device *dev = &stcontext->nfcdev->dev; + + result = st95hf_send_recv_cmd(stcontext, + CMD_ISO14443B_DEMOGAIN, + 0, + NULL, + true); + if (result) + dev_err(dev, "type b demogain cmd, err = 0x%x\n", result); + + return result; +} + +static int st95hf_select_protocol(struct st95hf_context *stcontext, int type) +{ + int result = 0; + struct device *dev; + + dev = &stcontext->nfcdev->dev; + + switch (type) { + case NFC_DIGITAL_RF_TECH_106A: + stcontext->current_rf_tech = NFC_DIGITAL_RF_TECH_106A; + result = st95hf_send_recv_cmd(stcontext, + CMD_ISO14443A_PROTOCOL_SELECT, + 0, + NULL, + true); + if (result) { + dev_err(dev, "protocol sel, err = 0x%x\n", + result); + return result; + } + + /* secondary config. for 14443Type 4A after protocol select */ + result = secondary_configuration_type4a(stcontext); + if (result) { + dev_err(dev, "type a secondary config, err = 0x%x\n", + result); + return result; + } + break; + case NFC_DIGITAL_RF_TECH_106B: + stcontext->current_rf_tech = NFC_DIGITAL_RF_TECH_106B; + result = st95hf_send_recv_cmd(stcontext, + CMD_ISO14443B_PROTOCOL_SELECT, + 0, + NULL, + true); + if (result) { + dev_err(dev, "protocol sel send, err = 0x%x\n", + result); + return result; + } + + /* + * delay of 5-6 ms is required after select protocol + * command in case of ISO14443 Type B + */ + usleep_range(50000, 60000); + + /* secondary config. for 14443Type 4B after protocol select */ + result = secondary_configuration_type4b(stcontext); + if (result) { + dev_err(dev, "type b secondary config, err = 0x%x\n", + result); + return result; + } + break; + case NFC_DIGITAL_RF_TECH_ISO15693: + stcontext->current_rf_tech = NFC_DIGITAL_RF_TECH_ISO15693; + result = st95hf_send_recv_cmd(stcontext, + CMD_ISO15693_PROTOCOL_SELECT, + 0, + NULL, + true); + if (result) { + dev_err(dev, "protocol sel send, err = 0x%x\n", + result); + return result; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static void st95hf_send_st95enable_negativepulse(struct st95hf_context *st95con) +{ + /* First make irq_in pin high */ + gpio_set_value(st95con->enable_gpio, HIGH); + + /* wait for 1 milisecond */ + usleep_range(1000, 2000); + + /* Make irq_in pin low */ + gpio_set_value(st95con->enable_gpio, LOW); + + /* wait for minimum interrupt pulse to make st95 active */ + usleep_range(1000, 2000); + + /* At end make it high */ + gpio_set_value(st95con->enable_gpio, HIGH); +} + +/* + * Send a reset sequence over SPI bus (Reset command + wait 3ms + + * negative pulse on st95hf enable gpio + */ +static int st95hf_send_spi_reset_sequence(struct st95hf_context *st95context) +{ + int result = 0; + unsigned char reset_cmd = ST95HF_COMMAND_RESET; + + result = st95hf_spi_send(&st95context->spicontext, + &reset_cmd, + ST95HF_RESET_CMD_LEN, + ASYNC); + if (result) { + dev_err(&st95context->spicontext.spidev->dev, + "spi reset sequence cmd error = %d", result); + return result; + } + + /* wait for 3 milisecond to complete the controller reset process */ + usleep_range(3000, 4000); + + /* send negative pulse to make st95hf active */ + st95hf_send_st95enable_negativepulse(st95context); + + /* wait for 10 milisecond : HFO setup time */ + usleep_range(10000, 20000); + + return result; +} + +static int st95hf_por_sequence(struct st95hf_context *st95context) +{ + int nth_attempt = 1; + int result; + + 
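	/*
	 * Annotation (a reading of the loop below, not part of the original
	 * patch): the power-on-reset handshake issues up to three ECHO
	 * attempts.  After the first failure a full SPI reset sequence is
	 * sent, after the second only a wake-up pulse (in case the chip is
	 * merely asleep), each followed by a ~50 ms delay before retrying;
	 * -ETIMEDOUT is returned if all three attempts fail.
	 */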
st95hf_send_st95enable_negativepulse(st95context); + + usleep_range(5000, 6000); + do { + /* send an ECHO command and checks ST95HF response */ + result = st95hf_echo_command(st95context); + + dev_dbg(&st95context->spicontext.spidev->dev, + "response from echo function = 0x%x, attempt = %d\n", + result, nth_attempt); + + if (!result) + return 0; + + /* send an pulse on IRQ in case of the chip is on sleep state */ + if (nth_attempt == 2) + st95hf_send_st95enable_negativepulse(st95context); + else + st95hf_send_spi_reset_sequence(st95context); + + /* delay of 50 milisecond */ + usleep_range(50000, 51000); + } while (nth_attempt++ < 3); + + return -ETIMEDOUT; +} + +static int iso14443_config_fdt(struct st95hf_context *st95context, int wtxm) +{ + int result = 0; + struct device *dev = &st95context->spicontext.spidev->dev; + struct nfc_digital_dev *nfcddev = st95context->ddev; + unsigned char pp_typeb; + struct param_list new_params[2]; + + pp_typeb = cmd_array[CMD_ISO14443B_PROTOCOL_SELECT].cmd_params[2]; + + if (nfcddev->curr_protocol == NFC_PROTO_ISO14443 && + st95context->fwi < 4) + st95context->fwi = 4; + + new_params[0].param_offset = 2; + if (nfcddev->curr_protocol == NFC_PROTO_ISO14443) + new_params[0].new_param_val = st95context->fwi; + else if (nfcddev->curr_protocol == NFC_PROTO_ISO14443_B) + new_params[0].new_param_val = pp_typeb; + + new_params[1].param_offset = 3; + new_params[1].new_param_val = wtxm; + + switch (nfcddev->curr_protocol) { + case NFC_PROTO_ISO14443: + result = st95hf_send_recv_cmd(st95context, + CMD_ISO14443A_PROTOCOL_SELECT, + 2, + new_params, + true); + if (result) { + dev_err(dev, "WTX type a sel proto, err = 0x%x\n", + result); + return result; + } + + /* secondary config. for 14443Type 4A after protocol select */ + result = secondary_configuration_type4a(st95context); + if (result) { + dev_err(dev, "WTX type a second. config, err = 0x%x\n", + result); + return result; + } + break; + case NFC_PROTO_ISO14443_B: + result = st95hf_send_recv_cmd(st95context, + CMD_ISO14443B_PROTOCOL_SELECT, + 2, + new_params, + true); + if (result) { + dev_err(dev, "WTX type b sel proto, err = 0x%x\n", + result); + return result; + } + + /* secondary config. for 14443Type 4B after protocol select */ + result = secondary_configuration_type4b(st95context); + if (result) { + dev_err(dev, "WTX type b second. config, err = 0x%x\n", + result); + return result; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static int st95hf_handle_wtx(struct st95hf_context *stcontext, + bool new_wtx, + int wtx_val) +{ + int result = 0; + unsigned char val_mm = 0; + struct param_list new_params[1]; + struct nfc_digital_dev *nfcddev = stcontext->ddev; + struct device *dev = &stcontext->nfcdev->dev; + + if (new_wtx) { + result = iso14443_config_fdt(stcontext, wtx_val & 0x3f); + if (result) { + dev_err(dev, "Config. 
setting error on WTX req, err = 0x%x\n", + result); + return result; + } + + /* Send response of wtx with ASYNC as no response expected */ + new_params[0].param_offset = 1; + new_params[0].new_param_val = wtx_val; + + result = st95hf_send_recv_cmd(stcontext, + CMD_WTX_RESPONSE, + 1, + new_params, + false); + if (result) + dev_err(dev, "WTX response send, err = 0x%x\n", result); + return result; + } + + /* if no new wtx, cofigure with default values */ + if (nfcddev->curr_protocol == NFC_PROTO_ISO14443) + val_mm = cmd_array[CMD_ISO14443A_PROTOCOL_SELECT].cmd_params[3]; + else if (nfcddev->curr_protocol == NFC_PROTO_ISO14443_B) + val_mm = cmd_array[CMD_ISO14443B_PROTOCOL_SELECT].cmd_params[3]; + + result = iso14443_config_fdt(stcontext, val_mm); + if (result) + dev_err(dev, "Default config. setting error after WTX processing, err = 0x%x\n", + result); + + return result; +} + +static int st95hf_error_handling(struct st95hf_context *stcontext, + struct sk_buff *skb_resp, + int res_len) +{ + int result = 0; + unsigned char error_byte; + struct device *dev = &stcontext->nfcdev->dev; + + /* First check ST95HF specific error */ + if (skb_resp->data[0] & ST95HF_ERR_MASK) { + if (skb_resp->data[0] == ST95HF_TIMEOUT_ERROR) + result = -ETIMEDOUT; + else + result = -EIO; + return result; + } + + /* Check for CRC err only if CRC is present in the tag response */ + switch (stcontext->current_rf_tech) { + case NFC_DIGITAL_RF_TECH_106A: + if (stcontext->sendrcv_trflag == TRFLAG_NFCA_STD_FRAME_CRC) { + error_byte = skb_resp->data[res_len - 3]; + if (error_byte & ST95HF_NFCA_CRC_ERR_MASK) { + /* CRC error occurred */ + dev_err(dev, "CRC error, byte received = 0x%x\n", + error_byte); + result = -EIO; + } + } + break; + case NFC_DIGITAL_RF_TECH_106B: + case NFC_DIGITAL_RF_TECH_ISO15693: + error_byte = skb_resp->data[res_len - 1]; + if (error_byte & ST95HF_NFCB_CRC_ERR_MASK) { + /* CRC error occurred */ + dev_err(dev, "CRC error, byte received = 0x%x\n", + error_byte); + result = -EIO; + } + break; + } + + return result; +} + +static int st95hf_response_handler(struct st95hf_context *stcontext, + struct sk_buff *skb_resp, + int res_len) +{ + int result = 0; + int skb_len; + unsigned char val_mm; + struct nfc_digital_dev *nfcddev = stcontext->ddev; + struct device *dev = &stcontext->nfcdev->dev; + struct st95_digital_cmd_complete_arg *cb_arg; + + cb_arg = &stcontext->complete_cb_arg; + + /* Process the response */ + skb_put(skb_resp, res_len); + + /* Remove st95 header */ + skb_pull(skb_resp, 2); + + skb_len = skb_resp->len; + + /* check if it is case of RATS request reply & FWI is present */ + if (nfcddev->curr_protocol == NFC_PROTO_ISO14443 && cb_arg->rats && + (skb_resp->data[1] & RATS_TB1_PRESENT_MASK)) { + if (skb_resp->data[1] & RATS_TA1_PRESENT_MASK) + stcontext->fwi = + (skb_resp->data[3] & TB1_FWI_MASK) >> 4; + else + stcontext->fwi = + (skb_resp->data[2] & TB1_FWI_MASK) >> 4; + + val_mm = cmd_array[CMD_ISO14443A_PROTOCOL_SELECT].cmd_params[3]; + + result = iso14443_config_fdt(stcontext, val_mm); + if (result) { + dev_err(dev, "error in config_fdt to handle fwi of ATS, error=%d\n", + result); + return result; + } + } + cb_arg->rats = false; + + /* Remove CRC bytes only if received frames data has an eod (CRC) */ + switch (stcontext->current_rf_tech) { + case NFC_DIGITAL_RF_TECH_106A: + if (stcontext->sendrcv_trflag == TRFLAG_NFCA_STD_FRAME_CRC) + skb_trim(skb_resp, (skb_len - 5)); + else + skb_trim(skb_resp, (skb_len - 3)); + break; + case NFC_DIGITAL_RF_TECH_106B: + case NFC_DIGITAL_RF_TECH_ISO15693: + 
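		/*
		 * Annotation (inferred only from the trim lengths used here,
		 * not part of the original patch): Type B and ISO15693
		 * responses get the same fixed 3-byte trim as Type A frames
		 * sent without the CRC flag; only Type A frames sent with
		 * TRFLAG_NFCA_STD_FRAME_CRC lose two extra bytes above.
		 */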
skb_trim(skb_resp, (skb_len - 3)); + break; + } + + return result; +} + +static irqreturn_t st95hf_irq_handler(int irq, void *st95hfcontext) +{ + struct st95hf_context *stcontext = + (struct st95hf_context *)st95hfcontext; + + if (stcontext->spicontext.req_issync) { + complete(&stcontext->spicontext.done); + stcontext->spicontext.req_issync = false; + return IRQ_HANDLED; + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext) +{ + int result = 0; + int res_len; + static bool wtx; + struct device *dev; + struct device *spidevice; + struct nfc_digital_dev *nfcddev; + struct sk_buff *skb_resp; + struct st95hf_context *stcontext = + (struct st95hf_context *)st95hfcontext; + struct st95_digital_cmd_complete_arg *cb_arg; + + spidevice = &stcontext->spicontext.spidev->dev; + + /* + * check semaphore, if not down() already, then we don't + * know in which context the ISR is called and surely it + * will be a bug. Note that down() of the semaphore is done + * in the corresponding st95hf_in_send_cmd() and then + * only this ISR should be called. ISR will up() the + * semaphore before leaving. Hence when the ISR is called + * the correct behaviour is down_trylock() should always + * return 1 (indicating semaphore cant be taken and hence no + * change in semaphore count). + * If not, then we up() the semaphore and crash on + * a BUG() ! + */ + if (!down_trylock(&stcontext->exchange_lock)) { + up(&stcontext->exchange_lock); + WARN(1, "unknown context in ST95HF ISR"); + return IRQ_NONE; + } + + cb_arg = &stcontext->complete_cb_arg; + skb_resp = cb_arg->skb_resp; + + mutex_lock(&stcontext->rm_lock); + res_len = st95hf_spi_recv_response(&stcontext->spicontext, + skb_resp->data); + if (res_len < 0) { + dev_err(spidevice, "TISR spi response err = 0x%x\n", res_len); + result = res_len; + goto end; + } + + /* if stcontext->nfcdev_free is true, it means remove already ran */ + if (stcontext->nfcdev_free) { + result = -ENODEV; + goto end; + } + + dev = &stcontext->nfcdev->dev; + nfcddev = stcontext->ddev; + if (skb_resp->data[2] == WTX_REQ_FROM_TAG) { + /* Request for new FWT from tag */ + result = st95hf_handle_wtx(stcontext, true, skb_resp->data[3]); + if (result) + goto end; + + wtx = true; + mutex_unlock(&stcontext->rm_lock); + return IRQ_HANDLED; + } + + result = st95hf_error_handling(stcontext, skb_resp, res_len); + if (result) + goto end; + + result = st95hf_response_handler(stcontext, skb_resp, res_len); + if (result) + goto end; + + /* + * If select protocol is done on wtx req. 
do select protocol + * again with default values + */ + if (wtx) { + wtx = false; + result = st95hf_handle_wtx(stcontext, false, 0); + if (result) + goto end; + } + + /* call digital layer callback */ + cb_arg->complete_cb(stcontext->ddev, cb_arg->cb_usrarg, skb_resp); + + /* up the semaphore before returning */ + up(&stcontext->exchange_lock); + mutex_unlock(&stcontext->rm_lock); + + return IRQ_HANDLED; + +end: + kfree_skb(skb_resp); + wtx = false; + cb_arg->rats = false; + skb_resp = ERR_PTR(result); + /* call of callback with error */ + cb_arg->complete_cb(stcontext->ddev, cb_arg->cb_usrarg, skb_resp); + /* up the semaphore before returning */ + up(&stcontext->exchange_lock); + mutex_unlock(&stcontext->rm_lock); + return IRQ_HANDLED; +} + +/* NFC ops functions definition */ +static int st95hf_in_configure_hw(struct nfc_digital_dev *ddev, + int type, + int param) +{ + struct st95hf_context *stcontext = nfc_digital_get_drvdata(ddev); + + if (type == NFC_DIGITAL_CONFIG_RF_TECH) + return st95hf_select_protocol(stcontext, param); + + if (type == NFC_DIGITAL_CONFIG_FRAMING) { + switch (param) { + case NFC_DIGITAL_FRAMING_NFCA_SHORT: + stcontext->sendrcv_trflag = TRFLAG_NFCA_SHORT_FRAME; + break; + case NFC_DIGITAL_FRAMING_NFCA_STANDARD: + stcontext->sendrcv_trflag = TRFLAG_NFCA_STD_FRAME; + break; + case NFC_DIGITAL_FRAMING_NFCA_T4T: + case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP: + case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: + stcontext->sendrcv_trflag = TRFLAG_NFCA_STD_FRAME_CRC; + break; + case NFC_DIGITAL_FRAMING_NFCB: + case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY: + case NFC_DIGITAL_FRAMING_ISO15693_T5T: + break; + } + } + + return 0; +} + +static int rf_off(struct st95hf_context *stcontext) +{ + int rc; + struct device *dev; + + dev = &stcontext->nfcdev->dev; + + rc = st95hf_send_recv_cmd(stcontext, CMD_FIELD_OFF, 0, NULL, true); + if (rc) + dev_err(dev, "protocol sel send field off, err = 0x%x\n", rc); + + return rc; +} + +static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, + struct sk_buff *skb, + u16 timeout, + nfc_digital_cmd_complete_t cb, + void *arg) +{ + struct st95hf_context *stcontext = nfc_digital_get_drvdata(ddev); + int rc; + struct sk_buff *skb_resp; + int len_data_to_tag = 0; + + skb_resp = nfc_alloc_recv_skb(MAX_RESPONSE_BUFFER_SIZE, GFP_KERNEL); + if (!skb_resp) { + rc = -ENOMEM; + goto error; + } + + switch (stcontext->current_rf_tech) { + case NFC_DIGITAL_RF_TECH_106A: + len_data_to_tag = skb->len + 1; + *skb_put(skb, 1) = stcontext->sendrcv_trflag; + break; + case NFC_DIGITAL_RF_TECH_106B: + case NFC_DIGITAL_RF_TECH_ISO15693: + len_data_to_tag = skb->len; + break; + default: + rc = -EINVAL; + goto free_skb_resp; + } + + skb_push(skb, 3); + skb->data[0] = ST95HF_COMMAND_SEND; + skb->data[1] = SEND_RECEIVE_CMD; + skb->data[2] = len_data_to_tag; + + stcontext->complete_cb_arg.skb_resp = skb_resp; + stcontext->complete_cb_arg.cb_usrarg = arg; + stcontext->complete_cb_arg.complete_cb = cb; + + if ((skb->data[3] == ISO14443A_RATS_REQ) && + ddev->curr_protocol == NFC_PROTO_ISO14443) + stcontext->complete_cb_arg.rats = true; + + /* + * down the semaphore to indicate to remove func that an + * ISR is pending, note that it will not block here in any case. + * If found blocked, it is a BUG! 
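	 * Annotation (summary of the locking protocol, not part of the
	 * original patch): st95hf_in_send_cmd() takes exchange_lock before
	 * starting the SPI transfer, this handler releases it after invoking
	 * the digital-layer callback, and st95hf_remove() takes it once more
	 * to wait for a still-pending ISR before tearing the device down.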
+ */ + rc = down_killable(&stcontext->exchange_lock); + if (rc) { + WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); + return rc; + } + + rc = st95hf_spi_send(&stcontext->spicontext, skb->data, + skb->len, + ASYNC); + if (rc) { + dev_err(&stcontext->nfcdev->dev, + "Error %d trying to perform data_exchange", rc); + /* up the semaphore since ISR will never come in this case */ + up(&stcontext->exchange_lock); + goto free_skb_resp; + } + + kfree_skb(skb); + + return rc; + +free_skb_resp: + kfree_skb(skb_resp); +error: + return rc; +} + +/* p2p will be supported in a later release ! */ +static int st95hf_tg_configure_hw(struct nfc_digital_dev *ddev, + int type, + int param) +{ + return 0; +} + +static int st95hf_tg_send_cmd(struct nfc_digital_dev *ddev, + struct sk_buff *skb, + u16 timeout, + nfc_digital_cmd_complete_t cb, + void *arg) +{ + return 0; +} + +static int st95hf_tg_listen(struct nfc_digital_dev *ddev, + u16 timeout, + nfc_digital_cmd_complete_t cb, + void *arg) +{ + return 0; +} + +static int st95hf_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech) +{ + return 0; +} + +static int st95hf_switch_rf(struct nfc_digital_dev *ddev, bool on) +{ + u8 rf_tech; + struct st95hf_context *stcontext = nfc_digital_get_drvdata(ddev); + + rf_tech = ddev->curr_rf_tech; + + if (on) + /* switch on RF field */ + return st95hf_select_protocol(stcontext, rf_tech); + + /* switch OFF RF field */ + return rf_off(stcontext); +} + +/* TODO st95hf_abort_cmd */ +static void st95hf_abort_cmd(struct nfc_digital_dev *ddev) +{ +} + +static struct nfc_digital_ops st95hf_nfc_digital_ops = { + .in_configure_hw = st95hf_in_configure_hw, + .in_send_cmd = st95hf_in_send_cmd, + + .tg_listen = st95hf_tg_listen, + .tg_configure_hw = st95hf_tg_configure_hw, + .tg_send_cmd = st95hf_tg_send_cmd, + .tg_get_rf_tech = st95hf_tg_get_rf_tech, + + .switch_rf = st95hf_switch_rf, + .abort_cmd = st95hf_abort_cmd, +}; + +static const struct spi_device_id st95hf_id[] = { + { "st95hf", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, st95hf_id); + +static int st95hf_probe(struct spi_device *nfc_spi_dev) +{ + int ret; + + struct st95hf_context *st95context; + struct st95hf_spi_context *spicontext; + + nfc_info(&nfc_spi_dev->dev, "ST95HF driver probe called.\n"); + + st95context = devm_kzalloc(&nfc_spi_dev->dev, + sizeof(struct st95hf_context), + GFP_KERNEL); + if (!st95context) + return -ENOMEM; + + spicontext = &st95context->spicontext; + + spicontext->spidev = nfc_spi_dev; + + st95context->fwi = + cmd_array[CMD_ISO14443A_PROTOCOL_SELECT].cmd_params[2]; + + if (device_property_present(&nfc_spi_dev->dev, "st95hfvin")) { + st95context->st95hf_supply = + devm_regulator_get(&nfc_spi_dev->dev, + "st95hfvin"); + if (IS_ERR(st95context->st95hf_supply)) { + dev_err(&nfc_spi_dev->dev, "failed to acquire regulator\n"); + return PTR_ERR(st95context->st95hf_supply); + } + + ret = regulator_enable(st95context->st95hf_supply); + if (ret) { + dev_err(&nfc_spi_dev->dev, "failed to enable regulator\n"); + return ret; + } + } + + init_completion(&spicontext->done); + mutex_init(&spicontext->spi_lock); + + /* + * Store spicontext in spi device object for using it in + * remove function + */ + dev_set_drvdata(&nfc_spi_dev->dev, spicontext); + + st95context->enable_gpio = + of_get_named_gpio(nfc_spi_dev->dev.of_node, + "enable-gpio", + 0); + if (!gpio_is_valid(st95context->enable_gpio)) { + dev_err(&nfc_spi_dev->dev, "No valid enable gpio\n"); + ret = st95context->enable_gpio; + goto err_disable_regulator; + } + + ret = 
devm_gpio_request_one(&nfc_spi_dev->dev, st95context->enable_gpio, + GPIOF_DIR_OUT | GPIOF_INIT_HIGH, + "enable_gpio"); + if (ret) + goto err_disable_regulator; + + if (nfc_spi_dev->irq > 0) { + if (devm_request_threaded_irq(&nfc_spi_dev->dev, + nfc_spi_dev->irq, + st95hf_irq_handler, + st95hf_irq_thread_handler, + IRQF_TRIGGER_FALLING, + "st95hf", + (void *)st95context) < 0) { + dev_err(&nfc_spi_dev->dev, "err: irq request for st95hf is failed\n"); + ret = -EINVAL; + goto err_disable_regulator; + } + } else { + dev_err(&nfc_spi_dev->dev, "not a valid IRQ associated with ST95HF\n"); + ret = -EINVAL; + goto err_disable_regulator; + } + + /* + * First reset SPI to handle warm reset of the system. + * It will put the ST95HF device in Power ON state + * which make the state of device identical to state + * at the time of cold reset of the system. + */ + ret = st95hf_send_spi_reset_sequence(st95context); + if (ret) { + dev_err(&nfc_spi_dev->dev, "err: spi_reset_sequence failed\n"); + goto err_disable_regulator; + } + + /* call PowerOnReset sequence of ST95hf to activate it */ + ret = st95hf_por_sequence(st95context); + if (ret) { + dev_err(&nfc_spi_dev->dev, "err: por seq failed for st95hf\n"); + goto err_disable_regulator; + } + + /* create NFC dev object and register with NFC Subsystem */ + st95context->ddev = nfc_digital_allocate_device(&st95hf_nfc_digital_ops, + ST95HF_SUPPORTED_PROT, + ST95HF_CAPABILITIES, + ST95HF_HEADROOM_LEN, + ST95HF_TAILROOM_LEN); + if (!st95context->ddev) { + ret = -ENOMEM; + goto err_disable_regulator; + } + + st95context->nfcdev = st95context->ddev->nfc_dev; + nfc_digital_set_parent_dev(st95context->ddev, &nfc_spi_dev->dev); + + ret = nfc_digital_register_device(st95context->ddev); + if (ret) { + dev_err(&st95context->nfcdev->dev, "st95hf registration failed\n"); + goto err_free_digital_device; + } + + /* store st95context in nfc device object */ + nfc_digital_set_drvdata(st95context->ddev, st95context); + + sema_init(&st95context->exchange_lock, 1); + mutex_init(&st95context->rm_lock); + + return ret; + +err_free_digital_device: + nfc_digital_free_device(st95context->ddev); +err_disable_regulator: + if (st95context->st95hf_supply) + regulator_disable(st95context->st95hf_supply); + + return ret; +} + +static int st95hf_remove(struct spi_device *nfc_spi_dev) +{ + int result = 0; + unsigned char reset_cmd = ST95HF_COMMAND_RESET; + struct st95hf_spi_context *spictx = dev_get_drvdata(&nfc_spi_dev->dev); + + struct st95hf_context *stcontext = container_of(spictx, + struct st95hf_context, + spicontext); + + mutex_lock(&stcontext->rm_lock); + + nfc_digital_unregister_device(stcontext->ddev); + nfc_digital_free_device(stcontext->ddev); + stcontext->nfcdev_free = true; + + mutex_unlock(&stcontext->rm_lock); + + /* if last in_send_cmd's ISR is pending, wait for it to finish */ + result = down_killable(&stcontext->exchange_lock); + if (result == -EINTR) + dev_err(&spictx->spidev->dev, "sleep for semaphore interrupted by signal\n"); + + /* next reset the ST95HF controller */ + result = st95hf_spi_send(&stcontext->spicontext, + &reset_cmd, + ST95HF_RESET_CMD_LEN, + ASYNC); + if (result) { + dev_err(&spictx->spidev->dev, + "ST95HF reset failed in remove() err = %d\n", result); + return result; + } + + /* wait for 3 ms to complete the controller reset process */ + usleep_range(3000, 4000); + + /* disable regulator */ + if (stcontext->st95hf_supply) + regulator_disable(stcontext->st95hf_supply); + + return result; +} + +/* Register as SPI protocol driver */ +static struct 
spi_driver st95hf_driver = { + .driver = { + .name = "st95hf", + .owner = THIS_MODULE, + }, + .id_table = st95hf_id, + .probe = st95hf_probe, + .remove = st95hf_remove, +}; + +module_spi_driver(st95hf_driver); + +MODULE_AUTHOR("Shikha Singh <shikha.singh@st.com>"); +MODULE_DESCRIPTION("ST NFC Transceiver ST95HF driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nfc/st95hf/spi.c b/drivers/nfc/st95hf/spi.c new file mode 100644 index 000000000000..e2d3bbcc8c34 --- /dev/null +++ b/drivers/nfc/st95hf/spi.c @@ -0,0 +1,167 @@ +/* + * ---------------------------------------------------------------------------- + * drivers/nfc/st95hf/spi.c function definitions for SPI communication + * ---------------------------------------------------------------------------- + * Copyright (C) 2015 STMicroelectronics Pvt. Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "spi.h" + +/* Function to send user provided buffer to ST95HF through SPI */ +int st95hf_spi_send(struct st95hf_spi_context *spicontext, + unsigned char *buffertx, + int datalen, + enum req_type reqtype) +{ + struct spi_message m; + int result = 0; + struct spi_device *spidev = spicontext->spidev; + struct spi_transfer tx_transfer = { + .tx_buf = buffertx, + .len = datalen, + }; + + mutex_lock(&spicontext->spi_lock); + + if (reqtype == SYNC) { + spicontext->req_issync = true; + reinit_completion(&spicontext->done); + } else { + spicontext->req_issync = false; + } + + spi_message_init(&m); + spi_message_add_tail(&tx_transfer, &m); + + result = spi_sync(spidev, &m); + if (result) { + dev_err(&spidev->dev, "error: sending cmd to st95hf using SPI = %d\n", + result); + mutex_unlock(&spicontext->spi_lock); + return result; + } + + /* return for asynchronous or no-wait case */ + if (reqtype == ASYNC) { + mutex_unlock(&spicontext->spi_lock); + return 0; + } + + result = wait_for_completion_timeout(&spicontext->done, + msecs_to_jiffies(1000)); + /* check for timeout or success */ + if (!result) { + dev_err(&spidev->dev, "error: response not ready timeout\n"); + result = -ETIMEDOUT; + } else { + result = 0; + } + + mutex_unlock(&spicontext->spi_lock); + + return result; +} +EXPORT_SYMBOL_GPL(st95hf_spi_send); + +/* Function to Receive command Response */ +int st95hf_spi_recv_response(struct st95hf_spi_context *spicontext, + unsigned char *receivebuff) +{ + int len = 0; + struct spi_transfer tx_takedata; + struct spi_message m; + struct spi_device *spidev = spicontext->spidev; + unsigned char readdata_cmd = ST95HF_COMMAND_RECEIVE; + struct spi_transfer t[2] = { + {.tx_buf = &readdata_cmd, .len = 1,}, + {.rx_buf = receivebuff, .len = 2, .cs_change = 1,}, + }; + + int ret = 0; + + memset(&tx_takedata, 0x0, sizeof(struct spi_transfer)); + + mutex_lock(&spicontext->spi_lock); + + /* First spi transfer to know the length of valid data */ + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + + ret = spi_sync(spidev, &m); + 
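	/*
	 * Annotation (not part of the original patch): the two bytes just
	 * read are the ST95HF response header; after the error check below,
	 * bits 6:5 of the first byte are treated as high-order length bits
	 * for long frames and combined with the second byte to size the
	 * second transfer.
	 */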
if (ret) { + dev_err(&spidev->dev, "spi_recv_resp, data length error = %d\n", + ret); + mutex_unlock(&spicontext->spi_lock); + return ret; + } + + /* As 2 bytes are already read */ + len = 2; + + /* Support of long frame */ + if (receivebuff[0] & 0x60) + len += (((receivebuff[0] & 0x60) >> 5) << 8) | receivebuff[1]; + else + len += receivebuff[1]; + + /* Now make a transfer to read only relevant bytes */ + tx_takedata.rx_buf = &receivebuff[2]; + tx_takedata.len = len - 2; + + spi_message_init(&m); + spi_message_add_tail(&tx_takedata, &m); + + ret = spi_sync(spidev, &m); + + mutex_unlock(&spicontext->spi_lock); + if (ret) { + dev_err(&spidev->dev, "spi_recv_resp, data read error = %d\n", + ret); + return ret; + } + + return len; +} +EXPORT_SYMBOL_GPL(st95hf_spi_recv_response); + +int st95hf_spi_recv_echo_res(struct st95hf_spi_context *spicontext, + unsigned char *receivebuff) +{ + unsigned char readdata_cmd = ST95HF_COMMAND_RECEIVE; + struct spi_transfer t[2] = { + {.tx_buf = &readdata_cmd, .len = 1,}, + {.rx_buf = receivebuff, .len = 1,}, + }; + struct spi_message m; + struct spi_device *spidev = spicontext->spidev; + int ret = 0; + + mutex_lock(&spicontext->spi_lock); + + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + ret = spi_sync(spidev, &m); + + mutex_unlock(&spicontext->spi_lock); + + if (ret) + dev_err(&spidev->dev, "recv_echo_res, data read error = %d\n", + ret); + + return ret; +} +EXPORT_SYMBOL_GPL(st95hf_spi_recv_echo_res); diff --git a/drivers/nfc/st95hf/spi.h b/drivers/nfc/st95hf/spi.h new file mode 100644 index 000000000000..552d220747cd --- /dev/null +++ b/drivers/nfc/st95hf/spi.h @@ -0,0 +1,64 @@ +/* + * --------------------------------------------------------------------------- + * drivers/nfc/st95hf/spi.h functions declarations for SPI communication + * --------------------------------------------------------------------------- + * Copyright (C) 2015 STMicroelectronics – All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __LINUX_ST95HF_SPI_H +#define __LINUX_ST95HF_SPI_H + +#include <linux/spi/spi.h> + +/* Basic ST95HF SPI CMDs */ +#define ST95HF_COMMAND_SEND 0x0 +#define ST95HF_COMMAND_RESET 0x1 +#define ST95HF_COMMAND_RECEIVE 0x2 + +#define ST95HF_RESET_CMD_LEN 0x1 + +/* + * structure to contain st95hf spi communication specific information. + * @req_issync: true for synchronous calls. + * @spidev: st95hf spi device object. + * @done: completion structure to wait for st95hf response + * for synchronous calls. + * @spi_lock: mutex to allow only one spi transfer at a time. 
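 * Annotation (behaviour observed in st95hf_spi_send() and the IRQ
 * handler, not part of the original patch): for SYNC requests @done is
 * re-initialised before the transfer and completed from the interrupt
 * handler, which also clears @req_issync; st95hf_spi_send() waits on it
 * with a one second timeout.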
+ */ +struct st95hf_spi_context { + bool req_issync; + struct spi_device *spidev; + struct completion done; + struct mutex spi_lock; +}; + +/* flag to differentiate synchronous & asynchronous spi request */ +enum req_type { + SYNC, + ASYNC, +}; + +int st95hf_spi_send(struct st95hf_spi_context *spicontext, + unsigned char *buffertx, + int datalen, + enum req_type reqtype); + +int st95hf_spi_recv_response(struct st95hf_spi_context *spicontext, + unsigned char *receivebuff); + +int st95hf_spi_recv_echo_res(struct st95hf_spi_context *spicontext, + unsigned char *receivebuff); + +#endif diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index f857feb2b573..10842b7051b3 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -2139,7 +2139,7 @@ static int trf7970a_remove(struct spi_device *spi) #ifdef CONFIG_PM_SLEEP static int trf7970a_suspend(struct device *dev) { - struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); dev_dbg(dev, "Suspend\n"); @@ -2155,7 +2155,7 @@ static int trf7970a_suspend(struct device *dev) static int trf7970a_resume(struct device *dev) { - struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; @@ -2174,7 +2174,7 @@ static int trf7970a_resume(struct device *dev) #ifdef CONFIG_PM static int trf7970a_pm_runtime_suspend(struct device *dev) { - struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; @@ -2191,7 +2191,7 @@ static int trf7970a_pm_runtime_suspend(struct device *dev) static int trf7970a_pm_runtime_resume(struct device *dev) { - struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9e294ff4e652..0c67b57be83c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns) { bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); - if (kill) + if (kill) { blk_set_queue_dying(ns->queue); + + /* + * The controller was shutdown first if we got here through + * device removal. The shutdown may requeue outstanding + * requests. These need to be aborted immediately so + * del_gendisk doesn't block indefinitely for their completion. + */ + blk_mq_abort_requeue_list(ns->queue); + } if (ns->disk->flags & GENHD_FL_UP) del_gendisk(ns->disk); if (kill || !blk_queue_dying(ns->queue)) { @@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev) { struct nvme_ns *ns, *next; + if (nvme_io_incapable(dev)) { + /* + * If the device is not capable of IO (surprise hot-removal, + * for example), we need to quiesce prior to deleting the + * namespaces. This will end outstanding requests and prevent + * attempts to sync dirty data. 
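	 * Annotation (cross-reference, not part of the original patch):
	 * this pairs with the nvme_ns_remove() hunk above, which aborts the
	 * requeue list once the queue is marked dying so that del_gendisk()
	 * cannot block on requests requeued by the shutdown.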
+ */ + nvme_dev_shutdown(dev); + } list_for_each_entry_safe(ns, next, &dev->namespaces, list) nvme_ns_remove(ns); } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a87a868fed64..86829f8064a6 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -62,11 +62,9 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi rc = irq_of_parse_and_map(child, 0); if (rc > 0) { phy->irq = rc; - if (mdio->irq) - mdio->irq[addr] = rc; + mdio->irq[addr] = rc; } else { - if (mdio->irq) - phy->irq = mdio->irq[addr]; + phy->irq = mdio->irq[addr]; } if (of_property_read_bool(child, "broken-turn-around")) @@ -75,7 +73,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi /* Associate the OF node with the device structure so it * can be looked up later */ of_node_get(child); - phy->dev.of_node = child; + phy->mdio.dev.of_node = child; /* All data is now stored in the phy struct; * register it */ @@ -92,6 +90,37 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi return 0; } +static int of_mdiobus_register_device(struct mii_bus *mdio, + struct device_node *child, + u32 addr) +{ + struct mdio_device *mdiodev; + int rc; + + mdiodev = mdio_device_create(mdio, addr); + if (!mdiodev || IS_ERR(mdiodev)) + return 1; + + /* Associate the OF node with the device structure so it + * can be looked up later. + */ + of_node_get(child); + mdiodev->dev.of_node = child; + + /* All data is now stored in the mdiodev struct; register it. */ + rc = mdio_device_register(mdiodev); + if (rc) { + mdio_device_free(mdiodev); + of_node_put(child); + return 1; + } + + dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", + child->name, addr); + + return 0; +} + int of_mdio_parse_addr(struct device *dev, const struct device_node *np) { u32 addr; @@ -114,6 +143,35 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np) } EXPORT_SYMBOL(of_mdio_parse_addr); +/* + * Return true if the child node is for a phy. It must either: + * o Compatible string of "ethernet-phy-idX.X" + * o Compatible string of "ethernet-phy-ieee802.3-c45" + * o Compatible string of "ethernet-phy-ieee802.3-c22" + * o No compatibility string + * + * A device which is not a phy is expected to have a compatible string + * indicating what sort of device it is. + */ +static bool of_mdiobus_child_is_phy(struct device_node *child) +{ + u32 phy_id; + + if (of_get_phy_id(child, &phy_id) != -EINVAL) + return true; + + if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c45")) + return true; + + if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22")) + return true; + + if (!of_find_property(child, "compatible", NULL)) + return true; + + return false; +} + /** * of_mdiobus_register - Register mii_bus and create PHYs from the device tree * @mdio: pointer to mii_bus structure @@ -127,17 +185,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) struct device_node *child; const __be32 *paddr; bool scanphys = false; - int addr, rc, i; + int addr, rc; /* Mask out all PHYs from auto probing. 
Instead the PHYs listed in * the device tree are populated after the bus has been registered */ mdio->phy_mask = ~0; - /* Clear all the IRQ properties */ - if (mdio->irq) - for (i=0; i<PHY_MAX_ADDR; i++) - mdio->irq[i] = PHY_POLL; - mdio->dev.of_node = np; /* Register the MDIO bus */ @@ -145,7 +198,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) if (rc) return rc; - /* Loop over the child nodes and register a phy_device for each one */ + /* Loop over the child nodes and register a phy_device for each phy */ for_each_available_child_of_node(np, child) { addr = of_mdio_parse_addr(&mdio->dev, child); if (addr < 0) { @@ -153,9 +206,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) continue; } - rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc) - continue; + if (of_mdiobus_child_is_phy(child)) + of_mdiobus_register_phy(mdio, child, addr); + else + of_mdiobus_register_device(mdio, child, addr); } if (!scanphys) @@ -170,16 +224,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) for (addr = 0; addr < PHY_MAX_ADDR; addr++) { /* skip already registered PHYs */ - if (mdio->phy_map[addr]) + if (mdiobus_is_registered_device(mdio, addr)) continue; /* be noisy to encourage people to set reg property */ dev_info(&mdio->dev, "scan phy %s at address %i\n", child->name, addr); - rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc) - continue; + if (of_mdiobus_child_is_phy(child)) + of_mdiobus_register_phy(mdio, child, addr); } } @@ -238,7 +291,7 @@ struct phy_device *of_phy_connect(struct net_device *dev, ret = phy_connect_direct(dev, phy, hndlr, iface); /* refcount is held by phy_connect_direct() on success */ - put_device(&phy->dev); + put_device(&phy->mdio.dev); return ret ? NULL : phy; } @@ -268,7 +321,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, ret = phy_attach_direct(dev, phy, flags, iface); /* refcount is held by phy_attach_direct() on success */ - put_device(&phy->dev); + put_device(&phy->mdio.dev); return ret ? NULL : phy; } diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index f131ba947dc6..c0ad9aaa16a7 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -5,6 +5,7 @@ config PCI_DRA7XX bool "TI DRA7xx PCIe controller" select PCIE_DW depends on OF && HAS_IOMEM && TI_PIPE3 + depends on BROKEN help Enables support for the PCIe controller in the DRA7xx SoC. There are two instances of PCIe controller in DRA7xx. 
This controller can diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 163671a4f798..77f7c669a1b9 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -61,7 +61,9 @@ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, *val = *(u8 __force *) walker; else if (size == 2) *val = *(u16 __force *) walker; - else if (size != 4) + else if (size == 4) + *val = reg_val; + else return PCIBIOS_BAD_REGISTER_NUMBER; return PCIBIOS_SUCCESSFUL; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7eaa4c87fec7..7a0df3fdbfae 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -257,6 +257,7 @@ void pci_msi_mask_irq(struct irq_data *data) { msi_set_mask_bit(data, 1); } +EXPORT_SYMBOL_GPL(pci_msi_mask_irq); /** * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts @@ -266,6 +267,7 @@ void pci_msi_unmask_irq(struct irq_data *data) { msi_set_mask_bit(data, 0); } +EXPORT_SYMBOL_GPL(pci_msi_unmask_irq); void default_restore_msi_irqs(struct pci_dev *dev) { @@ -1126,6 +1128,7 @@ struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { return to_pci_dev(desc->dev); } +EXPORT_SYMBOL(msi_desc_to_pci_dev); void *msi_desc_to_pci_sysdata(struct msi_desc *desc) { @@ -1285,6 +1288,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, domain->bus_token = DOMAIN_BUS_PCI_MSI; return domain; } +EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); /** * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a32ba753e413..9a033e8ee9a4 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -9,7 +9,9 @@ #include <linux/delay.h> #include <linux/init.h> +#include <linux/irqdomain.h> #include <linux/pci.h> +#include <linux/msi.h> #include <linux/pci_hotplug.h> #include <linux/module.h> #include <linux/pci-aspm.h> @@ -529,7 +531,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) return !!adev->power.flags.dsw_present; } -static struct pci_platform_pm_ops acpi_pci_platform_pm = { +static const struct pci_platform_pm_ops acpi_pci_platform_pm = { .is_manageable = acpi_pci_power_manageable, .set_state = acpi_pci_set_power_state, .choose_state = acpi_pci_choose_state, @@ -689,6 +691,46 @@ static struct acpi_bus_type acpi_pci_bus = { .cleanup = pci_acpi_cleanup, }; + +static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev); + +/** + * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode + * @fn: Callback matching a device to a fwnode that identifies a PCI + * MSI domain. + * + * This should be called by irqchip driver, which is the parent of + * the MSI domain to provide callback interface to query fwnode. + */ +void +pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)) +{ + pci_msi_get_fwnode_cb = fn; +} + +/** + * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge + * @bus: The PCI host bridge bus. + * + * This function uses the callback function registered by + * pci_msi_register_fwnode_provider() to retrieve the irq_domain with + * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus. + * This returns NULL on error or when the domain is not found. 
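 * Annotation (cross-reference, not part of the original patch): the
 * probe.c hunk later in this patch makes pci_host_bridge_msi_domain()
 * fall back to this helper when pci_host_bridge_of_msi_domain() does
 * not return a domain.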
+ */ +struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) +{ + struct fwnode_handle *fwnode; + + if (!pci_msi_get_fwnode_cb) + return NULL; + + fwnode = pci_msi_get_fwnode_cb(&bus->dev); + if (!fwnode) + return NULL; + + return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI); +} + static int __init acpi_pci_init(void) { int ret; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 314db8c1047a..d1a7105b9276 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -527,9 +527,9 @@ static void pci_restore_bars(struct pci_dev *dev) pci_update_resource(dev, i); } -static struct pci_platform_pm_ops *pci_platform_pm; +static const struct pci_platform_pm_ops *pci_platform_pm; -int pci_set_platform_pm(struct pci_platform_pm_ops *ops) +int pci_set_platform_pm(const struct pci_platform_pm_ops *ops) { if (!ops->is_manageable || !ops->set_state || !ops->choose_state || !ops->sleep_wake) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d390fc1475ec..f6f151a42147 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -68,7 +68,7 @@ struct pci_platform_pm_ops { bool (*need_resume)(struct pci_dev *dev); }; -int pci_set_platform_pm(struct pci_platform_pm_ops *ops); +int pci_set_platform_pm(const struct pci_platform_pm_ops *ops); void pci_update_current_state(struct pci_dev *dev, pci_power_t state); void pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index edb1984201e9..553a029e37f1 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -672,6 +672,8 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus) * should be called from here. */ d = pci_host_bridge_of_msi_domain(bus); + if (!d) + d = pci_host_bridge_acpi_msi_domain(bus); return d; } diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index be3755c973e9..166637f2917c 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -551,14 +551,6 @@ static void armpmu_init(struct arm_pmu *armpmu) }; } -int armpmu_register(struct arm_pmu *armpmu, int type) -{ - armpmu_init(armpmu); - pr_info("enabled with %s PMU driver, %d counters available\n", - armpmu->name, armpmu->num_events); - return perf_pmu_register(&armpmu->pmu, armpmu->name, type); -} - /* Set at runtime when we know what CPU type we are. */ static struct arm_pmu *__oprofile_cpu_pmu; @@ -887,6 +879,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, return -ENOMEM; } + armpmu_init(pmu); + if (!__oprofile_cpu_pmu) __oprofile_cpu_pmu = pmu; @@ -912,10 +906,13 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_free; - ret = armpmu_register(pmu, -1); + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (ret) goto out_destroy; + pr_info("enabled with %s PMU driver, %d counters available\n", + pmu->name, pmu->num_events); + return 0; out_destroy: diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 7eb5859dd035..e7e117d5dbbe 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -118,6 +118,13 @@ config PHY_RCAR_GEN2 help Support for USB PHY found on Renesas R-Car generation 2 SoCs. +config PHY_RCAR_GEN3_USB2 + tristate "Renesas R-Car generation 3 USB 2.0 PHY driver" + depends on OF && ARCH_SHMOBILE + select GENERIC_PHY + help + Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs. + config OMAP_CONTROL_PHY tristate "OMAP CONTROL PHY Driver" depends on ARCH_OMAP2PLUS || COMPILE_TEST @@ -215,6 +222,15 @@ config PHY_MT65XX_USB3 for mt65xx SoCs. 
it supports two usb2.0 ports and one usb3.0 port. +config PHY_HI6220_USB + tristate "hi6220 USB PHY support" + select GENERIC_PHY + select MFD_SYSCON + help + Enable this to support the HISILICON HI6220 USB PHY. + + To compile this driver as a module, choose M here. + config PHY_SUN4I_USB tristate "Allwinner sunxi SoC USB PHY driver" depends on ARCH_SUNXI && HAS_IOMEM && OF @@ -233,6 +249,7 @@ config PHY_SUN9I_USB tristate "Allwinner sun9i SoC USB PHY driver" depends on ARCH_SUNXI && HAS_IOMEM && OF depends on RESET_CONTROLLER + depends on USB_COMMON select GENERIC_PHY help Enable this to support the transceiver that is part of Allwinner @@ -373,11 +390,11 @@ config PHY_TUSB1210 config PHY_BRCMSTB_SATA tristate "Broadcom STB SATA PHY driver" - depends on ARCH_BRCMSTB + depends on ARCH_BRCMSTB || BMIPS_GENERIC depends on OF select GENERIC_PHY help - Enable this to support the SATA3 PHY on 28nm Broadcom STB SoCs. + Enable this to support the SATA3 PHY on 28nm or 40nm Broadcom STB SoCs. Likely useful only with CONFIG_SATA_BRCMSTB enabled. config PHY_CYGNUS_PCIE diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 075db1a81aa5..c80f09df3bb8 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -17,12 +17,14 @@ obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o obj-$(CONFIG_PHY_MIPHY365X) += phy-miphy365x.o obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o +obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o obj-$(CONFIG_PHY_HIX5HD2_SATA) += phy-hix5hd2-sata.o +obj-$(CONFIG_PHY_HI6220_USB) += phy-hi6220-usb.o obj-$(CONFIG_PHY_MT65XX_USB3) += phy-mt65xx-usb3.o obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c index 7ad72b7d2b98..082c03f6438f 100644 --- a/drivers/phy/phy-bcm-cygnus-pcie.c +++ b/drivers/phy/phy-bcm-cygnus-pcie.c @@ -128,6 +128,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) struct phy_provider *provider; struct resource *res; unsigned cnt = 0; + int ret; if (of_get_child_count(node) == 0) { dev_err(dev, "PHY no child node\n"); @@ -154,24 +155,28 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) if (of_property_read_u32(child, "reg", &id)) { dev_err(dev, "missing reg property for %s\n", child->name); - return -EINVAL; + ret = -EINVAL; + goto put_child; } if (id >= MAX_NUM_PHYS) { dev_err(dev, "invalid PHY id: %u\n", id); - return -EINVAL; + ret = -EINVAL; + goto put_child; } if (core->phys[id].phy) { dev_err(dev, "duplicated PHY id: %u\n", id); - return -EINVAL; + ret = -EINVAL; + goto put_child; } p = &core->phys[id]; p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); if (IS_ERR(p->phy)) { dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(p->phy); + ret = PTR_ERR(p->phy); + goto put_child; } p->core = core; @@ -191,6 +196,9 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); return 0; +put_child: + of_node_put(child); + return ret; } static const struct of_device_id cygnus_pcie_phy_match_table[] = { diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 77a2e054fdea..f84a33a1bdd9 100644 --- 
a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c @@ -195,7 +195,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct phy_berlin_priv *priv; struct resource *res; - int i = 0; + int ret, i = 0; u32 phy_id; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -237,22 +237,27 @@ static int phy_berlin_sata_probe(struct platform_device *pdev) if (of_property_read_u32(child, "reg", &phy_id)) { dev_err(dev, "missing reg property in node %s\n", child->name); - return -EINVAL; + ret = -EINVAL; + goto put_child; } if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { dev_err(dev, "invalid reg in node %s\n", child->name); - return -EINVAL; + ret = -EINVAL; + goto put_child; } phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); - if (!phy_desc) - return -ENOMEM; + if (!phy_desc) { + ret = -ENOMEM; + goto put_child; + } phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); if (IS_ERR(phy)) { dev_err(dev, "failed to create PHY %d\n", phy_id); - return PTR_ERR(phy); + ret = PTR_ERR(phy); + goto put_child; } phy_desc->phy = phy; @@ -269,6 +274,9 @@ static int phy_berlin_sata_probe(struct platform_device *pdev) phy_provider = devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); return PTR_ERR_OR_ZERO(phy_provider); +put_child: + of_node_put(child); + return ret; } static const struct of_device_id phy_berlin_sata_of_match[] = { diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c index 797ba17c404f..2017751ede26 100644 --- a/drivers/phy/phy-berlin-usb.c +++ b/drivers/phy/phy-berlin-usb.c @@ -9,11 +9,9 @@ * warranty of any kind, whether express or implied. */ -#include <linux/gpio.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> @@ -195,7 +193,6 @@ static int phy_berlin_usb_probe(struct platform_device *pdev) return PTR_ERR(phy); } - platform_set_drvdata(pdev, priv); phy_set_drvdata(phy, priv); phy_provider = diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c index 8a2cb16a1937..a23172ff40e3 100644 --- a/drivers/phy/phy-brcmstb-sata.c +++ b/drivers/phy/phy-brcmstb-sata.c @@ -26,13 +26,21 @@ #define SATA_MDIO_BANK_OFFSET 0x23c #define SATA_MDIO_REG_OFFSET(ofs) ((ofs) * 4) -#define SATA_MDIO_REG_SPACE_SIZE 0x1000 -#define SATA_MDIO_REG_LENGTH 0x1f00 #define MAX_PORTS 2 /* Register offset between PHYs in PCB space */ -#define SATA_MDIO_REG_SPACE_SIZE 0x1000 +#define SATA_MDIO_REG_28NM_SPACE_SIZE 0x1000 + +/* The older SATA PHY registers duplicated per port registers within the map, + * rather than having a separate map per port. 
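 * Annotation (not part of the original patch): this is why the per-port
 * register stride becomes 0x10 on the 40nm parts, against the 0x1000
 * per-port map used on 28nm, and why brcm_sata_phy_base() below selects
 * the stride from the detected PHY version.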
+ */ +#define SATA_MDIO_REG_40NM_SPACE_SIZE 0x10 + +enum brcm_sata_phy_version { + BRCM_SATA_PHY_28NM, + BRCM_SATA_PHY_40NM, +}; struct brcm_sata_port { int portnum; @@ -44,11 +52,12 @@ struct brcm_sata_port { struct brcm_sata_phy { struct device *dev; void __iomem *phy_base; + enum brcm_sata_phy_version version; struct brcm_sata_port phys[MAX_PORTS]; }; -enum sata_mdio_phy_regs_28nm { +enum sata_mdio_phy_regs { PLL_REG_BANK_0 = 0x50, PLL_REG_BANK_0_PLLCONTROL_0 = 0x81, @@ -66,8 +75,16 @@ enum sata_mdio_phy_regs_28nm { static inline void __iomem *brcm_sata_phy_base(struct brcm_sata_port *port) { struct brcm_sata_phy *priv = port->phy_priv; + u32 offset = 0; + + if (priv->version == BRCM_SATA_PHY_28NM) + offset = SATA_MDIO_REG_28NM_SPACE_SIZE; + else if (priv->version == BRCM_SATA_PHY_40NM) + offset = SATA_MDIO_REG_40NM_SPACE_SIZE; + else + dev_err(priv->dev, "invalid phy version\n"); - return priv->phy_base + (port->portnum * SATA_MDIO_REG_SPACE_SIZE); + return priv->phy_base + (port->portnum * offset); } static void brcm_sata_mdio_wr(void __iomem *addr, u32 bank, u32 ofs, @@ -86,7 +103,7 @@ static void brcm_sata_mdio_wr(void __iomem *addr, u32 bank, u32 ofs, #define FMAX_VAL_DEFAULT 0x3df #define FMAX_VAL_SSC 0x83 -static void brcm_sata_cfg_ssc_28nm(struct brcm_sata_port *port) +static void brcm_sata_cfg_ssc(struct brcm_sata_port *port) { void __iomem *base = brcm_sata_phy_base(port); struct brcm_sata_phy *priv = port->phy_priv; @@ -117,18 +134,21 @@ static int brcm_sata_phy_init(struct phy *phy) { struct brcm_sata_port *port = phy_get_drvdata(phy); - brcm_sata_cfg_ssc_28nm(port); + brcm_sata_cfg_ssc(port); return 0; } -static const struct phy_ops phy_ops_28nm = { +static const struct phy_ops phy_ops = { .init = brcm_sata_phy_init, .owner = THIS_MODULE, }; static const struct of_device_id brcm_sata_phy_of_match[] = { - { .compatible = "brcm,bcm7445-sata-phy" }, + { .compatible = "brcm,bcm7445-sata-phy", + .data = (void *)BRCM_SATA_PHY_28NM }, + { .compatible = "brcm,bcm7425-sata-phy", + .data = (void *)BRCM_SATA_PHY_40NM }, {}, }; MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match); @@ -137,10 +157,11 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node, *child; + const struct of_device_id *of_id; struct brcm_sata_phy *priv; struct resource *res; struct phy_provider *provider; - int count = 0; + int ret, count = 0; if (of_get_child_count(dn) == 0) return -ENODEV; @@ -156,6 +177,12 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) if (IS_ERR(priv->phy_base)) return PTR_ERR(priv->phy_base); + of_id = of_match_node(brcm_sata_phy_of_match, dn); + if (of_id) + priv->version = (enum brcm_sata_phy_version)of_id->data; + else + priv->version = BRCM_SATA_PHY_28NM; + for_each_available_child_of_node(dn, child) { unsigned int id; struct brcm_sata_port *port; @@ -163,26 +190,30 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) if (of_property_read_u32(child, "reg", &id)) { dev_err(dev, "missing reg property in node %s\n", child->name); - return -EINVAL; + ret = -EINVAL; + goto put_child; } if (id >= MAX_PORTS) { dev_err(dev, "invalid reg: %u\n", id); - return -EINVAL; + ret = -EINVAL; + goto put_child; } if (priv->phys[id].phy) { dev_err(dev, "already registered port %u\n", id); - return -EINVAL; + ret = -EINVAL; + goto put_child; } port = &priv->phys[id]; port->portnum = id; port->phy_priv = priv; - port->phy = devm_phy_create(dev, child, &phy_ops_28nm); + port->phy = devm_phy_create(dev, 
child, &phy_ops); port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); if (IS_ERR(port->phy)) { dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(port->phy); + ret = PTR_ERR(port->phy); + goto put_child; } phy_set_drvdata(port->phy, port); @@ -198,6 +229,9 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) dev_info(dev, "registered %d port(s)\n", count); return 0; +put_child: + of_node_put(child); + return ret; } static struct platform_driver brcm_sata_phy_driver = { diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index fc48fac003a6..8c7f27db6ad3 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -636,8 +636,9 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get); * @np: node containing the phy * @index: index of the phy * - * Gets the phy using _of_phy_get(), and associates a device with it using - * devres. On driver detach, release function is invoked on the devres data, + * Gets the phy using _of_phy_get(), then gets a refcount to it, + * and associates a device with it using devres. On driver detach, + * release function is invoked on the devres data, * then, devres data is freed. * */ @@ -651,13 +652,21 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, return ERR_PTR(-ENOMEM); phy = _of_phy_get(np, index); - if (!IS_ERR(phy)) { - *ptr = phy; - devres_add(dev, ptr); - } else { + if (IS_ERR(phy)) { devres_free(ptr); + return phy; } + if (!try_module_get(phy->ops->owner)) { + devres_free(ptr); + return ERR_PTR(-EPROBE_DEFER); + } + + get_device(&phy->dev); + + *ptr = phy; + devres_add(dev, ptr); + return phy; } EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index); diff --git a/drivers/phy/phy-hi6220-usb.c b/drivers/phy/phy-hi6220-usb.c new file mode 100644 index 000000000000..b2141cbd4cf6 --- /dev/null +++ b/drivers/phy/phy-hi6220-usb.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> + +#define SC_PERIPH_CTRL4 0x00c + +#define CTRL4_PICO_SIDDQ BIT(6) +#define CTRL4_PICO_OGDISABLE BIT(8) +#define CTRL4_PICO_VBUSVLDEXT BIT(10) +#define CTRL4_PICO_VBUSVLDEXTSEL BIT(11) +#define CTRL4_OTG_PHY_SEL BIT(21) + +#define SC_PERIPH_CTRL5 0x010 + +#define CTRL5_USBOTG_RES_SEL BIT(3) +#define CTRL5_PICOPHY_ACAENB BIT(4) +#define CTRL5_PICOPHY_BC_MODE BIT(5) +#define CTRL5_PICOPHY_CHRGSEL BIT(6) +#define CTRL5_PICOPHY_VDATSRCEND BIT(7) +#define CTRL5_PICOPHY_VDATDETENB BIT(8) +#define CTRL5_PICOPHY_DCDENB BIT(9) +#define CTRL5_PICOPHY_IDDIG BIT(10) + +#define SC_PERIPH_CTRL8 0x018 +#define SC_PERIPH_RSTEN0 0x300 +#define SC_PERIPH_RSTDIS0 0x304 + +#define RST0_USBOTG_BUS BIT(4) +#define RST0_POR_PICOPHY BIT(5) +#define RST0_USBOTG BIT(6) +#define RST0_USBOTG_32K BIT(7) + +#define EYE_PATTERN_PARA 0x7053348c + +struct hi6220_priv { + struct regmap *reg; + struct device *dev; +}; + +static void hi6220_phy_init(struct hi6220_priv *priv) +{ + struct regmap *reg = priv->reg; + u32 val, mask; + + val = RST0_USBOTG_BUS | RST0_POR_PICOPHY | + RST0_USBOTG | RST0_USBOTG_32K; + mask = val; + regmap_update_bits(reg, SC_PERIPH_RSTEN0, mask, val); + regmap_update_bits(reg, SC_PERIPH_RSTDIS0, mask, val); +} + +static int hi6220_phy_setup(struct hi6220_priv *priv, bool on) +{ + struct regmap *reg = priv->reg; + u32 val, mask; + int ret; + + if (on) { + val = CTRL5_USBOTG_RES_SEL | CTRL5_PICOPHY_ACAENB; + mask = val | CTRL5_PICOPHY_BC_MODE; + ret = regmap_update_bits(reg, SC_PERIPH_CTRL5, mask, val); + if (ret) + goto out; + + val = CTRL4_PICO_VBUSVLDEXT | CTRL4_PICO_VBUSVLDEXTSEL | + CTRL4_OTG_PHY_SEL; + mask = val | CTRL4_PICO_SIDDQ | CTRL4_PICO_OGDISABLE; + ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val); + if (ret) + goto out; + + ret = regmap_write(reg, SC_PERIPH_CTRL8, EYE_PATTERN_PARA); + if (ret) + goto out; + } else { + val = CTRL4_PICO_SIDDQ; + mask = val; + ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val); + if (ret) + goto out; + } + + return 0; +out: + dev_err(priv->dev, "failed to setup phy ret: %d\n", ret); + return ret; +} + +static int hi6220_phy_start(struct phy *phy) +{ + struct hi6220_priv *priv = phy_get_drvdata(phy); + + return hi6220_phy_setup(priv, true); +} + +static int hi6220_phy_exit(struct phy *phy) +{ + struct hi6220_priv *priv = phy_get_drvdata(phy); + + return hi6220_phy_setup(priv, false); +} + +static struct phy_ops hi6220_phy_ops = { + .init = hi6220_phy_start, + .exit = hi6220_phy_exit, + .owner = THIS_MODULE, +}; + +static int hi6220_phy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct device *dev = &pdev->dev; + struct phy *phy; + struct hi6220_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->reg = syscon_regmap_lookup_by_phandle(dev->of_node, + "hisilicon,peripheral-syscon"); + if (IS_ERR(priv->reg)) { + dev_err(dev, "no hisilicon,peripheral-syscon\n"); + return PTR_ERR(priv->reg); + } + + hi6220_phy_init(priv); + + phy = devm_phy_create(dev, NULL, &hi6220_phy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + phy_set_drvdata(phy, priv); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id hi6220_phy_of_match[] = { + {.compatible = "hisilicon,hi6220-usb-phy",}, + { }, +}; 
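Several probe() reworks in this series — the Broadcom SATA PHY above, and the miphy28lp, miphy365x, mt65xx and rockchip changes below — replace early returns inside for_each_available_child_of_node() with a jump to a put_child label. A minimal sketch of the rule they all follow, with made-up structure and helper names (my_phy_priv, register_one_port) rather than code taken from any of these drivers:

#include <linux/of.h>

struct my_phy_priv;

static int register_one_port(struct my_phy_priv *priv, struct device_node *child)
{
	/* stand-in for the real per-port setup done by each driver */
	return 0;
}

static int my_phy_register_ports(struct my_phy_priv *priv, struct device_node *dn)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(dn, child) {
		ret = register_one_port(priv, child);
		if (ret) {
			/*
			 * The iterator takes a reference on each child it hands
			 * out and only drops it when advancing to the next one;
			 * leaving the loop early means dropping it ourselves.
			 */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

The same reference-counting concern is what the devm_of_phy_get_by_index() change above addresses for the phy device and its provider module: get_device() and try_module_get() on acquisition, with the matching puts belonging in the devres release path.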
+MODULE_DEVICE_TABLE(of, hi6220_phy_of_match); + +static struct platform_driver hi6220_phy_driver = { + .probe = hi6220_phy_probe, + .driver = { + .name = "hi6220-usb-phy", + .of_match_table = hi6220_phy_of_match, + } +}; +module_platform_driver(hi6220_phy_driver); + +MODULE_DESCRIPTION("HISILICON HI6220 USB PHY driver"); +MODULE_ALIAS("platform:hi6220-usb-phy"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index c47b56b4a2b8..3acd2a1808df 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c @@ -1226,15 +1226,18 @@ static int miphy28lp_probe(struct platform_device *pdev) miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), GFP_KERNEL); - if (!miphy_phy) - return -ENOMEM; + if (!miphy_phy) { + ret = -ENOMEM; + goto put_child; + } miphy_dev->phys[port] = miphy_phy; phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); if (IS_ERR(phy)) { dev_err(&pdev->dev, "failed to create PHY\n"); - return PTR_ERR(phy); + ret = PTR_ERR(phy); + goto put_child; } miphy_dev->phys[port]->phy = phy; @@ -1242,11 +1245,11 @@ static int miphy28lp_probe(struct platform_device *pdev) ret = miphy28lp_of_probe(child, miphy_phy); if (ret) - return ret; + goto put_child; ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); if (ret) - return ret; + goto put_child; phy_set_drvdata(phy, miphy_dev->phys[port]); port++; @@ -1255,6 +1258,9 @@ static int miphy28lp_probe(struct platform_device *pdev) provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); return PTR_ERR_OR_ZERO(provider); +put_child: + of_node_put(child); + return ret; } static const struct of_device_id miphy28lp_of_match[] = { diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index 00a686a073ed..e661f3b36eaa 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c @@ -566,22 +566,25 @@ static int miphy365x_probe(struct platform_device *pdev) miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), GFP_KERNEL); - if (!miphy_phy) - return -ENOMEM; + if (!miphy_phy) { + ret = -ENOMEM; + goto put_child; + } miphy_dev->phys[port] = miphy_phy; phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); if (IS_ERR(phy)) { dev_err(&pdev->dev, "failed to create PHY\n"); - return PTR_ERR(phy); + ret = PTR_ERR(phy); + goto put_child; } miphy_dev->phys[port]->phy = phy; ret = miphy365x_of_probe(child, miphy_phy); if (ret) - return ret; + goto put_child; phy_set_drvdata(phy, miphy_dev->phys[port]); @@ -591,12 +594,15 @@ static int miphy365x_probe(struct platform_device *pdev) &miphy_phy->ctrlreg); if (ret) { dev_err(&pdev->dev, "No sysconfig offset found\n"); - return ret; + goto put_child; } } provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); return PTR_ERR_OR_ZERO(provider); +put_child: + of_node_put(child); + return ret; } static const struct of_device_id miphy365x_of_match[] = { diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c index f30b28bd41fe..c0e7b4b0cf5c 100644 --- a/drivers/phy/phy-mt65xx-usb3.c +++ b/drivers/phy/phy-mt65xx-usb3.c @@ -17,6 +17,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/phy/phy.h> @@ -27,6 +28,7 @@ * relative to USB3_SIF2_BASE base address */ #define SSUSB_SIFSLV_SPLLC 0x0000 +#define SSUSB_SIFSLV_U2FREQ 0x0100 /* offsets of sub-segment in each port registers */ #define SSUSB_SIFSLV_U2PHY_COM_BASE 0x0000 @@ -41,6 +43,7 @@ #define 
PA2_RG_SIF_U2PLL_FORCE_EN BIT(18) #define U3P_USBPHYACR5 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0014) +#define PA5_RG_U2_HSTX_SRCAL_EN BIT(15) #define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12) #define PA5_RG_U2_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12) #define PA5_RG_U2_HS_100U_U3_EN BIT(11) @@ -49,6 +52,8 @@ #define PA6_RG_U2_ISO_EN BIT(31) #define PA6_RG_U2_BC11_SW_EN BIT(23) #define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20) +#define PA6_RG_U2_SQTH GENMASK(3, 0) +#define PA6_RG_U2_SQTH_VAL(x) (0xf & (x)) #define U3P_U2PHYACR4 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0020) #define P2C_RG_USB20_GPIO_CTL BIT(9) @@ -111,6 +116,24 @@ #define XC3_RG_U3_XTAL_RX_PWD BIT(9) #define XC3_RG_U3_FRC_XTAL_RX_PWD BIT(8) +#define U3P_U2FREQ_FMCR0 (SSUSB_SIFSLV_U2FREQ + 0x00) +#define P2F_RG_MONCLK_SEL GENMASK(27, 26) +#define P2F_RG_MONCLK_SEL_VAL(x) ((0x3 & (x)) << 26) +#define P2F_RG_FREQDET_EN BIT(24) +#define P2F_RG_CYCLECNT GENMASK(23, 0) +#define P2F_RG_CYCLECNT_VAL(x) ((P2F_RG_CYCLECNT) & (x)) + +#define U3P_U2FREQ_VALUE (SSUSB_SIFSLV_U2FREQ + 0x0c) + +#define U3P_U2FREQ_FMMONR1 (SSUSB_SIFSLV_U2FREQ + 0x10) +#define P2F_USB_FM_VALID BIT(0) +#define P2F_RG_FRCK_EN BIT(8) + +#define U3P_REF_CLK 26 /* MHZ */ +#define U3P_SLEW_RATE_COEF 28 +#define U3P_SR_COEF_DIVISOR 1000 +#define U3P_FM_DET_CYCLE_CNT 1024 + struct mt65xx_phy_instance { struct phy *phy; void __iomem *port_base; @@ -126,6 +149,77 @@ struct mt65xx_u3phy { int nphys; }; +static void hs_slew_rate_calibrate(struct mt65xx_u3phy *u3phy, + struct mt65xx_phy_instance *instance) +{ + void __iomem *sif_base = u3phy->sif_base; + int calibration_val; + int fm_out; + u32 tmp; + + /* enable USB ring oscillator */ + tmp = readl(instance->port_base + U3P_USBPHYACR5); + tmp |= PA5_RG_U2_HSTX_SRCAL_EN; + writel(tmp, instance->port_base + U3P_USBPHYACR5); + udelay(1); + + /*enable free run clock */ + tmp = readl(sif_base + U3P_U2FREQ_FMMONR1); + tmp |= P2F_RG_FRCK_EN; + writel(tmp, sif_base + U3P_U2FREQ_FMMONR1); + + /* set cycle count as 1024, and select u2 channel */ + tmp = readl(sif_base + U3P_U2FREQ_FMCR0); + tmp &= ~(P2F_RG_CYCLECNT | P2F_RG_MONCLK_SEL); + tmp |= P2F_RG_CYCLECNT_VAL(U3P_FM_DET_CYCLE_CNT); + tmp |= P2F_RG_MONCLK_SEL_VAL(instance->index); + writel(tmp, sif_base + U3P_U2FREQ_FMCR0); + + /* enable frequency meter */ + tmp = readl(sif_base + U3P_U2FREQ_FMCR0); + tmp |= P2F_RG_FREQDET_EN; + writel(tmp, sif_base + U3P_U2FREQ_FMCR0); + + /* ignore return value */ + readl_poll_timeout(sif_base + U3P_U2FREQ_FMMONR1, tmp, + (tmp & P2F_USB_FM_VALID), 10, 200); + + fm_out = readl(sif_base + U3P_U2FREQ_VALUE); + + /* disable frequency meter */ + tmp = readl(sif_base + U3P_U2FREQ_FMCR0); + tmp &= ~P2F_RG_FREQDET_EN; + writel(tmp, sif_base + U3P_U2FREQ_FMCR0); + + /*disable free run clock */ + tmp = readl(sif_base + U3P_U2FREQ_FMMONR1); + tmp &= ~P2F_RG_FRCK_EN; + writel(tmp, sif_base + U3P_U2FREQ_FMMONR1); + + if (fm_out) { + /* ( 1024 / FM_OUT ) x reference clock frequency x 0.028 */ + tmp = U3P_FM_DET_CYCLE_CNT * U3P_REF_CLK * U3P_SLEW_RATE_COEF; + tmp /= fm_out; + calibration_val = DIV_ROUND_CLOSEST(tmp, U3P_SR_COEF_DIVISOR); + } else { + /* if FM detection fail, set default value */ + calibration_val = 4; + } + dev_dbg(u3phy->dev, "phy:%d, fm_out:%d, calib:%d\n", + instance->index, fm_out, calibration_val); + + /* set HS slew rate */ + tmp = readl(instance->port_base + U3P_USBPHYACR5); + tmp &= ~PA5_RG_U2_HSTX_SRCTRL; + tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val); + writel(tmp, instance->port_base + U3P_USBPHYACR5); + + /* disable USB ring oscillator */ + tmp = 
readl(instance->port_base + U3P_USBPHYACR5); + tmp &= ~PA5_RG_U2_HSTX_SRCAL_EN; + writel(tmp, instance->port_base + U3P_USBPHYACR5); +} + static void phy_instance_init(struct mt65xx_u3phy *u3phy, struct mt65xx_phy_instance *instance) { @@ -165,9 +259,10 @@ static void phy_instance_init(struct mt65xx_u3phy *u3phy, writel(tmp, port_base + U3P_U2PHYDTM0); } - /* DP/DM BC1.1 path Disable */ tmp = readl(port_base + U3P_USBPHYACR6); - tmp &= ~PA6_RG_U2_BC11_SW_EN; + tmp &= ~PA6_RG_U2_BC11_SW_EN; /* DP/DM BC1.1 path Disable */ + tmp &= ~PA6_RG_U2_SQTH; + tmp |= PA6_RG_U2_SQTH_VAL(2); writel(tmp, port_base + U3P_USBPHYACR6); tmp = readl(port_base + U3P_U3PHYA_DA_REG0); @@ -223,9 +318,9 @@ static void phy_instance_power_on(struct mt65xx_u3phy *u3phy, tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD; writel(tmp, u3phy->sif_base + U3P_XTALCTL3); - /* [mt8173]disable Change 100uA current from SSUSB */ + /* [mt8173]switch 100uA current to SSUSB */ tmp = readl(port_base + U3P_USBPHYACR5); - tmp &= ~PA5_RG_U2_HS_100U_U3_EN; + tmp |= PA5_RG_U2_HS_100U_U3_EN; writel(tmp, port_base + U3P_USBPHYACR5); } @@ -270,7 +365,7 @@ static void phy_instance_power_off(struct mt65xx_u3phy *u3phy, writel(tmp, port_base + U3P_USBPHYACR6); if (!index) { - /* (also disable)Change 100uA current switch to USB2.0 */ + /* switch 100uA current back to USB2.0 */ tmp = readl(port_base + U3P_USBPHYACR5); tmp &= ~PA5_RG_U2_HS_100U_U3_EN; writel(tmp, port_base + U3P_USBPHYACR5); @@ -340,6 +435,7 @@ static int mt65xx_phy_power_on(struct phy *phy) struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent); phy_instance_power_on(u3phy, instance); + hs_slew_rate_calibrate(u3phy, instance); return 0; } @@ -415,7 +511,7 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev) struct resource *sif_res; struct mt65xx_u3phy *u3phy; struct resource res; - int port; + int port, retval; u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); if (!u3phy) @@ -447,31 +543,34 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev) for_each_child_of_node(np, child_np) { struct mt65xx_phy_instance *instance; struct phy *phy; - int retval; instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); - if (!instance) - return -ENOMEM; + if (!instance) { + retval = -ENOMEM; + goto put_child; + } u3phy->phys[port] = instance; phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); if (IS_ERR(phy)) { dev_err(dev, "failed to create phy\n"); - return PTR_ERR(phy); + retval = PTR_ERR(phy); + goto put_child; } retval = of_address_to_resource(child_np, 0, &res); if (retval) { dev_err(dev, "failed to get address resource(id-%d)\n", port); - return retval; + goto put_child; } instance->port_base = devm_ioremap_resource(&phy->dev, &res); if (IS_ERR(instance->port_base)) { dev_err(dev, "failed to remap phy regs\n"); - return PTR_ERR(instance->port_base); + retval = PTR_ERR(instance->port_base); + goto put_child; } instance->phy = phy; @@ -483,6 +582,9 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev) provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); return PTR_ERR_OR_ZERO(provider); +put_child: + of_node_put(child_np); + return retval; } static const struct of_device_id mt65xx_u3phy_id_table[] = { diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 0fe80589ffbe..c134989052f5 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -29,6 +29,8 @@ #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/phy/phy.h> +#include <linux/mfd/syscon.h> 
+#include <linux/regmap.h> #include <linux/of_platform.h> #define USB2PHY_DISCON_BYP_LATCH (1 << 31) @@ -97,22 +99,38 @@ static int omap_usb_set_peripheral(struct usb_otg *otg, return 0; } +static int omap_usb_phy_power(struct omap_usb *phy, int on) +{ + u32 val; + int ret; + + if (!phy->syscon_phy_power) { + omap_control_phy_power(phy->control_dev, on); + return 0; + } + + if (on) + val = phy->power_on; + else + val = phy->power_off; + + ret = regmap_update_bits(phy->syscon_phy_power, phy->power_reg, + phy->mask, val); + return ret; +} + static int omap_usb_power_off(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); - omap_control_phy_power(phy->control_dev, 0); - - return 0; + return omap_usb_phy_power(phy, false); } static int omap_usb_power_on(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); - omap_control_phy_power(phy->control_dev, 1); - - return 0; + return omap_usb_phy_power(phy, true); } static int omap_usb_init(struct phy *x) @@ -147,21 +165,38 @@ static const struct phy_ops ops = { static const struct usb_phy_data omap_usb2_data = { .label = "omap_usb2", .flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS, + .mask = OMAP_DEV_PHY_PD, + .power_off = OMAP_DEV_PHY_PD, }; static const struct usb_phy_data omap5_usb2_data = { .label = "omap5_usb2", .flags = 0, + .mask = OMAP_DEV_PHY_PD, + .power_off = OMAP_DEV_PHY_PD, }; static const struct usb_phy_data dra7x_usb2_data = { .label = "dra7x_usb2", .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, + .mask = OMAP_DEV_PHY_PD, + .power_off = OMAP_DEV_PHY_PD, +}; + +static const struct usb_phy_data dra7x_usb2_phy2_data = { + .label = "dra7x_usb2_phy2", + .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, + .mask = OMAP_USB2_PHY_PD, + .power_off = OMAP_USB2_PHY_PD, }; static const struct usb_phy_data am437x_usb2_data = { .label = "am437x_usb2", .flags = 0, + .mask = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD | + AM437X_USB2_OTGVDET_EN | AM437X_USB2_OTGSESSEND_EN, + .power_on = AM437X_USB2_OTGVDET_EN | AM437X_USB2_OTGSESSEND_EN, + .power_off = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD, }; static const struct of_device_id omap_usb2_id_table[] = { @@ -178,6 +213,10 @@ static const struct of_device_id omap_usb2_id_table[] = { .data = &dra7x_usb2_data, }, { + .compatible = "ti,dra7x-usb2-phy2", + .data = &dra7x_usb2_phy2_data, + }, + { .compatible = "ti,am437x-usb2", .data = &am437x_usb2_data, }, @@ -219,6 +258,9 @@ static int omap_usb2_probe(struct platform_device *pdev) phy->phy.label = phy_data->label; phy->phy.otg = otg; phy->phy.type = USB_PHY_TYPE_USB2; + phy->mask = phy_data->mask; + phy->power_on = phy_data->power_on; + phy->power_off = phy_data->power_off; if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -228,21 +270,36 @@ static int omap_usb2_probe(struct platform_device *pdev) phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT; } - control_node = of_parse_phandle(node, "ctrl-module", 0); - if (!control_node) { - dev_err(&pdev->dev, "Failed to get control device phandle\n"); - return -EINVAL; - } + phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node, + "syscon-phy-power"); + if (IS_ERR(phy->syscon_phy_power)) { + dev_dbg(&pdev->dev, + "can't get syscon-phy-power, using control device\n"); + phy->syscon_phy_power = NULL; + + control_node = of_parse_phandle(node, "ctrl-module", 0); + if (!control_node) { + dev_err(&pdev->dev, + "Failed to get control device phandle\n"); + return -EINVAL; + } - control_pdev = of_find_device_by_node(control_node); - 
if (!control_pdev) { - dev_err(&pdev->dev, "Failed to get control device\n"); - return -EINVAL; + control_pdev = of_find_device_by_node(control_node); + if (!control_pdev) { + dev_err(&pdev->dev, "Failed to get control device\n"); + return -EINVAL; + } + phy->control_dev = &control_pdev->dev; + } else { + if (of_property_read_u32_index(node, + "syscon-phy-power", 1, + &phy->power_reg)) { + dev_err(&pdev->dev, + "couldn't get power reg. offset\n"); + return -EINVAL; + } } - phy->control_dev = &control_pdev->dev; - omap_control_phy_power(phy->control_dev, 0); - otg->set_host = omap_usb_set_host; otg->set_peripheral = omap_usb_set_peripheral; if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS) @@ -261,6 +318,7 @@ static int omap_usb2_probe(struct platform_device *pdev) } phy_set_drvdata(generic_phy, phy); + omap_usb_power_off(generic_phy); phy_provider = devm_of_phy_provider_register(phy->dev, of_phy_simple_xlate); diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c new file mode 100644 index 000000000000..ef332ef4abc7 --- /dev/null +++ b/drivers/phy/phy-rcar-gen3-usb2.c @@ -0,0 +1,378 @@ +/* + * Renesas R-Car Gen3 for USB2.0 PHY driver + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * This is based on the phy-rcar-gen2 driver: + * Copyright (C) 2014 Renesas Solutions Corp. + * Copyright (C) 2014 Cogent Embedded, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +/******* USB2.0 Host registers (original offset is +0x200) *******/ +#define USB2_INT_ENABLE 0x000 +#define USB2_USBCTR 0x00c +#define USB2_SPD_RSM_TIMSET 0x10c +#define USB2_OC_TIMSET 0x110 +#define USB2_COMMCTRL 0x600 +#define USB2_OBINTSTA 0x604 +#define USB2_OBINTEN 0x608 +#define USB2_VBCTRL 0x60c +#define USB2_LINECTRL1 0x610 +#define USB2_ADPCTRL 0x630 + +/* INT_ENABLE */ +#define USB2_INT_ENABLE_UCOM_INTEN BIT(3) +#define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) +#define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) +#define USB2_INT_ENABLE_INIT (USB2_INT_ENABLE_UCOM_INTEN | \ + USB2_INT_ENABLE_USBH_INTB_EN | \ + USB2_INT_ENABLE_USBH_INTA_EN) + +/* USBCTR */ +#define USB2_USBCTR_DIRPD BIT(2) +#define USB2_USBCTR_PLL_RST BIT(1) + +/* SPD_RSM_TIMSET */ +#define USB2_SPD_RSM_TIMSET_INIT 0x014e029b + +/* OC_TIMSET */ +#define USB2_OC_TIMSET_INIT 0x000209ab + +/* COMMCTRL */ +#define USB2_COMMCTRL_OTG_PERI BIT(31) /* 1 = Peripheral mode */ + +/* OBINTSTA and OBINTEN */ +#define USB2_OBINT_SESSVLDCHG BIT(12) +#define USB2_OBINT_IDDIGCHG BIT(11) +#define USB2_OBINT_BITS (USB2_OBINT_SESSVLDCHG | \ + USB2_OBINT_IDDIGCHG) + +/* VBCTRL */ +#define USB2_VBCTRL_DRVVBUSSEL BIT(8) + +/* LINECTRL1 */ +#define USB2_LINECTRL1_DPRPD_EN BIT(19) +#define USB2_LINECTRL1_DP_RPD BIT(18) +#define USB2_LINECTRL1_DMRPD_EN BIT(17) +#define USB2_LINECTRL1_DM_RPD BIT(16) + +/* ADPCTRL */ +#define USB2_ADPCTRL_OTGSESSVLD BIT(20) +#define USB2_ADPCTRL_IDDIG BIT(19) +#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */ +#define USB2_ADPCTRL_DRVVBUS BIT(4) + +/******* HSUSB registers (original offset is +0x100) *******/ +#define HSUSB_LPSTS 0x02 +#define HSUSB_UGCTRL2 0x84 + +/* Low Power Status register (LPSTS) */ +#define HSUSB_LPSTS_SUSPM 0x4000 + +/* USB General 
control register 2 (UGCTRL2) */ +#define HSUSB_UGCTRL2_MASK 0x00000031 /* bit[31:6] should be 0 */ +#define HSUSB_UGCTRL2_USB0SEL 0x00000030 +#define HSUSB_UGCTRL2_USB0SEL_HOST 0x00000010 +#define HSUSB_UGCTRL2_USB0SEL_HS_USB 0x00000020 +#define HSUSB_UGCTRL2_USB0SEL_OTG 0x00000030 + +struct rcar_gen3_data { + void __iomem *base; + struct clk *clk; +}; + +struct rcar_gen3_chan { + struct rcar_gen3_data usb2; + struct rcar_gen3_data hsusb; + struct phy *phy; +}; + +static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host) +{ + void __iomem *usb2_base = ch->usb2.base; + u32 val = readl(usb2_base + USB2_COMMCTRL); + + dev_vdbg(&ch->phy->dev, "%s: %08x, %d\n", __func__, val, host); + if (host) + val &= ~USB2_COMMCTRL_OTG_PERI; + else + val |= USB2_COMMCTRL_OTG_PERI; + writel(val, usb2_base + USB2_COMMCTRL); +} + +static void rcar_gen3_set_linectrl(struct rcar_gen3_chan *ch, int dp, int dm) +{ + void __iomem *usb2_base = ch->usb2.base; + u32 val = readl(usb2_base + USB2_LINECTRL1); + + dev_vdbg(&ch->phy->dev, "%s: %08x, %d, %d\n", __func__, val, dp, dm); + val &= ~(USB2_LINECTRL1_DP_RPD | USB2_LINECTRL1_DM_RPD); + if (dp) + val |= USB2_LINECTRL1_DP_RPD; + if (dm) + val |= USB2_LINECTRL1_DM_RPD; + writel(val, usb2_base + USB2_LINECTRL1); +} + +static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus) +{ + void __iomem *usb2_base = ch->usb2.base; + u32 val = readl(usb2_base + USB2_ADPCTRL); + + dev_vdbg(&ch->phy->dev, "%s: %08x, %d\n", __func__, val, vbus); + if (vbus) + val |= USB2_ADPCTRL_DRVVBUS; + else + val &= ~USB2_ADPCTRL_DRVVBUS; + writel(val, usb2_base + USB2_ADPCTRL); +} + +static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch) +{ + rcar_gen3_set_linectrl(ch, 1, 1); + rcar_gen3_set_host_mode(ch, 1); + rcar_gen3_enable_vbus_ctrl(ch, 1); +} + +static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch) +{ + rcar_gen3_set_linectrl(ch, 0, 1); + rcar_gen3_set_host_mode(ch, 0); + rcar_gen3_enable_vbus_ctrl(ch, 0); +} + +static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch) +{ + return !!(readl(ch->usb2.base + USB2_ADPCTRL) & + USB2_ADPCTRL_OTGSESSVLD); +} + +static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch) +{ + return !!(readl(ch->usb2.base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG); +} + +static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch) +{ + bool is_host = true; + + /* B-device? 
*/ + if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch)) + is_host = false; + + if (is_host) + rcar_gen3_init_for_host(ch); + else + rcar_gen3_init_for_peri(ch); +} + +static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) +{ + void __iomem *usb2_base = ch->usb2.base; + u32 val; + + val = readl(usb2_base + USB2_VBCTRL); + writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); + writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); + val = readl(usb2_base + USB2_OBINTEN); + writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN); + val = readl(usb2_base + USB2_ADPCTRL); + writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); + val = readl(usb2_base + USB2_LINECTRL1); + rcar_gen3_set_linectrl(ch, 0, 0); + writel(val | USB2_LINECTRL1_DPRPD_EN | USB2_LINECTRL1_DMRPD_EN, + usb2_base + USB2_LINECTRL1); + + rcar_gen3_device_recognition(ch); +} + +static int rcar_gen3_phy_usb2_init(struct phy *p) +{ + struct rcar_gen3_chan *channel = phy_get_drvdata(p); + void __iomem *usb2_base = channel->usb2.base; + void __iomem *hsusb_base = channel->hsusb.base; + u32 val; + + /* Initialize USB2 part */ + writel(USB2_INT_ENABLE_INIT, usb2_base + USB2_INT_ENABLE); + writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); + writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); + + /* Initialize HSUSB part */ + if (hsusb_base) { + val = readl(hsusb_base + HSUSB_UGCTRL2); + val = (val & ~HSUSB_UGCTRL2_USB0SEL) | + HSUSB_UGCTRL2_USB0SEL_OTG; + writel(val & HSUSB_UGCTRL2_MASK, hsusb_base + HSUSB_UGCTRL2); + + /* Initialize otg part */ + rcar_gen3_init_otg(channel); + } + + return 0; +} + +static int rcar_gen3_phy_usb2_exit(struct phy *p) +{ + struct rcar_gen3_chan *channel = phy_get_drvdata(p); + + writel(0, channel->usb2.base + USB2_INT_ENABLE); + + return 0; +} + +static int rcar_gen3_phy_usb2_power_on(struct phy *p) +{ + struct rcar_gen3_chan *channel = phy_get_drvdata(p); + void __iomem *usb2_base = channel->usb2.base; + void __iomem *hsusb_base = channel->hsusb.base; + u32 val; + + val = readl(usb2_base + USB2_USBCTR); + val |= USB2_USBCTR_PLL_RST; + writel(val, usb2_base + USB2_USBCTR); + val &= ~USB2_USBCTR_PLL_RST; + writel(val, usb2_base + USB2_USBCTR); + + /* + * TODO: To reduce power consuming, this driver should set the SUSPM + * after the PHY detects ID pin as peripheral. 
+ */ + if (hsusb_base) { + /* Power on HSUSB PHY */ + val = readw(hsusb_base + HSUSB_LPSTS); + val |= HSUSB_LPSTS_SUSPM; + writew(val, hsusb_base + HSUSB_LPSTS); + } + + return 0; +} + +static int rcar_gen3_phy_usb2_power_off(struct phy *p) +{ + struct rcar_gen3_chan *channel = phy_get_drvdata(p); + void __iomem *hsusb_base = channel->hsusb.base; + u32 val; + + if (hsusb_base) { + /* Power off HSUSB PHY */ + val = readw(hsusb_base + HSUSB_LPSTS); + val &= ~HSUSB_LPSTS_SUSPM; + writew(val, hsusb_base + HSUSB_LPSTS); + } + + return 0; +} + +static struct phy_ops rcar_gen3_phy_usb2_ops = { + .init = rcar_gen3_phy_usb2_init, + .exit = rcar_gen3_phy_usb2_exit, + .power_on = rcar_gen3_phy_usb2_power_on, + .power_off = rcar_gen3_phy_usb2_power_off, + .owner = THIS_MODULE, +}; + +static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) +{ + struct rcar_gen3_chan *ch = _ch; + void __iomem *usb2_base = ch->usb2.base; + u32 status = readl(usb2_base + USB2_OBINTSTA); + irqreturn_t ret = IRQ_NONE; + + if (status & USB2_OBINT_BITS) { + dev_vdbg(&ch->phy->dev, "%s: %08x\n", __func__, status); + writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } + + return ret; +} + +static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { + { .compatible = "renesas,usb2-phy-r8a7795" }, + { } +}; +MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table); + +static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rcar_gen3_chan *channel; + struct phy_provider *provider; + struct resource *res; + + if (!dev->of_node) { + dev_err(dev, "This driver needs device tree\n"); + return -EINVAL; + } + + channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb2_host"); + channel->usb2.base = devm_ioremap_resource(dev, res); + if (IS_ERR(channel->usb2.base)) + return PTR_ERR(channel->usb2.base); + + /* "hsusb" memory resource is optional */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsusb"); + + /* To avoid error message by devm_ioremap_resource() */ + if (res) { + int irq; + + channel->hsusb.base = devm_ioremap_resource(dev, res); + if (IS_ERR(channel->hsusb.base)) + channel->hsusb.base = NULL; + /* call request_irq for OTG */ + irq = platform_get_irq(pdev, 0); + if (irq >= 0) + irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, + IRQF_SHARED, dev_name(dev), + channel); + if (irq < 0) + dev_err(dev, "No irq handler (%d)\n", irq); + } + + /* devm_phy_create() will call pm_runtime_enable(dev); */ + channel->phy = devm_phy_create(dev, NULL, &rcar_gen3_phy_usb2_ops); + if (IS_ERR(channel->phy)) { + dev_err(dev, "Failed to create USB2 PHY\n"); + return PTR_ERR(channel->phy); + } + + phy_set_drvdata(channel->phy, channel); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + dev_err(dev, "Failed to register PHY provider\n"); + + return PTR_ERR_OR_ZERO(provider); +} + +static struct platform_driver rcar_gen3_phy_usb2_driver = { + .driver = { + .name = "phy_rcar_gen3_usb2", + .of_match_table = rcar_gen3_phy_usb2_match_table, + }, + .probe = rcar_gen3_phy_usb2_probe, +}; +module_platform_driver(rcar_gen3_phy_usb2_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Renesas R-Car Gen3 USB 2.0 PHY"); +MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>"); diff --git a/drivers/phy/phy-rockchip-usb.c 
b/drivers/phy/phy-rockchip-usb.c index 91d6f342c565..33a80eba1cb4 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c @@ -15,12 +15,14 @@ */ #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> @@ -36,105 +38,271 @@ #define SIDDQ_ON BIT(13) #define SIDDQ_OFF (0 << 13) +struct rockchip_usb_phys { + int reg; + const char *pll_name; +}; + +struct rockchip_usb_phy_pdata { + struct rockchip_usb_phys *phys; +}; + +struct rockchip_usb_phy_base { + struct device *dev; + struct regmap *reg_base; + const struct rockchip_usb_phy_pdata *pdata; +}; + struct rockchip_usb_phy { + struct rockchip_usb_phy_base *base; + struct device_node *np; unsigned int reg_offset; - struct regmap *reg_base; struct clk *clk; + struct clk *clk480m; + struct clk_hw clk480m_hw; struct phy *phy; }; static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy, bool siddq) { - return regmap_write(phy->reg_base, phy->reg_offset, + return regmap_write(phy->base->reg_base, phy->reg_offset, SIDDQ_WRITE_ENA | (siddq ? SIDDQ_ON : SIDDQ_OFF)); } -static int rockchip_usb_phy_power_off(struct phy *_phy) +static unsigned long rockchip_usb_phy480m_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { - struct rockchip_usb_phy *phy = phy_get_drvdata(_phy); - int ret = 0; + return 480000000; +} + +static void rockchip_usb_phy480m_disable(struct clk_hw *hw) +{ + struct rockchip_usb_phy *phy = container_of(hw, + struct rockchip_usb_phy, + clk480m_hw); /* Power down usb phy analog blocks by set siddq 1 */ - ret = rockchip_usb_phy_power(phy, 1); - if (ret) - return ret; + rockchip_usb_phy_power(phy, 1); +} - clk_disable_unprepare(phy->clk); +static int rockchip_usb_phy480m_enable(struct clk_hw *hw) +{ + struct rockchip_usb_phy *phy = container_of(hw, + struct rockchip_usb_phy, + clk480m_hw); - return 0; + /* Power up usb phy analog blocks by set siddq 0 */ + return rockchip_usb_phy_power(phy, 0); } -static int rockchip_usb_phy_power_on(struct phy *_phy) +static int rockchip_usb_phy480m_is_enabled(struct clk_hw *hw) { - struct rockchip_usb_phy *phy = phy_get_drvdata(_phy); - int ret = 0; + struct rockchip_usb_phy *phy = container_of(hw, + struct rockchip_usb_phy, + clk480m_hw); + int ret; + u32 val; - ret = clk_prepare_enable(phy->clk); - if (ret) + ret = regmap_read(phy->base->reg_base, phy->reg_offset, &val); + if (ret < 0) return ret; - /* Power up usb phy analog blocks by set siddq 0 */ - ret = rockchip_usb_phy_power(phy, 0); - if (ret) { - clk_disable_unprepare(phy->clk); - return ret; - } + return (val & SIDDQ_ON) ? 
0 : 1; +} + +static const struct clk_ops rockchip_usb_phy480m_ops = { + .enable = rockchip_usb_phy480m_enable, + .disable = rockchip_usb_phy480m_disable, + .is_enabled = rockchip_usb_phy480m_is_enabled, + .recalc_rate = rockchip_usb_phy480m_recalc_rate, +}; + +static int rockchip_usb_phy_power_off(struct phy *_phy) +{ + struct rockchip_usb_phy *phy = phy_get_drvdata(_phy); + + clk_disable_unprepare(phy->clk480m); return 0; } +static int rockchip_usb_phy_power_on(struct phy *_phy) +{ + struct rockchip_usb_phy *phy = phy_get_drvdata(_phy); + + return clk_prepare_enable(phy->clk480m); +} + static const struct phy_ops ops = { .power_on = rockchip_usb_phy_power_on, .power_off = rockchip_usb_phy_power_off, .owner = THIS_MODULE, }; -static int rockchip_usb_phy_probe(struct platform_device *pdev) +static void rockchip_usb_phy_action(void *data) +{ + struct rockchip_usb_phy *rk_phy = data; + + of_clk_del_provider(rk_phy->np); + clk_unregister(rk_phy->clk480m); + + if (rk_phy->clk) + clk_put(rk_phy->clk); +} + +static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base, + struct device_node *child) { - struct device *dev = &pdev->dev; struct rockchip_usb_phy *rk_phy; - struct phy_provider *phy_provider; - struct device_node *child; - struct regmap *grf; unsigned int reg_offset; - int err; + const char *clk_name; + struct clk_init_data init; + int err, i; - grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - if (IS_ERR(grf)) { - dev_err(&pdev->dev, "Missing rockchip,grf property\n"); - return PTR_ERR(grf); + rk_phy = devm_kzalloc(base->dev, sizeof(*rk_phy), GFP_KERNEL); + if (!rk_phy) + return -ENOMEM; + + rk_phy->base = base; + rk_phy->np = child; + + if (of_property_read_u32(child, "reg", ®_offset)) { + dev_err(base->dev, "missing reg property in node %s\n", + child->name); + return -EINVAL; } - for_each_available_child_of_node(dev->of_node, child) { - rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); - if (!rk_phy) - return -ENOMEM; - - if (of_property_read_u32(child, "reg", ®_offset)) { - dev_err(dev, "missing reg property in node %s\n", - child->name); - return -EINVAL; + rk_phy->reg_offset = reg_offset; + + rk_phy->clk = of_clk_get_by_name(child, "phyclk"); + if (IS_ERR(rk_phy->clk)) + rk_phy->clk = NULL; + + i = 0; + init.name = NULL; + while (base->pdata->phys[i].reg) { + if (base->pdata->phys[i].reg == reg_offset) { + init.name = base->pdata->phys[i].pll_name; + break; } + i++; + } - rk_phy->reg_offset = reg_offset; - rk_phy->reg_base = grf; + if (!init.name) { + dev_err(base->dev, "phy data not found\n"); + return -EINVAL; + } - rk_phy->clk = of_clk_get_by_name(child, "phyclk"); - if (IS_ERR(rk_phy->clk)) - rk_phy->clk = NULL; + if (rk_phy->clk) { + clk_name = __clk_get_name(rk_phy->clk); + init.flags = 0; + init.parent_names = &clk_name; + init.num_parents = 1; + } else { + init.flags = CLK_IS_ROOT; + init.parent_names = NULL; + init.num_parents = 0; + } - rk_phy->phy = devm_phy_create(dev, child, &ops); - if (IS_ERR(rk_phy->phy)) { - dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(rk_phy->phy); - } - phy_set_drvdata(rk_phy->phy, rk_phy); + init.ops = &rockchip_usb_phy480m_ops; + rk_phy->clk480m_hw.init = &init; + + rk_phy->clk480m = clk_register(base->dev, &rk_phy->clk480m_hw); + if (IS_ERR(rk_phy->clk480m)) { + err = PTR_ERR(rk_phy->clk480m); + goto err_clk; + } + + err = of_clk_add_provider(child, of_clk_src_simple_get, + rk_phy->clk480m); + if (err < 0) + goto err_clk_prov; + + err = devm_add_action(base->dev, rockchip_usb_phy_action, 
rk_phy); + if (err) + goto err_devm_action; + + rk_phy->phy = devm_phy_create(base->dev, child, &ops); + if (IS_ERR(rk_phy->phy)) { + dev_err(base->dev, "failed to create PHY\n"); + return PTR_ERR(rk_phy->phy); + } + phy_set_drvdata(rk_phy->phy, rk_phy); + + /* only power up usb phy when it use, so disable it when init*/ + return rockchip_usb_phy_power(rk_phy, 1); + +err_devm_action: + of_clk_del_provider(child); +err_clk_prov: + clk_unregister(rk_phy->clk480m); +err_clk: + if (rk_phy->clk) + clk_put(rk_phy->clk); + return err; +} + +static const struct rockchip_usb_phy_pdata rk3066a_pdata = { + .phys = (struct rockchip_usb_phys[]){ + { .reg = 0x17c, .pll_name = "sclk_otgphy0_480m" }, + { .reg = 0x188, .pll_name = "sclk_otgphy1_480m" }, + { /* sentinel */ } + }, +}; - /* only power up usb phy when it use, so disable it when init*/ - err = rockchip_usb_phy_power(rk_phy, 1); - if (err) +static const struct rockchip_usb_phy_pdata rk3188_pdata = { + .phys = (struct rockchip_usb_phys[]){ + { .reg = 0x10c, .pll_name = "sclk_otgphy0_480m" }, + { .reg = 0x11c, .pll_name = "sclk_otgphy1_480m" }, + { /* sentinel */ } + }, +}; + +static const struct rockchip_usb_phy_pdata rk3288_pdata = { + .phys = (struct rockchip_usb_phys[]){ + { .reg = 0x320, .pll_name = "sclk_otgphy0_480m" }, + { .reg = 0x334, .pll_name = "sclk_otgphy1_480m" }, + { .reg = 0x348, .pll_name = "sclk_otgphy2_480m" }, + { /* sentinel */ } + }, +}; + +static int rockchip_usb_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_usb_phy_base *phy_base; + struct phy_provider *phy_provider; + const struct of_device_id *match; + struct device_node *child; + int err; + + phy_base = devm_kzalloc(dev, sizeof(*phy_base), GFP_KERNEL); + if (!phy_base) + return -ENOMEM; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match || !match->data) { + dev_err(dev, "missing phy data\n"); + return -EINVAL; + } + + phy_base->pdata = match->data; + + phy_base->dev = dev; + phy_base->reg_base = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); + if (IS_ERR(phy_base->reg_base)) { + dev_err(&pdev->dev, "Missing rockchip,grf property\n"); + return PTR_ERR(phy_base->reg_base); + } + + for_each_available_child_of_node(dev->of_node, child) { + err = rockchip_usb_phy_init(phy_base, child); + if (err) { + of_node_put(child); return err; + } } phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); @@ -142,7 +310,9 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) } static const struct of_device_id rockchip_usb_phy_dt_ids[] = { - { .compatible = "rockchip,rk3288-usb-phy" }, + { .compatible = "rockchip,rk3066a-usb-phy", .data = &rk3066a_pdata }, + { .compatible = "rockchip,rk3188-usb-phy", .data = &rk3188_pdata }, + { .compatible = "rockchip,rk3288-usb-phy", .data = &rk3288_pdata }, {} }; diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index b12964b70625..bae54f7a1f48 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -32,6 +32,7 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/phy/phy.h> #include <linux/phy/phy-sun4i-usb.h> @@ -46,6 +47,9 @@ #define REG_PHYBIST 0x08 #define REG_PHYTUNE 0x0c #define REG_PHYCTL_A33 0x10 +#define REG_PHY_UNK_H3 0x20 + +#define REG_PMU_UNK_H3 0x10 #define PHYCTL_DATA BIT(7) @@ -79,7 +83,7 @@ #define PHY_DISCON_TH_SEL 0x2a #define PHY_SQUELCH_DETECT 0x3c -#define MAX_PHYS 3 
+#define MAX_PHYS 4 /* * Note do not raise the debounce time, we must report Vusb high within 100ms @@ -88,12 +92,24 @@ #define DEBOUNCE_TIME msecs_to_jiffies(50) #define POLL_TIME msecs_to_jiffies(250) +enum sun4i_usb_phy_type { + sun4i_a10_phy, + sun8i_a33_phy, + sun8i_h3_phy, +}; + +struct sun4i_usb_phy_cfg { + int num_phys; + enum sun4i_usb_phy_type type; + u32 disc_thresh; + u8 phyctl_offset; + bool dedicated_clocks; +}; + struct sun4i_usb_phy_data { void __iomem *base; + const struct sun4i_usb_phy_cfg *cfg; struct mutex mutex; - int num_phys; - u32 disc_thresh; - bool has_a33_phyctl; struct sun4i_usb_phy { struct phy *phy; void __iomem *pmu; @@ -159,17 +175,14 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data, { struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy); u32 temp, usbc_bit = BIT(phy->index * 2); - void *phyctl; + void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset; int i; mutex_lock(&phy_data->mutex); - if (phy_data->has_a33_phyctl) { - phyctl = phy_data->base + REG_PHYCTL_A33; + if (phy_data->cfg->type == sun8i_a33_phy) { /* A33 needs us to set phyctl to 0 explicitly */ writel(0, phyctl); - } else { - phyctl = phy_data->base + REG_PHYCTL_A10; } for (i = 0; i < len; i++) { @@ -230,6 +243,7 @@ static int sun4i_usb_phy_init(struct phy *_phy) struct sun4i_usb_phy *phy = phy_get_drvdata(_phy); struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); int ret; + u32 val; ret = clk_prepare_enable(phy->clk); if (ret) @@ -241,15 +255,26 @@ static int sun4i_usb_phy_init(struct phy *_phy) return ret; } - /* Enable USB 45 Ohm resistor calibration */ - if (phy->index == 0) - sun4i_usb_phy_write(phy, PHY_RES45_CAL_EN, 0x01, 1); + if (data->cfg->type == sun8i_h3_phy) { + if (phy->index == 0) { + val = readl(data->base + REG_PHY_UNK_H3); + writel(val & ~1, data->base + REG_PHY_UNK_H3); + } - /* Adjust PHY's magnitude and rate */ - sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5); + val = readl(phy->pmu + REG_PMU_UNK_H3); + writel(val & ~2, phy->pmu + REG_PMU_UNK_H3); + } else { + /* Enable USB 45 Ohm resistor calibration */ + if (phy->index == 0) + sun4i_usb_phy_write(phy, PHY_RES45_CAL_EN, 0x01, 1); - /* Disconnect threshold adjustment */ - sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL, data->disc_thresh, 2); + /* Adjust PHY's magnitude and rate */ + sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5); + + /* Disconnect threshold adjustment */ + sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL, + data->cfg->disc_thresh, 2); + } sun4i_usb_phy_passby(phy, 1); @@ -476,7 +501,7 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev, { struct sun4i_usb_phy_data *data = dev_get_drvdata(dev); - if (args->args[0] >= data->num_phys) + if (args->args[0] >= data->cfg->num_phys) return ERR_PTR(-ENODEV); return data->phys[args->args[0]].phy; @@ -511,7 +536,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct phy_provider *phy_provider; - bool dedicated_clocks; struct resource *res; int i, ret; @@ -522,29 +546,9 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) mutex_init(&data->mutex); INIT_DELAYED_WORK(&data->detect, sun4i_usb_phy0_id_vbus_det_scan); dev_set_drvdata(dev, data); - - if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy") || - of_device_is_compatible(np, "allwinner,sun8i-a23-usb-phy") || - of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy")) - data->num_phys = 2; - else - data->num_phys = 3; - - if 
(of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy") || - of_device_is_compatible(np, "allwinner,sun7i-a20-usb-phy")) - data->disc_thresh = 2; - else - data->disc_thresh = 3; - - if (of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy") || - of_device_is_compatible(np, "allwinner,sun8i-a23-usb-phy") || - of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy")) - dedicated_clocks = true; - else - dedicated_clocks = false; - - if (of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy")) - data->has_a33_phyctl = true; + data->cfg = of_device_get_match_data(dev); + if (!data->cfg) + return -EINVAL; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl"); data->base = devm_ioremap_resource(dev, res); @@ -590,7 +594,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) } } - for (i = 0; i < data->num_phys; i++) { + for (i = 0; i < data->cfg->num_phys; i++) { struct sun4i_usb_phy *phy = data->phys + i; char name[16]; @@ -602,7 +606,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) phy->vbus = NULL; } - if (dedicated_clocks) + if (data->cfg->dedicated_clocks) snprintf(name, sizeof(name), "usb%d_phy", i); else strlcpy(name, "usb_phy", sizeof(name)); @@ -689,13 +693,69 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) return 0; } +static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = { + .num_phys = 3, + .type = sun4i_a10_phy, + .disc_thresh = 3, + .phyctl_offset = REG_PHYCTL_A10, + .dedicated_clocks = false, +}; + +static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = { + .num_phys = 2, + .type = sun4i_a10_phy, + .disc_thresh = 2, + .phyctl_offset = REG_PHYCTL_A10, + .dedicated_clocks = false, +}; + +static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = { + .num_phys = 3, + .type = sun4i_a10_phy, + .disc_thresh = 3, + .phyctl_offset = REG_PHYCTL_A10, + .dedicated_clocks = true, +}; + +static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = { + .num_phys = 3, + .type = sun4i_a10_phy, + .disc_thresh = 2, + .phyctl_offset = REG_PHYCTL_A10, + .dedicated_clocks = false, +}; + +static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = { + .num_phys = 2, + .type = sun4i_a10_phy, + .disc_thresh = 3, + .phyctl_offset = REG_PHYCTL_A10, + .dedicated_clocks = true, +}; + +static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = { + .num_phys = 2, + .type = sun8i_a33_phy, + .disc_thresh = 3, + .phyctl_offset = REG_PHYCTL_A33, + .dedicated_clocks = true, +}; + +static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = { + .num_phys = 4, + .type = sun8i_h3_phy, + .disc_thresh = 3, + .dedicated_clocks = true, +}; + static const struct of_device_id sun4i_usb_phy_of_match[] = { - { .compatible = "allwinner,sun4i-a10-usb-phy" }, - { .compatible = "allwinner,sun5i-a13-usb-phy" }, - { .compatible = "allwinner,sun6i-a31-usb-phy" }, - { .compatible = "allwinner,sun7i-a20-usb-phy" }, - { .compatible = "allwinner,sun8i-a23-usb-phy" }, - { .compatible = "allwinner,sun8i-a33-usb-phy" }, + { .compatible = "allwinner,sun4i-a10-usb-phy", .data = &sun4i_a10_cfg }, + { .compatible = "allwinner,sun5i-a13-usb-phy", .data = &sun5i_a13_cfg }, + { .compatible = "allwinner,sun6i-a31-usb-phy", .data = &sun6i_a31_cfg }, + { .compatible = "allwinner,sun7i-a20-usb-phy", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a23-usb-phy", .data = &sun8i_a23_cfg }, + { .compatible = "allwinner,sun8i-a33-usb-phy", .data = &sun8i_a33_cfg }, + { .compatible = "allwinner,sun8i-h3-usb-phy", .data = &sun8i_h3_cfg }, { }, }; MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match); diff --git 
a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 93bc1120af12..0a477d24cf76 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -56,6 +56,18 @@ #define SATA_PLL_SOFT_RESET BIT(18) +#define PIPE3_PHY_PWRCTL_CLK_CMD_MASK 0x003FC000 +#define PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT 14 + +#define PIPE3_PHY_PWRCTL_CLK_FREQ_MASK 0xFFC00000 +#define PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT 22 + +#define PIPE3_PHY_TX_RX_POWERON 0x3 +#define PIPE3_PHY_TX_RX_POWEROFF 0x0 + +#define PCIE_PCS_MASK 0xFF0000 +#define PCIE_PCS_DELAY_COUNT_SHIFT 0x10 + /* * This is an Empirical value that works, need to confirm the actual * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status @@ -86,8 +98,12 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; + struct regmap *phy_power_syscon; /* ctrl. reg. acces */ + struct regmap *pcs_syscon; /* ctrl. reg. acces */ struct regmap *dpll_reset_syscon; /* ctrl. reg. acces */ unsigned int dpll_reset_reg; /* reg. index within syscon */ + unsigned int power_reg; /* power reg. index within syscon */ + unsigned int pcie_pcs_reg; /* pcs reg. index in syscon */ bool sata_refclk_enabled; }; @@ -144,20 +160,49 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy); static int ti_pipe3_power_off(struct phy *x) { + u32 val; + int ret; struct ti_pipe3 *phy = phy_get_drvdata(x); - omap_control_phy_power(phy->control_dev, 0); + if (!phy->phy_power_syscon) { + omap_control_phy_power(phy->control_dev, 0); + return 0; + } - return 0; + val = PIPE3_PHY_TX_RX_POWEROFF << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT; + + ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg, + PIPE3_PHY_PWRCTL_CLK_CMD_MASK, val); + return ret; } static int ti_pipe3_power_on(struct phy *x) { + u32 val; + u32 mask; + int ret; + unsigned long rate; struct ti_pipe3 *phy = phy_get_drvdata(x); - omap_control_phy_power(phy->control_dev, 1); + if (!phy->phy_power_syscon) { + omap_control_phy_power(phy->control_dev, 1); + return 0; + } - return 0; + rate = clk_get_rate(phy->sys_clk); + if (!rate) { + dev_err(phy->dev, "Invalid clock rate\n"); + return -EINVAL; + } + rate = rate / 1000000; + mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK | + OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK; + val = PIPE3_PHY_TX_RX_POWERON << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT; + val |= rate << OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT; + + ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg, + mask, val); + return ret; } static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy) @@ -229,8 +274,15 @@ static int ti_pipe3_init(struct phy *x) * 18-1804. 
*/ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { - omap_control_pcie_pcs(phy->control_dev, 0x96); - return 0; + if (!phy->pcs_syscon) { + omap_control_pcie_pcs(phy->control_dev, 0x96); + return 0; + } + + val = 0x96 << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT; + ret = regmap_update_bits(phy->pcs_syscon, phy->pcie_pcs_reg, + PCIE_PCS_MASK, val); + return ret; } /* Bring it out of IDLE if it is IDLE */ @@ -308,51 +360,15 @@ static const struct phy_ops ops = { static const struct of_device_id ti_pipe3_id_table[]; -static int ti_pipe3_probe(struct platform_device *pdev) +static int ti_pipe3_get_clk(struct ti_pipe3 *phy) { - struct ti_pipe3 *phy; - struct phy *generic_phy; - struct phy_provider *phy_provider; - struct resource *res; - struct device_node *node = pdev->dev.of_node; - struct device_node *control_node; - struct platform_device *control_pdev; - const struct of_device_id *match; struct clk *clk; + struct device *dev = phy->dev; + struct device_node *node = dev->of_node; - phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); - if (!phy) - return -ENOMEM; - - phy->dev = &pdev->dev; - - if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { - match = of_match_device(ti_pipe3_id_table, &pdev->dev); - if (!match) - return -EINVAL; - - phy->dpll_map = (struct pipe3_dpll_map *)match->data; - if (!phy->dpll_map) { - dev_err(&pdev->dev, "no DPLL data\n"); - return -EINVAL; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "pll_ctrl"); - phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(phy->pll_ctrl_base)) - return PTR_ERR(phy->pll_ctrl_base); - - phy->sys_clk = devm_clk_get(phy->dev, "sysclk"); - if (IS_ERR(phy->sys_clk)) { - dev_err(&pdev->dev, "unable to get sysclk\n"); - return -EINVAL; - } - } - - phy->refclk = devm_clk_get(phy->dev, "refclk"); + phy->refclk = devm_clk_get(dev, "refclk"); if (IS_ERR(phy->refclk)) { - dev_err(&pdev->dev, "unable to get refclk\n"); + dev_err(dev, "unable to get refclk\n"); /* older DTBs have missing refclk in SATA PHY * so don't bail out in case of SATA PHY. */ @@ -361,80 +377,194 @@ static int ti_pipe3_probe(struct platform_device *pdev) } if (!of_device_is_compatible(node, "ti,phy-pipe3-sata")) { - phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); + phy->wkupclk = devm_clk_get(dev, "wkupclk"); if (IS_ERR(phy->wkupclk)) { - dev_err(&pdev->dev, "unable to get wkupclk\n"); + dev_err(dev, "unable to get wkupclk\n"); return PTR_ERR(phy->wkupclk); } } else { phy->wkupclk = ERR_PTR(-ENODEV); - phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node, - "syscon-pllreset"); - if (IS_ERR(phy->dpll_reset_syscon)) { - dev_info(&pdev->dev, - "can't get syscon-pllreset, sata dpll won't idle\n"); - phy->dpll_reset_syscon = NULL; - } else { - if (of_property_read_u32_index(node, - "syscon-pllreset", 1, - &phy->dpll_reset_reg)) { - dev_err(&pdev->dev, - "couldn't get pllreset reg. 
offset\n"); - return -EINVAL; - } + } + + if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie") || + phy->phy_power_syscon) { + phy->sys_clk = devm_clk_get(dev, "sysclk"); + if (IS_ERR(phy->sys_clk)) { + dev_err(dev, "unable to get sysclk\n"); + return -EINVAL; } } if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { - - clk = devm_clk_get(phy->dev, "dpll_ref"); + clk = devm_clk_get(dev, "dpll_ref"); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "unable to get dpll ref clk\n"); + dev_err(dev, "unable to get dpll ref clk\n"); return PTR_ERR(clk); } clk_set_rate(clk, 1500000000); - clk = devm_clk_get(phy->dev, "dpll_ref_m2"); + clk = devm_clk_get(dev, "dpll_ref_m2"); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "unable to get dpll ref m2 clk\n"); + dev_err(dev, "unable to get dpll ref m2 clk\n"); return PTR_ERR(clk); } clk_set_rate(clk, 100000000); - clk = devm_clk_get(phy->dev, "phy-div"); + clk = devm_clk_get(dev, "phy-div"); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "unable to get phy-div clk\n"); + dev_err(dev, "unable to get phy-div clk\n"); return PTR_ERR(clk); } clk_set_rate(clk, 100000000); - phy->div_clk = devm_clk_get(phy->dev, "div-clk"); + phy->div_clk = devm_clk_get(dev, "div-clk"); if (IS_ERR(phy->div_clk)) { - dev_err(&pdev->dev, "unable to get div-clk\n"); + dev_err(dev, "unable to get div-clk\n"); return PTR_ERR(phy->div_clk); } } else { phy->div_clk = ERR_PTR(-ENODEV); } - control_node = of_parse_phandle(node, "ctrl-module", 0); - if (!control_node) { - dev_err(&pdev->dev, "Failed to get control device phandle\n"); - return -EINVAL; + return 0; +} + +static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy) +{ + struct device *dev = phy->dev; + struct device_node *node = dev->of_node; + struct device_node *control_node; + struct platform_device *control_pdev; + + phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-phy-power"); + if (IS_ERR(phy->phy_power_syscon)) { + dev_dbg(dev, + "can't get syscon-phy-power, using control device\n"); + phy->phy_power_syscon = NULL; + } else { + if (of_property_read_u32_index(node, + "syscon-phy-power", 1, + &phy->power_reg)) { + dev_err(dev, "couldn't get power reg. offset\n"); + return -EINVAL; + } + } + + if (!phy->phy_power_syscon) { + control_node = of_parse_phandle(node, "ctrl-module", 0); + if (!control_node) { + dev_err(dev, "Failed to get control device phandle\n"); + return -EINVAL; + } + + control_pdev = of_find_device_by_node(control_node); + if (!control_pdev) { + dev_err(dev, "Failed to get control device\n"); + return -EINVAL; + } + + phy->control_dev = &control_pdev->dev; + } + + if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { + phy->pcs_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-pcs"); + if (IS_ERR(phy->pcs_syscon)) { + dev_dbg(dev, + "can't get syscon-pcs, using omap control\n"); + phy->pcs_syscon = NULL; + } else { + if (of_property_read_u32_index(node, + "syscon-pcs", 1, + &phy->pcie_pcs_reg)) { + dev_err(dev, + "couldn't get pcie pcs reg. offset\n"); + return -EINVAL; + } + } + } + + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) { + phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-pllreset"); + if (IS_ERR(phy->dpll_reset_syscon)) { + dev_info(dev, + "can't get syscon-pllreset, sata dpll won't idle\n"); + phy->dpll_reset_syscon = NULL; + } else { + if (of_property_read_u32_index(node, + "syscon-pllreset", 1, + &phy->dpll_reset_reg)) { + dev_err(dev, + "couldn't get pllreset reg. 
offset\n"); + return -EINVAL; + } + } } - control_pdev = of_find_device_by_node(control_node); - if (!control_pdev) { - dev_err(&pdev->dev, "Failed to get control device\n"); + return 0; +} + +static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy) +{ + struct resource *res; + const struct of_device_id *match; + struct device *dev = phy->dev; + struct device_node *node = dev->of_node; + struct platform_device *pdev = to_platform_device(dev); + + if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) + return 0; + + match = of_match_device(ti_pipe3_id_table, dev); + if (!match) + return -EINVAL; + + phy->dpll_map = (struct pipe3_dpll_map *)match->data; + if (!phy->dpll_map) { + dev_err(dev, "no DPLL data\n"); return -EINVAL; } - phy->control_dev = &control_pdev->dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "pll_ctrl"); + phy->pll_ctrl_base = devm_ioremap_resource(dev, res); + if (IS_ERR(phy->pll_ctrl_base)) + return PTR_ERR(phy->pll_ctrl_base); + + return 0; +} - omap_control_phy_power(phy->control_dev, 0); +static int ti_pipe3_probe(struct platform_device *pdev) +{ + struct ti_pipe3 *phy; + struct phy *generic_phy; + struct phy_provider *phy_provider; + struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + int ret; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->dev = dev; + + ret = ti_pipe3_get_pll_base(phy); + if (ret) + return ret; + + ret = ti_pipe3_get_sysctrl(phy); + if (ret) + return ret; + + ret = ti_pipe3_get_clk(phy); + if (ret) + return ret; platform_set_drvdata(pdev, phy); - pm_runtime_enable(phy->dev); + pm_runtime_enable(dev); /* * Prevent auto-disable of refclk for SATA PHY due to Errata i783 @@ -446,13 +576,15 @@ static int ti_pipe3_probe(struct platform_device *pdev) } } - generic_phy = devm_phy_create(phy->dev, NULL, &ops); + generic_phy = devm_phy_create(dev, NULL, &ops); if (IS_ERR(generic_phy)) return PTR_ERR(generic_phy); phy_set_drvdata(generic_phy, phy); - phy_provider = devm_of_phy_provider_register(phy->dev, - of_phy_simple_xlate); + + ti_pipe3_power_off(generic_phy); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 3a707dd14238..4a3fc6e59f8e 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -34,7 +34,7 @@ #include <linux/usb/otg.h> #include <linux/phy/phy.h> #include <linux/pm_runtime.h> -#include <linux/usb/musb-omap.h> +#include <linux/usb/musb.h> #include <linux/usb/ulpi.h> #include <linux/i2c/twl.h> #include <linux/regulator/consumer.h> @@ -148,10 +148,10 @@ * If VBUS is valid or ID is ground, then we know a * cable is present and we need to be runtime-enabled */ -static inline bool cable_present(enum omap_musb_vbus_id_status stat) +static inline bool cable_present(enum musb_vbus_id_status stat) { - return stat == OMAP_MUSB_VBUS_VALID || - stat == OMAP_MUSB_ID_GROUND; + return stat == MUSB_VBUS_VALID || + stat == MUSB_ID_GROUND; } struct twl4030_usb { @@ -170,7 +170,7 @@ struct twl4030_usb { enum twl4030_usb_mode usb_mode; int irq; - enum omap_musb_vbus_id_status linkstat; + enum musb_vbus_id_status linkstat; bool vbus_supplied; struct delayed_work id_workaround_work; @@ -276,11 +276,11 @@ static bool twl4030_is_driving_vbus(struct twl4030_usb *twl) return (ret & (ULPI_OTG_DRVVBUS | ULPI_OTG_CHRGVBUS)) ? 
true : false; } -static enum omap_musb_vbus_id_status +static enum musb_vbus_id_status twl4030_usb_linkstat(struct twl4030_usb *twl) { int status; - enum omap_musb_vbus_id_status linkstat = OMAP_MUSB_UNKNOWN; + enum musb_vbus_id_status linkstat = MUSB_UNKNOWN; twl->vbus_supplied = false; @@ -306,14 +306,14 @@ static enum omap_musb_vbus_id_status } if (status & BIT(2)) - linkstat = OMAP_MUSB_ID_GROUND; + linkstat = MUSB_ID_GROUND; else if (status & BIT(7)) - linkstat = OMAP_MUSB_VBUS_VALID; + linkstat = MUSB_VBUS_VALID; else - linkstat = OMAP_MUSB_VBUS_OFF; + linkstat = MUSB_VBUS_OFF; } else { - if (twl->linkstat != OMAP_MUSB_UNKNOWN) - linkstat = OMAP_MUSB_VBUS_OFF; + if (twl->linkstat != MUSB_UNKNOWN) + linkstat = MUSB_VBUS_OFF; } dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n", @@ -535,7 +535,7 @@ static DEVICE_ATTR(vbus, 0444, twl4030_usb_vbus_show, NULL); static irqreturn_t twl4030_usb_irq(int irq, void *_twl) { struct twl4030_usb *twl = _twl; - enum omap_musb_vbus_id_status status; + enum musb_vbus_id_status status; bool status_changed = false; status = twl4030_usb_linkstat(twl); @@ -567,11 +567,11 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) pm_runtime_mark_last_busy(twl->dev); pm_runtime_put_autosuspend(twl->dev); } - omap_musb_mailbox(status); + musb_mailbox(status); } /* don't schedule during sleep - irq works right then */ - if (status == OMAP_MUSB_ID_GROUND && pm_runtime_active(twl->dev)) { + if (status == MUSB_ID_GROUND && pm_runtime_active(twl->dev)) { cancel_delayed_work(&twl->id_workaround_work); schedule_delayed_work(&twl->id_workaround_work, HZ); } @@ -670,7 +670,7 @@ static int twl4030_usb_probe(struct platform_device *pdev) twl->dev = &pdev->dev; twl->irq = platform_get_irq(pdev, 0); twl->vbus_supplied = false; - twl->linkstat = OMAP_MUSB_UNKNOWN; + twl->linkstat = MUSB_UNKNOWN; twl->phy.dev = twl->dev; twl->phy.label = "twl4030"; diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 312c78b27a32..99a4c10ed43f 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -244,7 +244,7 @@ config PINCTRL_ZYNQ select PINMUX select GENERIC_PINCONF help - This selectes the pinctrl driver for Xilinx Zynq. + This selects the pinctrl driver for Xilinx Zynq. 
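Note on the phy-twl4030-usb hunks above: the driver moves from the OMAP-specific omap_musb_vbus_id_status values to the generic musb_vbus_id_status enum while keeping the same decode of the hardware-conditions register. The following is a minimal standalone sketch of that decode, assuming only what the hunk itself shows (bit 2 reports ID grounded, bit 7 reports a valid VBUS, and anything else falls back to OFF once a link state has been seen). The enum and function names below are made up for illustration and are not the driver's own symbols.

/*
 * Illustrative sketch only: map a hardware-conditions value to the generic
 * MUSB link status, mirroring the bit tests visible in the hunk above.
 * Bit positions (2 = ID ground, 7 = VBUS valid) are taken from the diff;
 * everything else here is hypothetical.
 */
enum sketch_musb_status {
	SKETCH_MUSB_UNKNOWN,
	SKETCH_MUSB_VBUS_OFF,
	SKETCH_MUSB_VBUS_VALID,
	SKETCH_MUSB_ID_GROUND,
};

static enum sketch_musb_status
sketch_decode_linkstat(unsigned int hw_conditions, int seen_valid_state)
{
	if (hw_conditions & (1u << 2))		/* ID pin grounded: host-mode cable */
		return SKETCH_MUSB_ID_GROUND;
	if (hw_conditions & (1u << 7))		/* VBUS above the valid threshold */
		return SKETCH_MUSB_VBUS_VALID;
	/* neither ID nor VBUS: report OFF only once a state has been observed */
	return seen_valid_state ? SKETCH_MUSB_VBUS_OFF : SKETCH_MUSB_UNKNOWN;
}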
source "drivers/pinctrl/bcm/Kconfig" source "drivers/pinctrl/berlin/Kconfig" @@ -252,6 +252,7 @@ source "drivers/pinctrl/freescale/Kconfig" source "drivers/pinctrl/intel/Kconfig" source "drivers/pinctrl/mvebu/Kconfig" source "drivers/pinctrl/nomadik/Kconfig" +source "drivers/pinctrl/pxa/Kconfig" source "drivers/pinctrl/qcom/Kconfig" source "drivers/pinctrl/samsung/Kconfig" source "drivers/pinctrl/sh-pfc/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 738cb4929a49..bf1b5ca5180b 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -41,15 +41,16 @@ obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o obj-$(CONFIG_ARCH_BCM) += bcm/ -obj-$(CONFIG_ARCH_BERLIN) += berlin/ +obj-$(CONFIG_PINCTRL_BERLIN) += berlin/ obj-y += freescale/ obj-$(CONFIG_X86) += intel/ -obj-$(CONFIG_PLAT_ORION) += mvebu/ +obj-$(CONFIG_PINCTRL_MVEBU) += mvebu/ obj-y += nomadik/ +obj-$(CONFIG_ARCH_PXA) += pxa/ obj-$(CONFIG_ARCH_QCOM) += qcom/ obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/ obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/ -obj-$(CONFIG_PLAT_SPEAR) += spear/ +obj-$(CONFIG_PINCTRL_SPEAR) += spear/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/ obj-$(CONFIG_ARCH_VT8500) += vt8500/ diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index cd11d4d9ad58..2cc74384cafa 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -9,6 +9,7 @@ config PINCTRL_BCM281XX select PINCONF select GENERIC_PINCONF select REGMAP_MMIO + default ARCH_BCM_MOBILE help Say Y here to support Broadcom BCM281xx pinctrl driver, which is used for the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351, @@ -20,27 +21,41 @@ config PINCTRL_BCM2835 select PINMUX select PINCONF -config PINCTRL_CYGNUS_GPIO - bool "Broadcom Cygnus GPIO (with PINCONF) driver" - depends on OF_GPIO && ARCH_BCM_CYGNUS +config PINCTRL_IPROC_GPIO + bool "Broadcom iProc GPIO (with PINCONF) driver" + depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST) select GPIOLIB_IRQCHIP select PINCONF select GENERIC_PINCONF - default ARCH_BCM_CYGNUS + default ARCH_BCM_IPROC help - Say yes here to enable the Broadcom Cygnus GPIO driver. + Say yes here to enable the Broadcom iProc GPIO driver. + + The Broadcom iProc based SoCs- Cygnus, NS2, NSP and Stingray, use + same GPIO Controller IP hence this driver could be used for all. The Broadcom Cygnus SoC has 3 GPIO controllers including the ASIU GPIO controller (ASIU), the chipCommonG GPIO controller (CCM), and the always-ON GPIO controller (CRMU/AON). All 3 GPIO controllers are supported by this driver. - All 3 Cygnus GPIO controllers support basic PINCONF functions such + The Broadcom NSP has two GPIO controllers including the ChipcommonA + GPIO, the ChipcommonB GPIO. Later controller is supported by this + driver. + + The Broadcom NS2 has two GPIO controller including the CRMU GPIO, + the ChipcommonG GPIO. Both controllers are supported by this driver. + + The Broadcom Stingray GPIO controllers are supported by this driver. + + All above SoCs GPIO controllers support basic PINCONF functions such as bias pull up, pull down, and drive strength configurations, when these pins are muxed to GPIO. - Pins from the ASIU GPIO can be individually muxed to GPIO function, - through interaction with the Cygnus IOMUX controller. + It provides the framework where pins from the individual GPIO can be + individually muxed to GPIO function, through interaction with the + SoCs IOMUX controller. 
This features could be used only on SoCs which + support individual pin muxing. config PINCTRL_CYGNUS_MUX bool "Broadcom Cygnus IOMUX driver" @@ -54,3 +69,20 @@ config PINCTRL_CYGNUS_MUX The Broadcom Cygnus IOMUX driver supports group based IOMUX configuration, with the exception that certain individual pins can be overrided to GPIO function + +config PINCTRL_NSP_GPIO + bool "Broadcom NSP GPIO (with PINCONF) driver" + depends on OF_GPIO && (ARCH_BCM_NSP || COMPILE_TEST) + select GPIOLIB_IRQCHIP + select PINCONF + select GENERIC_PINCONF + default ARCH_BCM_NSP + help + Say yes here to enable the Broadcom NSP GPIO driver. + + The Broadcom Northstar Plus SoC ChipcommonA GPIO controller is + supported by this driver. + + The ChipcommonA GPIO controller support basic PINCONF functions such + as bias pull up, pull down, and drive strength configurations, when + these pins are muxed to GPIO. diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile index 2b2f70ee804c..6148367d5e8c 100644 --- a/drivers/pinctrl/bcm/Makefile +++ b/drivers/pinctrl/bcm/Makefile @@ -2,5 +2,6 @@ obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o -obj-$(CONFIG_PINCTRL_CYGNUS_GPIO) += pinctrl-cygnus-gpio.o +obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o +obj-$(CONFIG_PINCTRL_NSP_GPIO) += pinctrl-nsp-gpio.o diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index a1ea565fcd46..75b0d8c8f058 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -342,12 +342,6 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset) return bcm2835_gpio_get_bit(pc, GPLEV0, offset); } -static int bcm2835_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - return pinctrl_gpio_direction_output(chip->base + offset); -} - static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); @@ -355,6 +349,13 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); } +static int bcm2835_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + bcm2835_gpio_set(chip, offset, value); + return pinctrl_gpio_direction_output(chip->base + offset); +} + static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); @@ -794,7 +795,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, return 0; out: - kfree(maps); + bcm2835_pctl_dt_free_map(pctldev, maps, num_pins * maps_per_pin); return err; } diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index 12a48f498b75..314591a4609b 100644 --- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -10,14 +10,16 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * This file contains the Broadcom Cygnus GPIO driver that supports 3 - * GPIO controllers on Cygnus including the ASIU GPIO controller, the + * This file contains the Broadcom Iproc GPIO driver that supports 3 + * GPIO controllers on Iproc including the ASIU GPIO controller, the * chipCommonG GPIO controller, and the always-on GPIO controller. 
Basic * PINCONF such as bias pull up/down, and drive strength are also supported * in this driver. * - * Pins from the ASIU GPIO can be individually muxed to GPIO function, - * through the interaction with the Cygnus IOMUX controller + * It provides the functionality where pins from the GPIO can be + * individually muxed to GPIO function, if individual pad + * configuration is supported, through the interaction with respective + * SoCs IOMUX controller. */ #include <linux/kernel.h> @@ -34,42 +36,42 @@ #include "../pinctrl-utils.h" -#define CYGNUS_GPIO_DATA_IN_OFFSET 0x00 -#define CYGNUS_GPIO_DATA_OUT_OFFSET 0x04 -#define CYGNUS_GPIO_OUT_EN_OFFSET 0x08 -#define CYGNUS_GPIO_INT_TYPE_OFFSET 0x0c -#define CYGNUS_GPIO_INT_DE_OFFSET 0x10 -#define CYGNUS_GPIO_INT_EDGE_OFFSET 0x14 -#define CYGNUS_GPIO_INT_MSK_OFFSET 0x18 -#define CYGNUS_GPIO_INT_STAT_OFFSET 0x1c -#define CYGNUS_GPIO_INT_MSTAT_OFFSET 0x20 -#define CYGNUS_GPIO_INT_CLR_OFFSET 0x24 -#define CYGNUS_GPIO_PAD_RES_OFFSET 0x34 -#define CYGNUS_GPIO_RES_EN_OFFSET 0x38 +#define IPROC_GPIO_DATA_IN_OFFSET 0x00 +#define IPROC_GPIO_DATA_OUT_OFFSET 0x04 +#define IPROC_GPIO_OUT_EN_OFFSET 0x08 +#define IPROC_GPIO_INT_TYPE_OFFSET 0x0c +#define IPROC_GPIO_INT_DE_OFFSET 0x10 +#define IPROC_GPIO_INT_EDGE_OFFSET 0x14 +#define IPROC_GPIO_INT_MSK_OFFSET 0x18 +#define IPROC_GPIO_INT_STAT_OFFSET 0x1c +#define IPROC_GPIO_INT_MSTAT_OFFSET 0x20 +#define IPROC_GPIO_INT_CLR_OFFSET 0x24 +#define IPROC_GPIO_PAD_RES_OFFSET 0x34 +#define IPROC_GPIO_RES_EN_OFFSET 0x38 /* drive strength control for ASIU GPIO */ -#define CYGNUS_GPIO_ASIU_DRV0_CTRL_OFFSET 0x58 +#define IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET 0x58 /* drive strength control for CCM/CRMU (AON) GPIO */ -#define CYGNUS_GPIO_DRV0_CTRL_OFFSET 0x00 +#define IPROC_GPIO_DRV0_CTRL_OFFSET 0x00 #define GPIO_BANK_SIZE 0x200 #define NGPIOS_PER_BANK 32 #define GPIO_BANK(pin) ((pin) / NGPIOS_PER_BANK) -#define CYGNUS_GPIO_REG(pin, reg) (GPIO_BANK(pin) * GPIO_BANK_SIZE + (reg)) -#define CYGNUS_GPIO_SHIFT(pin) ((pin) % NGPIOS_PER_BANK) +#define IPROC_GPIO_REG(pin, reg) (GPIO_BANK(pin) * GPIO_BANK_SIZE + (reg)) +#define IPROC_GPIO_SHIFT(pin) ((pin) % NGPIOS_PER_BANK) #define GPIO_DRV_STRENGTH_BIT_SHIFT 20 #define GPIO_DRV_STRENGTH_BITS 3 #define GPIO_DRV_STRENGTH_BIT_MASK ((1 << GPIO_DRV_STRENGTH_BITS) - 1) /* - * Cygnus GPIO core + * Iproc GPIO core * * @dev: pointer to device - * @base: I/O register base for Cygnus GPIO controller - * @io_ctrl: I/O register base for certain type of Cygnus GPIO controller that + * @base: I/O register base for Iproc GPIO controller + * @io_ctrl: I/O register base for certain type of Iproc GPIO controller that * has the PINCONF support implemented outside of the GPIO block * @lock: lock to protect access to I/O registers * @gc: GPIO chip @@ -79,7 +81,7 @@ * @pctl: pointer to pinctrl_dev * @pctldesc: pinctrl descriptor */ -struct cygnus_gpio { +struct iproc_gpio { struct device *dev; void __iomem *base; @@ -96,33 +98,33 @@ struct cygnus_gpio { struct pinctrl_desc pctldesc; }; -static inline struct cygnus_gpio *to_cygnus_gpio(struct gpio_chip *gc) +static inline struct iproc_gpio *to_iproc_gpio(struct gpio_chip *gc) { - return container_of(gc, struct cygnus_gpio, gc); + return container_of(gc, struct iproc_gpio, gc); } /* * Mapping from PINCONF pins to GPIO pins is 1-to-1 */ -static inline unsigned cygnus_pin_to_gpio(unsigned pin) +static inline unsigned iproc_pin_to_gpio(unsigned pin) { return pin; } /** - * cygnus_set_bit - set or clear one bit (corresponding to the GPIO pin) in a - * Cygnus GPIO 
register + * iproc_set_bit - set or clear one bit (corresponding to the GPIO pin) in a + * Iproc GPIO register * - * @cygnus_gpio: Cygnus GPIO device + * @iproc_gpio: Iproc GPIO device * @reg: register offset * @gpio: GPIO pin * @set: set or clear */ -static inline void cygnus_set_bit(struct cygnus_gpio *chip, unsigned int reg, +static inline void iproc_set_bit(struct iproc_gpio *chip, unsigned int reg, unsigned gpio, bool set) { - unsigned int offset = CYGNUS_GPIO_REG(gpio, reg); - unsigned int shift = CYGNUS_GPIO_SHIFT(gpio); + unsigned int offset = IPROC_GPIO_REG(gpio, reg); + unsigned int shift = IPROC_GPIO_SHIFT(gpio); u32 val; val = readl(chip->base + offset); @@ -133,19 +135,19 @@ static inline void cygnus_set_bit(struct cygnus_gpio *chip, unsigned int reg, writel(val, chip->base + offset); } -static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg, +static inline bool iproc_get_bit(struct iproc_gpio *chip, unsigned int reg, unsigned gpio) { - unsigned int offset = CYGNUS_GPIO_REG(gpio, reg); - unsigned int shift = CYGNUS_GPIO_SHIFT(gpio); + unsigned int offset = IPROC_GPIO_REG(gpio, reg); + unsigned int shift = IPROC_GPIO_SHIFT(gpio); return !!(readl(chip->base + offset) & BIT(shift)); } -static void cygnus_gpio_irq_handler(struct irq_desc *desc) +static void iproc_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); struct irq_chip *irq_chip = irq_desc_get_chip(desc); int i, bit; @@ -154,7 +156,7 @@ static void cygnus_gpio_irq_handler(struct irq_desc *desc) /* go through the entire GPIO banks and handle all interrupts */ for (i = 0; i < chip->num_banks; i++) { unsigned long val = readl(chip->base + (i * GPIO_BANK_SIZE) + - CYGNUS_GPIO_INT_MSTAT_OFFSET); + IPROC_GPIO_INT_MSTAT_OFFSET); for_each_set_bit(bit, &val, NGPIOS_PER_BANK) { unsigned pin = NGPIOS_PER_BANK * i + bit; @@ -165,7 +167,7 @@ static void cygnus_gpio_irq_handler(struct irq_desc *desc) * handler, so we do not leave any window */ writel(BIT(bit), chip->base + (i * GPIO_BANK_SIZE) + - CYGNUS_GPIO_INT_CLR_OFFSET); + IPROC_GPIO_INT_CLR_OFFSET); generic_handle_irq(child_irq); } @@ -175,60 +177,60 @@ static void cygnus_gpio_irq_handler(struct irq_desc *desc) } -static void cygnus_gpio_irq_ack(struct irq_data *d) +static void iproc_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned gpio = d->hwirq; - unsigned int offset = CYGNUS_GPIO_REG(gpio, - CYGNUS_GPIO_INT_CLR_OFFSET); - unsigned int shift = CYGNUS_GPIO_SHIFT(gpio); + unsigned int offset = IPROC_GPIO_REG(gpio, + IPROC_GPIO_INT_CLR_OFFSET); + unsigned int shift = IPROC_GPIO_SHIFT(gpio); u32 val = BIT(shift); writel(val, chip->base + offset); } /** - * cygnus_gpio_irq_set_mask - mask/unmask a GPIO interrupt + * iproc_gpio_irq_set_mask - mask/unmask a GPIO interrupt * * @d: IRQ chip data * @unmask: mask/unmask GPIO interrupt */ -static void cygnus_gpio_irq_set_mask(struct irq_data *d, bool unmask) +static void iproc_gpio_irq_set_mask(struct irq_data *d, bool unmask) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned gpio = d->hwirq; - cygnus_set_bit(chip, CYGNUS_GPIO_INT_MSK_OFFSET, gpio, unmask); + iproc_set_bit(chip, IPROC_GPIO_INT_MSK_OFFSET, gpio, unmask); } 
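Note on the iproc_set_bit()/iproc_get_bit() helpers renamed above: they keep the Cygnus register layout, where each bank of 32 GPIOs occupies a 0x200 window and the pin number selects both the bank-relative register and the bit inside that word. Below is a minimal standalone sketch of that addressing, using made-up names and only the constants visible in the hunk (GPIO_BANK_SIZE 0x200, NGPIOS_PER_BANK 32); it is not the driver's code.

#include <stdint.h>
#include <stdbool.h>

#define SKETCH_BANK_SIZE	0x200	/* per-bank register window, from the hunk */
#define SKETCH_PINS_PER_BANK	32	/* GPIOs per bank, from the hunk */

/* Bank-relative address of a per-pin register: bank stride plus register offset. */
static inline uint32_t sketch_reg_offset(unsigned int pin, unsigned int reg)
{
	return (pin / SKETCH_PINS_PER_BANK) * SKETCH_BANK_SIZE + reg;
}

/* Read-modify-write of the bit that corresponds to the pin within its bank word. */
static inline uint32_t sketch_update_bit(uint32_t old_val, unsigned int pin, bool set)
{
	uint32_t mask = 1u << (pin % SKETCH_PINS_PER_BANK);

	return set ? (old_val | mask) : (old_val & ~mask);
}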
-static void cygnus_gpio_irq_mask(struct irq_data *d) +static void iproc_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned long flags; spin_lock_irqsave(&chip->lock, flags); - cygnus_gpio_irq_set_mask(d, false); + iproc_gpio_irq_set_mask(d, false); spin_unlock_irqrestore(&chip->lock, flags); } -static void cygnus_gpio_irq_unmask(struct irq_data *d) +static void iproc_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned long flags; spin_lock_irqsave(&chip->lock, flags); - cygnus_gpio_irq_set_mask(d, true); + iproc_gpio_irq_set_mask(d, true); spin_unlock_irqrestore(&chip->lock, flags); } -static int cygnus_gpio_irq_set_type(struct irq_data *d, unsigned int type) +static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned gpio = d->hwirq; bool level_triggered = false; bool dual_edge = false; @@ -263,10 +265,10 @@ static int cygnus_gpio_irq_set_type(struct irq_data *d, unsigned int type) } spin_lock_irqsave(&chip->lock, flags); - cygnus_set_bit(chip, CYGNUS_GPIO_INT_TYPE_OFFSET, gpio, + iproc_set_bit(chip, IPROC_GPIO_INT_TYPE_OFFSET, gpio, level_triggered); - cygnus_set_bit(chip, CYGNUS_GPIO_INT_DE_OFFSET, gpio, dual_edge); - cygnus_set_bit(chip, CYGNUS_GPIO_INT_EDGE_OFFSET, gpio, + iproc_set_bit(chip, IPROC_GPIO_INT_DE_OFFSET, gpio, dual_edge); + iproc_set_bit(chip, IPROC_GPIO_INT_EDGE_OFFSET, gpio, rising_or_high); spin_unlock_irqrestore(&chip->lock, flags); @@ -277,32 +279,32 @@ static int cygnus_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip cygnus_gpio_irq_chip = { - .name = "bcm-cygnus-gpio", - .irq_ack = cygnus_gpio_irq_ack, - .irq_mask = cygnus_gpio_irq_mask, - .irq_unmask = cygnus_gpio_irq_unmask, - .irq_set_type = cygnus_gpio_irq_set_type, +static struct irq_chip iproc_gpio_irq_chip = { + .name = "bcm-iproc-gpio", + .irq_ack = iproc_gpio_irq_ack, + .irq_mask = iproc_gpio_irq_mask, + .irq_unmask = iproc_gpio_irq_unmask, + .irq_set_type = iproc_gpio_irq_set_type, }; /* - * Request the Cygnus IOMUX pinmux controller to mux individual pins to GPIO + * Request the Iproc IOMUX pinmux controller to mux individual pins to GPIO */ -static int cygnus_gpio_request(struct gpio_chip *gc, unsigned offset) +static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset) { - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned gpio = gc->base + offset; - /* not all Cygnus GPIO pins can be muxed individually */ + /* not all Iproc GPIO pins can be muxed individually */ if (!chip->pinmux_is_supported) return 0; return pinctrl_request_gpio(gpio); } -static void cygnus_gpio_free(struct gpio_chip *gc, unsigned offset) +static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset) { - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned gpio = gc->base + offset; if (!chip->pinmux_is_supported) @@ -311,13 +313,13 @@ static void cygnus_gpio_free(struct gpio_chip *gc, unsigned offset) pinctrl_free_gpio(gpio); } -static int cygnus_gpio_direction_input(struct gpio_chip *gc, unsigned gpio) +static int 
iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio) { - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned long flags; spin_lock_irqsave(&chip->lock, flags); - cygnus_set_bit(chip, CYGNUS_GPIO_OUT_EN_OFFSET, gpio, false); + iproc_set_bit(chip, IPROC_GPIO_OUT_EN_OFFSET, gpio, false); spin_unlock_irqrestore(&chip->lock, flags); dev_dbg(chip->dev, "gpio:%u set input\n", gpio); @@ -325,15 +327,15 @@ static int cygnus_gpio_direction_input(struct gpio_chip *gc, unsigned gpio) return 0; } -static int cygnus_gpio_direction_output(struct gpio_chip *gc, unsigned gpio, +static int iproc_gpio_direction_output(struct gpio_chip *gc, unsigned gpio, int val) { - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned long flags; spin_lock_irqsave(&chip->lock, flags); - cygnus_set_bit(chip, CYGNUS_GPIO_OUT_EN_OFFSET, gpio, true); - cygnus_set_bit(chip, CYGNUS_GPIO_DATA_OUT_OFFSET, gpio, !!(val)); + iproc_set_bit(chip, IPROC_GPIO_OUT_EN_OFFSET, gpio, true); + iproc_set_bit(chip, IPROC_GPIO_DATA_OUT_OFFSET, gpio, !!(val)); spin_unlock_irqrestore(&chip->lock, flags); dev_dbg(chip->dev, "gpio:%u set output, value:%d\n", gpio, val); @@ -341,29 +343,29 @@ static int cygnus_gpio_direction_output(struct gpio_chip *gc, unsigned gpio, return 0; } -static void cygnus_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) +static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) { - struct cygnus_gpio *chip = to_cygnus_gpio(gc); + struct iproc_gpio *chip = to_iproc_gpio(gc); unsigned long flags; spin_lock_irqsave(&chip->lock, flags); - cygnus_set_bit(chip, CYGNUS_GPIO_DATA_OUT_OFFSET, gpio, !!(val)); + iproc_set_bit(chip, IPROC_GPIO_DATA_OUT_OFFSET, gpio, !!(val)); spin_unlock_irqrestore(&chip->lock, flags); dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val); } -static int cygnus_gpio_get(struct gpio_chip *gc, unsigned gpio) +static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio) { - struct cygnus_gpio *chip = to_cygnus_gpio(gc); - unsigned int offset = CYGNUS_GPIO_REG(gpio, - CYGNUS_GPIO_DATA_IN_OFFSET); - unsigned int shift = CYGNUS_GPIO_SHIFT(gpio); + struct iproc_gpio *chip = to_iproc_gpio(gc); + unsigned int offset = IPROC_GPIO_REG(gpio, + IPROC_GPIO_DATA_IN_OFFSET); + unsigned int shift = IPROC_GPIO_SHIFT(gpio); return !!(readl(chip->base + offset) & BIT(shift)); } -static int cygnus_get_groups_count(struct pinctrl_dev *pctldev) +static int iproc_get_groups_count(struct pinctrl_dev *pctldev) { return 1; } @@ -372,20 +374,20 @@ static int cygnus_get_groups_count(struct pinctrl_dev *pctldev) * Only one group: "gpio_grp", since this local pinctrl device only performs * GPIO specific PINCONF configurations */ -static const char *cygnus_get_group_name(struct pinctrl_dev *pctldev, +static const char *iproc_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { return "gpio_grp"; } -static const struct pinctrl_ops cygnus_pctrl_ops = { - .get_groups_count = cygnus_get_groups_count, - .get_group_name = cygnus_get_group_name, +static const struct pinctrl_ops iproc_pctrl_ops = { + .get_groups_count = iproc_get_groups_count, + .get_group_name = iproc_get_group_name, .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, .dt_free_map = pinctrl_utils_dt_free_map, }; -static int cygnus_gpio_set_pull(struct cygnus_gpio *chip, unsigned gpio, +static int iproc_gpio_set_pull(struct iproc_gpio *chip, unsigned gpio, bool disable, bool pull_up) { unsigned long flags; @@ -393,11 +395,11 
@@ static int cygnus_gpio_set_pull(struct cygnus_gpio *chip, unsigned gpio, spin_lock_irqsave(&chip->lock, flags); if (disable) { - cygnus_set_bit(chip, CYGNUS_GPIO_RES_EN_OFFSET, gpio, false); + iproc_set_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio, false); } else { - cygnus_set_bit(chip, CYGNUS_GPIO_PAD_RES_OFFSET, gpio, + iproc_set_bit(chip, IPROC_GPIO_PAD_RES_OFFSET, gpio, pull_up); - cygnus_set_bit(chip, CYGNUS_GPIO_RES_EN_OFFSET, gpio, true); + iproc_set_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio, true); } spin_unlock_irqrestore(&chip->lock, flags); @@ -407,18 +409,18 @@ static int cygnus_gpio_set_pull(struct cygnus_gpio *chip, unsigned gpio, return 0; } -static void cygnus_gpio_get_pull(struct cygnus_gpio *chip, unsigned gpio, +static void iproc_gpio_get_pull(struct iproc_gpio *chip, unsigned gpio, bool *disable, bool *pull_up) { unsigned long flags; spin_lock_irqsave(&chip->lock, flags); - *disable = !cygnus_get_bit(chip, CYGNUS_GPIO_RES_EN_OFFSET, gpio); - *pull_up = cygnus_get_bit(chip, CYGNUS_GPIO_PAD_RES_OFFSET, gpio); + *disable = !iproc_get_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio); + *pull_up = iproc_get_bit(chip, IPROC_GPIO_PAD_RES_OFFSET, gpio); spin_unlock_irqrestore(&chip->lock, flags); } -static int cygnus_gpio_set_strength(struct cygnus_gpio *chip, unsigned gpio, +static int iproc_gpio_set_strength(struct iproc_gpio *chip, unsigned gpio, unsigned strength) { void __iomem *base; @@ -432,14 +434,14 @@ static int cygnus_gpio_set_strength(struct cygnus_gpio *chip, unsigned gpio, if (chip->io_ctrl) { base = chip->io_ctrl; - offset = CYGNUS_GPIO_DRV0_CTRL_OFFSET; + offset = IPROC_GPIO_DRV0_CTRL_OFFSET; } else { base = chip->base; - offset = CYGNUS_GPIO_REG(gpio, - CYGNUS_GPIO_ASIU_DRV0_CTRL_OFFSET); + offset = IPROC_GPIO_REG(gpio, + IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET); } - shift = CYGNUS_GPIO_SHIFT(gpio); + shift = IPROC_GPIO_SHIFT(gpio); dev_dbg(chip->dev, "gpio:%u set drive strength:%d mA\n", gpio, strength); @@ -458,7 +460,7 @@ static int cygnus_gpio_set_strength(struct cygnus_gpio *chip, unsigned gpio, return 0; } -static int cygnus_gpio_get_strength(struct cygnus_gpio *chip, unsigned gpio, +static int iproc_gpio_get_strength(struct iproc_gpio *chip, unsigned gpio, u16 *strength) { void __iomem *base; @@ -468,14 +470,14 @@ static int cygnus_gpio_get_strength(struct cygnus_gpio *chip, unsigned gpio, if (chip->io_ctrl) { base = chip->io_ctrl; - offset = CYGNUS_GPIO_DRV0_CTRL_OFFSET; + offset = IPROC_GPIO_DRV0_CTRL_OFFSET; } else { base = chip->base; - offset = CYGNUS_GPIO_REG(gpio, - CYGNUS_GPIO_ASIU_DRV0_CTRL_OFFSET); + offset = IPROC_GPIO_REG(gpio, + IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET); } - shift = CYGNUS_GPIO_SHIFT(gpio); + shift = IPROC_GPIO_SHIFT(gpio); spin_lock_irqsave(&chip->lock, flags); *strength = 0; @@ -493,44 +495,43 @@ static int cygnus_gpio_get_strength(struct cygnus_gpio *chip, unsigned gpio, return 0; } -static int cygnus_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, +static int iproc_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config) { - struct cygnus_gpio *chip = pinctrl_dev_get_drvdata(pctldev); + struct iproc_gpio *chip = pinctrl_dev_get_drvdata(pctldev); enum pin_config_param param = pinconf_to_config_param(*config); - unsigned gpio = cygnus_pin_to_gpio(pin); + unsigned gpio = iproc_pin_to_gpio(pin); u16 arg; bool disable, pull_up; int ret; switch (param) { case PIN_CONFIG_BIAS_DISABLE: - cygnus_gpio_get_pull(chip, gpio, &disable, &pull_up); + iproc_gpio_get_pull(chip, gpio, &disable, &pull_up); if (disable) return 0; 
else return -EINVAL; case PIN_CONFIG_BIAS_PULL_UP: - cygnus_gpio_get_pull(chip, gpio, &disable, &pull_up); + iproc_gpio_get_pull(chip, gpio, &disable, &pull_up); if (!disable && pull_up) return 0; else return -EINVAL; case PIN_CONFIG_BIAS_PULL_DOWN: - cygnus_gpio_get_pull(chip, gpio, &disable, &pull_up); + iproc_gpio_get_pull(chip, gpio, &disable, &pull_up); if (!disable && !pull_up) return 0; else return -EINVAL; case PIN_CONFIG_DRIVE_STRENGTH: - ret = cygnus_gpio_get_strength(chip, gpio, &arg); + ret = iproc_gpio_get_strength(chip, gpio, &arg); if (ret) return ret; - else - *config = pinconf_to_config_packed(param, arg); + *config = pinconf_to_config_packed(param, arg); return 0; @@ -541,13 +542,13 @@ static int cygnus_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, return -ENOTSUPP; } -static int cygnus_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, +static int iproc_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs) { - struct cygnus_gpio *chip = pinctrl_dev_get_drvdata(pctldev); + struct iproc_gpio *chip = pinctrl_dev_get_drvdata(pctldev); enum pin_config_param param; u16 arg; - unsigned i, gpio = cygnus_pin_to_gpio(pin); + unsigned i, gpio = iproc_pin_to_gpio(pin); int ret = -ENOTSUPP; for (i = 0; i < num_configs; i++) { @@ -556,25 +557,25 @@ static int cygnus_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - ret = cygnus_gpio_set_pull(chip, gpio, true, false); + ret = iproc_gpio_set_pull(chip, gpio, true, false); if (ret < 0) goto out; break; case PIN_CONFIG_BIAS_PULL_UP: - ret = cygnus_gpio_set_pull(chip, gpio, false, true); + ret = iproc_gpio_set_pull(chip, gpio, false, true); if (ret < 0) goto out; break; case PIN_CONFIG_BIAS_PULL_DOWN: - ret = cygnus_gpio_set_pull(chip, gpio, false, false); + ret = iproc_gpio_set_pull(chip, gpio, false, false); if (ret < 0) goto out; break; case PIN_CONFIG_DRIVE_STRENGTH: - ret = cygnus_gpio_set_strength(chip, gpio, arg); + ret = iproc_gpio_set_strength(chip, gpio, arg); if (ret < 0) goto out; break; @@ -589,20 +590,20 @@ out: return ret; } -static const struct pinconf_ops cygnus_pconf_ops = { +static const struct pinconf_ops iproc_pconf_ops = { .is_generic = true, - .pin_config_get = cygnus_pin_config_get, - .pin_config_set = cygnus_pin_config_set, + .pin_config_get = iproc_pin_config_get, + .pin_config_set = iproc_pin_config_set, }; /* - * Cygnus GPIO controller supports some PINCONF related configurations such as + * Iproc GPIO controller supports some PINCONF related configurations such as * pull up, pull down, and drive strength, when the pin is configured to GPIO * * Here a local pinctrl device is created with simple 1-to-1 pin mapping to the * local GPIO pins */ -static int cygnus_gpio_register_pinconf(struct cygnus_gpio *chip) +static int iproc_gpio_register_pinconf(struct iproc_gpio *chip) { struct pinctrl_desc *pctldesc = &chip->pctldesc; struct pinctrl_pin_desc *pins; @@ -622,10 +623,10 @@ static int cygnus_gpio_register_pinconf(struct cygnus_gpio *chip) } pctldesc->name = dev_name(chip->dev); - pctldesc->pctlops = &cygnus_pctrl_ops; + pctldesc->pctlops = &iproc_pctrl_ops; pctldesc->pins = pins; pctldesc->npins = gc->ngpio; - pctldesc->confops = &cygnus_pconf_ops; + pctldesc->confops = &iproc_pconf_ops; chip->pctl = pinctrl_register(pctldesc, chip->dev, chip); if (IS_ERR(chip->pctl)) { @@ -636,59 +637,27 @@ static int cygnus_gpio_register_pinconf(struct cygnus_gpio *chip) return 0; } -static void 
cygnus_gpio_unregister_pinconf(struct cygnus_gpio *chip) +static void iproc_gpio_unregister_pinconf(struct iproc_gpio *chip) { - if (chip->pctl) - pinctrl_unregister(chip->pctl); + pinctrl_unregister(chip->pctl); } -struct cygnus_gpio_data { - unsigned num_gpios; -}; - -static const struct cygnus_gpio_data cygnus_cmm_gpio_data = { - .num_gpios = 24, -}; - -static const struct cygnus_gpio_data cygnus_asiu_gpio_data = { - .num_gpios = 146, +static const struct of_device_id iproc_gpio_of_match[] = { + { .compatible = "brcm,cygnus-ccm-gpio" }, + { .compatible = "brcm,cygnus-asiu-gpio" }, + { .compatible = "brcm,cygnus-crmu-gpio" }, + { .compatible = "brcm,iproc-gpio" }, + { } }; -static const struct cygnus_gpio_data cygnus_crmu_gpio_data = { - .num_gpios = 6, -}; - -static const struct of_device_id cygnus_gpio_of_match[] = { - { - .compatible = "brcm,cygnus-ccm-gpio", - .data = &cygnus_cmm_gpio_data, - }, - { - .compatible = "brcm,cygnus-asiu-gpio", - .data = &cygnus_asiu_gpio_data, - }, - { - .compatible = "brcm,cygnus-crmu-gpio", - .data = &cygnus_crmu_gpio_data, - } -}; - -static int cygnus_gpio_probe(struct platform_device *pdev) +static int iproc_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; - struct cygnus_gpio *chip; + struct iproc_gpio *chip; struct gpio_chip *gc; u32 ngpios; int irq, ret; - const struct of_device_id *match; - const struct cygnus_gpio_data *gpio_data; - - match = of_match_device(cygnus_gpio_of_match, dev); - if (!match) - return -ENODEV; - gpio_data = match->data; - ngpios = gpio_data->num_gpios; chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) @@ -713,6 +682,11 @@ static int cygnus_gpio_probe(struct platform_device *pdev) } } + if (of_property_read_u32(dev->of_node, "ngpios", &ngpios)) { + dev_err(&pdev->dev, "missing ngpios DT property\n"); + return -ENODEV; + } + spin_lock_init(&chip->lock); gc = &chip->gc; @@ -722,12 +696,12 @@ static int cygnus_gpio_probe(struct platform_device *pdev) gc->label = dev_name(dev); gc->dev = dev; gc->of_node = dev->of_node; - gc->request = cygnus_gpio_request; - gc->free = cygnus_gpio_free; - gc->direction_input = cygnus_gpio_direction_input; - gc->direction_output = cygnus_gpio_direction_output; - gc->set = cygnus_gpio_set; - gc->get = cygnus_gpio_get; + gc->request = iproc_gpio_request; + gc->free = iproc_gpio_free; + gc->direction_input = iproc_gpio_direction_input; + gc->direction_output = iproc_gpio_direction_output; + gc->set = iproc_gpio_set; + gc->get = iproc_gpio_get; chip->pinmux_is_supported = of_property_read_bool(dev->of_node, "gpio-ranges"); @@ -738,7 +712,7 @@ static int cygnus_gpio_probe(struct platform_device *pdev) return ret; } - ret = cygnus_gpio_register_pinconf(chip); + ret = iproc_gpio_register_pinconf(chip); if (ret) { dev_err(dev, "unable to register pinconf\n"); goto err_rm_gpiochip; @@ -747,21 +721,21 @@ static int cygnus_gpio_probe(struct platform_device *pdev) /* optional GPIO interrupt support */ irq = platform_get_irq(pdev, 0); if (irq) { - ret = gpiochip_irqchip_add(gc, &cygnus_gpio_irq_chip, 0, + ret = gpiochip_irqchip_add(gc, &iproc_gpio_irq_chip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "no GPIO irqchip\n"); goto err_unregister_pinconf; } - gpiochip_set_chained_irqchip(gc, &cygnus_gpio_irq_chip, irq, - cygnus_gpio_irq_handler); + gpiochip_set_chained_irqchip(gc, &iproc_gpio_irq_chip, irq, + iproc_gpio_irq_handler); } return 0; err_unregister_pinconf: - cygnus_gpio_unregister_pinconf(chip); + 
iproc_gpio_unregister_pinconf(chip); err_rm_gpiochip: gpiochip_remove(gc); @@ -769,16 +743,16 @@ err_rm_gpiochip: return ret; } -static struct platform_driver cygnus_gpio_driver = { +static struct platform_driver iproc_gpio_driver = { .driver = { - .name = "cygnus-gpio", - .of_match_table = cygnus_gpio_of_match, + .name = "iproc-gpio", + .of_match_table = iproc_gpio_of_match, }, - .probe = cygnus_gpio_probe, + .probe = iproc_gpio_probe, }; -static int __init cygnus_gpio_init(void) +static int __init iproc_gpio_init(void) { - return platform_driver_probe(&cygnus_gpio_driver, cygnus_gpio_probe); + return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); } -arch_initcall_sync(cygnus_gpio_init); +arch_initcall_sync(iproc_gpio_init); diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c new file mode 100644 index 000000000000..725c36f917f9 --- /dev/null +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c @@ -0,0 +1,749 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This file contains the Broadcom Northstar Plus (NSP) GPIO driver that + * supports the chipCommonA GPIO controller. Basic PINCONF such as bias, + * pull up/down, slew and drive strength are also supported in this driver. + * + * Pins from the chipCommonA GPIO can be individually muxed to GPIO function, + * through the interaction with the NSP IOMUX controller. 
+ */ + +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/slab.h> + +#include "../pinctrl-utils.h" + +#define NSP_CHIP_A_INT_STATUS 0x00 +#define NSP_CHIP_A_INT_MASK 0x04 +#define NSP_GPIO_DATA_IN 0x40 +#define NSP_GPIO_DATA_OUT 0x44 +#define NSP_GPIO_OUT_EN 0x48 +#define NSP_GPIO_INT_POLARITY 0x50 +#define NSP_GPIO_INT_MASK 0x54 +#define NSP_GPIO_EVENT 0x58 +#define NSP_GPIO_EVENT_INT_MASK 0x5c +#define NSP_GPIO_EVENT_INT_POLARITY 0x64 +#define NSP_CHIP_A_GPIO_INT_BIT 0x01 + +/* I/O parameters offset for chipcommon A GPIO */ +#define NSP_GPIO_DRV_CTRL 0x00 +#define NSP_GPIO_HYSTERESIS_EN 0x10 +#define NSP_GPIO_SLEW_RATE_EN 0x14 +#define NSP_PULL_UP_EN 0x18 +#define NSP_PULL_DOWN_EN 0x1c +#define GPIO_DRV_STRENGTH_BITS 0x03 + +/* + * nsp GPIO core + * + * @dev: pointer to device + * @base: I/O register base for nsp GPIO controller + * @io_ctrl: I/O register base for PINCONF support outside the GPIO block + * @gc: GPIO chip + * @pctl: pointer to pinctrl_dev + * @pctldesc: pinctrl descriptor + * @irq_domain: pointer to irq domain + * @lock: lock to protect access to I/O registers + */ +struct nsp_gpio { + struct device *dev; + void __iomem *base; + void __iomem *io_ctrl; + struct gpio_chip gc; + struct pinctrl_dev *pctl; + struct pinctrl_desc pctldesc; + struct irq_domain *irq_domain; + spinlock_t lock; +}; + +enum base_type { + REG, + IO_CTRL +}; + +static inline struct nsp_gpio *to_nsp_gpio(struct gpio_chip *gc) +{ + return container_of(gc, struct nsp_gpio, gc); +} + +/* + * Mapping from PINCONF pins to GPIO pins is 1-to-1 + */ +static inline unsigned nsp_pin_to_gpio(unsigned pin) +{ + return pin; +} + +/* + * nsp_set_bit - set or clear one bit (corresponding to the GPIO pin) in a + * nsp GPIO register + * + * @nsp_gpio: nsp GPIO device + * @base_type: reg base to modify + * @reg: register offset + * @gpio: GPIO pin + * @set: set or clear + */ +static inline void nsp_set_bit(struct nsp_gpio *chip, enum base_type address, + unsigned int reg, unsigned gpio, bool set) +{ + u32 val; + void __iomem *base_address; + + if (address == IO_CTRL) + base_address = chip->io_ctrl; + else + base_address = chip->base; + + val = readl(base_address + reg); + if (set) + val |= BIT(gpio); + else + val &= ~BIT(gpio); + + writel(val, base_address + reg); +} + +/* + * nsp_get_bit - get one bit (corresponding to the GPIO pin) in a + * nsp GPIO register + */ +static inline bool nsp_get_bit(struct nsp_gpio *chip, enum base_type address, + unsigned int reg, unsigned gpio) +{ + if (address == IO_CTRL) + return !!(readl(chip->io_ctrl + reg) & BIT(gpio)); + else + return !!(readl(chip->base + reg) & BIT(gpio)); +} + +static irqreturn_t nsp_gpio_irq_handler(int irq, void *data) +{ + struct nsp_gpio *chip = (struct nsp_gpio *)data; + struct gpio_chip gc = chip->gc; + int bit; + unsigned long int_bits = 0; + u32 int_status; + + /* go through the entire GPIOs and handle all interrupts */ + int_status = readl(chip->base + NSP_CHIP_A_INT_STATUS); + if (int_status & NSP_CHIP_A_GPIO_INT_BIT) { + unsigned int event, level; + + /* Get level and edge interrupts */ + event = readl(chip->base + NSP_GPIO_EVENT_INT_MASK) & + readl(chip->base + NSP_GPIO_EVENT); + level = readl(chip->base + NSP_GPIO_DATA_IN) ^ + readl(chip->base + 
NSP_GPIO_INT_POLARITY); + level &= readl(chip->base + NSP_GPIO_INT_MASK); + int_bits = level | event; + + for_each_set_bit(bit, &int_bits, gc.ngpio) { + /* + * Clear the interrupt before invoking the + * handler, so we do not leave any window + */ + writel(BIT(bit), chip->base + NSP_GPIO_EVENT); + generic_handle_irq( + irq_linear_revmap(chip->irq_domain, bit)); + } + } + + return int_bits ? IRQ_HANDLED : IRQ_NONE; +} + +static void nsp_gpio_irq_ack(struct irq_data *d) +{ + struct nsp_gpio *chip = irq_data_get_irq_chip_data(d); + unsigned gpio = d->hwirq; + u32 val = BIT(gpio); + u32 trigger_type; + + trigger_type = irq_get_trigger_type(d->irq); + if (trigger_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) + nsp_set_bit(chip, REG, NSP_GPIO_EVENT, gpio, val); +} + +/* + * nsp_gpio_irq_set_mask - mask/unmask a GPIO interrupt + * + * @d: IRQ chip data + * @unmask: mask/unmask GPIO interrupt + */ +static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask) +{ + struct nsp_gpio *chip = irq_data_get_irq_chip_data(d); + unsigned gpio = d->hwirq; + u32 trigger_type; + + trigger_type = irq_get_trigger_type(d->irq); + if (trigger_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) + nsp_set_bit(chip, REG, NSP_GPIO_EVENT_INT_MASK, gpio, unmask); + else + nsp_set_bit(chip, REG, NSP_GPIO_INT_MASK, gpio, unmask); +} + +static void nsp_gpio_irq_mask(struct irq_data *d) +{ + struct nsp_gpio *chip = irq_data_get_irq_chip_data(d); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + nsp_gpio_irq_set_mask(d, false); + spin_unlock_irqrestore(&chip->lock, flags); +} + +static void nsp_gpio_irq_unmask(struct irq_data *d) +{ + struct nsp_gpio *chip = irq_data_get_irq_chip_data(d); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + nsp_gpio_irq_set_mask(d, true); + spin_unlock_irqrestore(&chip->lock, flags); +} + +static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct nsp_gpio *chip = irq_data_get_irq_chip_data(d); + unsigned gpio = d->hwirq; + bool level_low; + bool falling; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + falling = nsp_get_bit(chip, REG, NSP_GPIO_EVENT_INT_POLARITY, gpio); + level_low = nsp_get_bit(chip, REG, NSP_GPIO_INT_POLARITY, gpio); + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + falling = false; + break; + + case IRQ_TYPE_EDGE_FALLING: + falling = true; + break; + + case IRQ_TYPE_LEVEL_HIGH: + level_low = false; + break; + + case IRQ_TYPE_LEVEL_LOW: + level_low = true; + break; + + default: + dev_err(chip->dev, "invalid GPIO IRQ type 0x%x\n", + type); + spin_unlock_irqrestore(&chip->lock, flags); + return -EINVAL; + } + + nsp_set_bit(chip, REG, NSP_GPIO_EVENT_INT_POLARITY, gpio, falling); + nsp_set_bit(chip, REG, NSP_GPIO_INT_POLARITY, gpio, level_low); + spin_unlock_irqrestore(&chip->lock, flags); + + dev_dbg(chip->dev, "gpio:%u level_low:%s falling:%s\n", gpio, + level_low ? "true" : "false", falling ? 
"true" : "false"); + return 0; +} + +static struct irq_chip nsp_gpio_irq_chip = { + .name = "gpio-a", + .irq_enable = nsp_gpio_irq_unmask, + .irq_disable = nsp_gpio_irq_mask, + .irq_ack = nsp_gpio_irq_ack, + .irq_mask = nsp_gpio_irq_mask, + .irq_unmask = nsp_gpio_irq_unmask, + .irq_set_type = nsp_gpio_irq_set_type, +}; + +/* + * Request the nsp IOMUX pinmux controller to mux individual pins to GPIO + */ +static int nsp_gpio_request(struct gpio_chip *gc, unsigned offset) +{ + unsigned gpio = gc->base + offset; + + return pinctrl_request_gpio(gpio); +} + +static void nsp_gpio_free(struct gpio_chip *gc, unsigned offset) +{ + unsigned gpio = gc->base + offset; + + pinctrl_free_gpio(gpio); +} + +static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio) +{ + struct nsp_gpio *chip = to_nsp_gpio(gc); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + nsp_set_bit(chip, REG, NSP_GPIO_OUT_EN, gpio, false); + spin_unlock_irqrestore(&chip->lock, flags); + + dev_dbg(chip->dev, "gpio:%u set input\n", gpio); + return 0; +} + +static int nsp_gpio_direction_output(struct gpio_chip *gc, unsigned gpio, + int val) +{ + struct nsp_gpio *chip = to_nsp_gpio(gc); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + nsp_set_bit(chip, REG, NSP_GPIO_OUT_EN, gpio, true); + nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val)); + spin_unlock_irqrestore(&chip->lock, flags); + + dev_dbg(chip->dev, "gpio:%u set output, value:%d\n", gpio, val); + return 0; +} + +static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) +{ + struct nsp_gpio *chip = to_nsp_gpio(gc); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val)); + spin_unlock_irqrestore(&chip->lock, flags); + + dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val); +} + +static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio) +{ + struct nsp_gpio *chip = to_nsp_gpio(gc); + + return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio)); +} + +static int nsp_gpio_to_irq(struct gpio_chip *gc, unsigned offset) +{ + struct nsp_gpio *chip = to_nsp_gpio(gc); + + return irq_linear_revmap(chip->irq_domain, offset); +} + +static int nsp_get_groups_count(struct pinctrl_dev *pctldev) +{ + return 1; +} + +/* + * Only one group: "gpio_grp", since this local pinctrl device only performs + * GPIO specific PINCONF configurations + */ +static const char *nsp_get_group_name(struct pinctrl_dev *pctldev, + unsigned selector) +{ + return "gpio_grp"; +} + +static const struct pinctrl_ops nsp_pctrl_ops = { + .get_groups_count = nsp_get_groups_count, + .get_group_name = nsp_get_group_name, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinctrl_utils_dt_free_map, +}; + +static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u16 slew) +{ + if (slew) + nsp_set_bit(chip, IO_CTRL, NSP_GPIO_SLEW_RATE_EN, gpio, true); + else + nsp_set_bit(chip, IO_CTRL, NSP_GPIO_SLEW_RATE_EN, gpio, false); + + return 0; +} + +static int nsp_gpio_set_pull(struct nsp_gpio *chip, unsigned gpio, + bool pull_up, bool pull_down) +{ + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + nsp_set_bit(chip, IO_CTRL, NSP_PULL_DOWN_EN, gpio, pull_down); + nsp_set_bit(chip, IO_CTRL, NSP_PULL_UP_EN, gpio, pull_up); + spin_unlock_irqrestore(&chip->lock, flags); + + dev_dbg(chip->dev, "gpio:%u set pullup:%d pulldown: %d\n", + gpio, pull_up, pull_down); + return 0; +} + +static void nsp_gpio_get_pull(struct nsp_gpio *chip, unsigned gpio, + 
bool *pull_up, bool *pull_down) +{ + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + *pull_up = nsp_get_bit(chip, IO_CTRL, NSP_PULL_UP_EN, gpio); + *pull_down = nsp_get_bit(chip, IO_CTRL, NSP_PULL_DOWN_EN, gpio); + spin_unlock_irqrestore(&chip->lock, flags); +} + +static int nsp_gpio_set_strength(struct nsp_gpio *chip, unsigned gpio, + u16 strength) +{ + u32 offset, shift, i; + u32 val; + unsigned long flags; + + /* make sure drive strength is supported */ + if (strength < 2 || strength > 16 || (strength % 2)) + return -ENOTSUPP; + + shift = gpio; + offset = NSP_GPIO_DRV_CTRL; + dev_dbg(chip->dev, "gpio:%u set drive strength:%d mA\n", gpio, + strength); + spin_lock_irqsave(&chip->lock, flags); + strength = (strength / 2) - 1; + for (i = GPIO_DRV_STRENGTH_BITS; i > 0; i--) { + val = readl(chip->io_ctrl + offset); + val &= ~BIT(shift); + val |= ((strength >> (i-1)) & 0x1) << shift; + writel(val, chip->io_ctrl + offset); + offset += 4; + } + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int nsp_gpio_get_strength(struct nsp_gpio *chip, unsigned gpio, + u16 *strength) +{ + unsigned int offset, shift; + u32 val; + unsigned long flags; + int i; + + offset = NSP_GPIO_DRV_CTRL; + shift = gpio; + + spin_lock_irqsave(&chip->lock, flags); + *strength = 0; + for (i = (GPIO_DRV_STRENGTH_BITS - 1); i >= 0; i--) { + val = readl(chip->io_ctrl + offset) & BIT(shift); + val >>= shift; + *strength += (val << i); + offset += 4; + } + + /* convert to mA */ + *strength = (*strength + 1) * 2; + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +int nsp_pin_config_group_get(struct pinctrl_dev *pctldev, unsigned selector, + unsigned long *config) +{ + return 0; +} + +int nsp_pin_config_group_set(struct pinctrl_dev *pctldev, unsigned selector, + unsigned long *configs, unsigned num_configs) +{ + return 0; +} + +static int nsp_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, + unsigned long *config) +{ + struct nsp_gpio *chip = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + unsigned int gpio; + u16 arg = 0; + bool pull_up, pull_down; + int ret; + + gpio = nsp_pin_to_gpio(pin); + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + nsp_gpio_get_pull(chip, gpio, &pull_up, &pull_down); + if ((pull_up == false) && (pull_down == false)) + return 0; + else + return -EINVAL; + + case PIN_CONFIG_BIAS_PULL_UP: + nsp_gpio_get_pull(chip, gpio, &pull_up, &pull_down); + if (pull_up) + return 0; + else + return -EINVAL; + + case PIN_CONFIG_BIAS_PULL_DOWN: + nsp_gpio_get_pull(chip, gpio, &pull_up, &pull_down); + if (pull_down) + return 0; + else + return -EINVAL; + + case PIN_CONFIG_DRIVE_STRENGTH: + ret = nsp_gpio_get_strength(chip, gpio, &arg); + if (ret) + return ret; + *config = pinconf_to_config_packed(param, arg); + return 0; + + default: + return -ENOTSUPP; + } +} + +static int nsp_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, + unsigned long *configs, unsigned num_configs) +{ + struct nsp_gpio *chip = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param; + u16 arg; + unsigned int i, gpio; + int ret = -ENOTSUPP; + + gpio = nsp_pin_to_gpio(pin); + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + ret = nsp_gpio_set_pull(chip, gpio, false, false); + if (ret < 0) + goto out; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + ret = nsp_gpio_set_pull(chip, gpio, 
true, false); + if (ret < 0) + goto out; + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + ret = nsp_gpio_set_pull(chip, gpio, false, true); + if (ret < 0) + goto out; + break; + + case PIN_CONFIG_DRIVE_STRENGTH: + ret = nsp_gpio_set_strength(chip, gpio, arg); + if (ret < 0) + goto out; + break; + + case PIN_CONFIG_SLEW_RATE: + ret = nsp_gpio_set_slew(chip, gpio, arg); + if (ret < 0) + goto out; + break; + + default: + dev_err(chip->dev, "invalid configuration\n"); + return -ENOTSUPP; + } + } + +out: + return ret; +} + +static const struct pinconf_ops nsp_pconf_ops = { + .is_generic = true, + .pin_config_get = nsp_pin_config_get, + .pin_config_set = nsp_pin_config_set, + .pin_config_group_get = nsp_pin_config_group_get, + .pin_config_group_set = nsp_pin_config_group_set, +}; + +/* + * NSP GPIO controller supports some PINCONF related configurations such as + * pull up, pull down, slew and drive strength, when the pin is configured + * to GPIO. + * + * Here a local pinctrl device is created with simple 1-to-1 pin mapping to the + * local GPIO pins + */ +static int nsp_gpio_register_pinconf(struct nsp_gpio *chip) +{ + struct pinctrl_desc *pctldesc = &chip->pctldesc; + struct pinctrl_pin_desc *pins; + struct gpio_chip *gc = &chip->gc; + int i; + + pins = devm_kcalloc(chip->dev, gc->ngpio, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + for (i = 0; i < gc->ngpio; i++) { + pins[i].number = i; + pins[i].name = devm_kasprintf(chip->dev, GFP_KERNEL, + "gpio-%d", i); + if (!pins[i].name) + return -ENOMEM; + } + pctldesc->name = dev_name(chip->dev); + pctldesc->pctlops = &nsp_pctrl_ops; + pctldesc->pins = pins; + pctldesc->npins = gc->ngpio; + pctldesc->confops = &nsp_pconf_ops; + + chip->pctl = pinctrl_register(pctldesc, chip->dev, chip); + if (IS_ERR(chip->pctl)) { + dev_err(chip->dev, "unable to register pinctrl device\n"); + return PTR_ERR(chip->pctl); + } + + return 0; +} + +static const struct of_device_id nsp_gpio_of_match[] = { + {.compatible = "brcm,nsp-gpio-a",}, + {} +}; + +static int nsp_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nsp_gpio *chip; + struct gpio_chip *gc; + u32 val, count; + int irq, ret; + + if (of_property_read_u32(pdev->dev.of_node, "ngpios", &val)) { + dev_err(&pdev->dev, "Missing ngpios OF property\n"); + return -ENODEV; + } + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->dev = dev; + platform_set_drvdata(pdev, chip); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->base = devm_ioremap_resource(dev, res); + if (IS_ERR(chip->base)) { + dev_err(dev, "unable to map I/O memory\n"); + return PTR_ERR(chip->base); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + chip->io_ctrl = devm_ioremap_resource(dev, res); + if (IS_ERR(chip->io_ctrl)) { + dev_err(dev, "unable to map I/O memory\n"); + return PTR_ERR(chip->io_ctrl); + } + + spin_lock_init(&chip->lock); + gc = &chip->gc; + gc->base = -1; + gc->can_sleep = false; + gc->ngpio = val; + gc->label = dev_name(dev); + gc->dev = dev; + gc->of_node = dev->of_node; + gc->request = nsp_gpio_request; + gc->free = nsp_gpio_free; + gc->direction_input = nsp_gpio_direction_input; + gc->direction_output = nsp_gpio_direction_output; + gc->set = nsp_gpio_set; + gc->get = nsp_gpio_get; + gc->to_irq = nsp_gpio_to_irq; + + /* optional GPIO interrupt support */ + irq = platform_get_irq(pdev, 0); + if (irq > 0) { + /* Create irq domain so that each pin can be assigned an IRQ.*/ + 
chip->irq_domain = irq_domain_add_linear(gc->of_node, gc->ngpio, + &irq_domain_simple_ops, + chip); + if (!chip->irq_domain) { + dev_err(&pdev->dev, "Couldn't allocate IRQ domain\n"); + return -ENXIO; + } + + /* Map each gpio to an IRQ and set the handler for gpiolib. */ + for (count = 0; count < gc->ngpio; count++) { + int irq = irq_create_mapping(chip->irq_domain, count); + + irq_set_chip_and_handler(irq, &nsp_gpio_irq_chip, + handle_simple_irq); + irq_set_chip_data(irq, chip); + } + + /* Install ISR for this GPIO controller. */ + ret = devm_request_irq(&pdev->dev, irq, nsp_gpio_irq_handler, + IRQF_SHARED, "gpio-a", chip); + if (ret) { + dev_err(&pdev->dev, "Unable to request IRQ%d: %d\n", + irq, ret); + goto err_rm_gpiochip; + } + + val = readl(chip->base + NSP_CHIP_A_INT_MASK); + val = val | NSP_CHIP_A_GPIO_INT_BIT; + writel(val, (chip->base + NSP_CHIP_A_INT_MASK)); + } + + ret = gpiochip_add(gc); + if (ret < 0) { + dev_err(dev, "unable to add GPIO chip\n"); + return ret; + } + + ret = nsp_gpio_register_pinconf(chip); + if (ret) { + dev_err(dev, "unable to register pinconf\n"); + goto err_rm_gpiochip; + } + + return 0; + +err_rm_gpiochip: + gpiochip_remove(gc); + + return ret; +} + +static struct platform_driver nsp_gpio_driver = { + .driver = { + .name = "nsp-gpio-a", + .of_match_table = nsp_gpio_of_match, + }, + .probe = nsp_gpio_probe, +}; + +static int __init nsp_gpio_init(void) +{ + return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); +} +arch_initcall_sync(nsp_gpio_init); diff --git a/drivers/pinctrl/berlin/Makefile b/drivers/pinctrl/berlin/Makefile index 06f94029ad66..6f641ce2c830 100644 --- a/drivers/pinctrl/berlin/Makefile +++ b/drivers/pinctrl/berlin/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_PINCTRL_BERLIN) += berlin.o +obj-y += berlin.o obj-$(CONFIG_PINCTRL_BERLIN_BG2) += berlin-bg2.o obj-$(CONFIG_PINCTRL_BERLIN_BG2CD) += berlin-bg2cd.o obj-$(CONFIG_PINCTRL_BERLIN_BG2Q) += berlin-bg2q.o diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c index 37a037543d29..587d1ff6210e 100644 --- a/drivers/pinctrl/freescale/pinctrl-vf610.c +++ b/drivers/pinctrl/freescale/pinctrl-vf610.c @@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = { static struct imx_pinctrl_soc_info vf610_pinctrl_info = { .pins = vf610_pinctrl_pads, .npins = ARRAY_SIZE(vf610_pinctrl_pads), - .flags = SHARE_MUX_CONF_REG, + .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID, }; static const struct of_device_id vf610_pinctrl_of_match[] = { diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index e42d5d4183f5..5979d38c46b2 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c @@ -28,6 +28,7 @@ .padcfglock_offset = BXT_PADCFGLOCK, \ .hostown_offset = BXT_HOSTSW_OWN, \ .ie_offset = BXT_GPI_IE, \ + .gpp_size = 32, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ } diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 392e28d3f48d..26f6b6ffea5b 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -25,9 +25,6 @@ #include "pinctrl-intel.h" -/* Maximum number of pads in each group */ -#define NPADS_IN_GPP 24 - /* Offset from regs */ #define PADBAR 0x00c #define GPI_IS 0x100 @@ -37,6 +34,7 @@ #define PADOWN_BITS 4 #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) #define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) +#define PADOWN_GPP(p) ((p) / 8) /* Offset from pad_regs */ #define 
PADCFG0 0x000 @@ -142,7 +140,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin, static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) { const struct intel_community *community; - unsigned padno, gpp, gpp_offset, offset; + unsigned padno, gpp, offset, group; void __iomem *padown; community = intel_get_community(pctrl, pin); @@ -152,9 +150,9 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) return true; padno = pin_to_padno(community, pin); - gpp = padno / NPADS_IN_GPP; - gpp_offset = padno % NPADS_IN_GPP; - offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; + group = padno / community->gpp_size; + gpp = PADOWN_GPP(padno % community->gpp_size); + offset = community->padown_offset + 0x10 * group + gpp * 4; padown = community->regs + offset; return !(readl(padown) & PADOWN_MASK(padno)); @@ -173,11 +171,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin) return false; padno = pin_to_padno(community, pin); - gpp = padno / NPADS_IN_GPP; + gpp = padno / community->gpp_size; offset = community->hostown_offset + gpp * 4; hostown = community->regs + offset; - return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); + return !(readl(hostown) & BIT(padno % community->gpp_size)); } static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) @@ -193,7 +191,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) return false; padno = pin_to_padno(community, pin); - gpp = padno / NPADS_IN_GPP; + gpp = padno / community->gpp_size; /* * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, @@ -202,12 +200,12 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) */ offset = community->padcfglock_offset + gpp * 8; value = readl(community->regs + offset); - if (value & BIT(pin % NPADS_IN_GPP)) + if (value & BIT(pin % community->gpp_size)) return true; offset = community->padcfglock_offset + 4 + gpp * 8; value = readl(community->regs + offset); - if (value & BIT(pin % NPADS_IN_GPP)) + if (value & BIT(pin % community->gpp_size)) return true; return false; @@ -663,8 +661,8 @@ static void intel_gpio_irq_ack(struct irq_data *d) community = intel_get_community(pctrl, pin); if (community) { unsigned padno = pin_to_padno(community, pin); - unsigned gpp_offset = padno % NPADS_IN_GPP; - unsigned gpp = padno / NPADS_IN_GPP; + unsigned gpp_offset = padno % community->gpp_size; + unsigned gpp = padno / community->gpp_size; writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); } @@ -685,8 +683,8 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) community = intel_get_community(pctrl, pin); if (community) { unsigned padno = pin_to_padno(community, pin); - unsigned gpp_offset = padno % NPADS_IN_GPP; - unsigned gpp = padno / NPADS_IN_GPP; + unsigned gpp_offset = padno % community->gpp_size; + unsigned gpp = padno / community->gpp_size; void __iomem *reg; u32 value; @@ -780,8 +778,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on) return -EINVAL; padno = pin_to_padno(community, pin); - gpp = padno / NPADS_IN_GPP; - gpp_offset = padno % NPADS_IN_GPP; + gpp = padno / community->gpp_size; + gpp_offset = padno % community->gpp_size; /* Clear the existing wake status */ writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); @@ -819,14 +817,14 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, /* Only interrupts that are enabled */ pending &= 
enabled; - for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { + for_each_set_bit(gpp_offset, &pending, community->gpp_size) { unsigned padno, irq; /* * The last group in community can have less pins * than NPADS_IN_GPP. */ - padno = gpp_offset + gpp * NPADS_IN_GPP; + padno = gpp_offset + gpp * community->gpp_size; if (padno >= community->npins) break; @@ -1002,7 +1000,8 @@ int intel_pinctrl_probe(struct platform_device *pdev, community->regs = regs; community->pad_regs = regs + padbar; - community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); + community->ngpps = DIV_ROUND_UP(community->npins, + community->gpp_size); } irq = platform_get_irq(pdev, 0); diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index 4ec8b572a288..b60215793017 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -55,6 +55,8 @@ struct intel_function { * ACPI). * @ie_offset: Register offset of GPI_IE from @regs. * @pin_base: Starting pin of pins in this community + * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK, + * HOSTSW_OWN, GPI_IS, GPI_IE, etc. * @npins: Number of pins in this community * @regs: Community specific common registers (reserved for core driver) * @pad_regs: Community specific pad registers (reserved for core driver) @@ -68,6 +70,7 @@ struct intel_community { unsigned hostown_offset; unsigned ie_offset; unsigned pin_base; + unsigned gpp_size; size_t npins; void __iomem *regs; void __iomem *pad_regs; diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 1de9ae5010db..c725a5313b4e 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -30,6 +30,7 @@ .padcfglock_offset = SPT_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ .ie_offset = SPT_GPI_IE, \ + .gpp_size = 24, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ } diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c index b317b0b664ea..98e0bebfdf92 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c @@ -351,7 +351,7 @@ static int __init mtk_pinctrl_init(void) return platform_driver_register(&mtk_pinctrl_driver); } -module_init(mtk_pinctrl_init); +arch_initcall(mtk_pinctrl_init); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MediaTek MT8127 Pinctrl Driver"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c index 404f1178511d..1c153b860f36 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c @@ -366,7 +366,7 @@ static int __init mtk_pinctrl_init(void) return platform_driver_register(&mtk_pinctrl_driver); } -module_init(mtk_pinctrl_init); +arch_initcall(mtk_pinctrl_init); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MediaTek Pinctrl Driver"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c index ad271840d865..a62514eb2129 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c @@ -394,7 +394,7 @@ static int __init mtk_pinctrl_init(void) return platform_driver_register(&mtk_pinctrl_driver); } -module_init(mtk_pinctrl_init); +arch_initcall(mtk_pinctrl_init); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MediaTek Pinctrl Driver"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c 
index 5c717275a7fa..e22cbaf9f9cf 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -509,6 +509,9 @@ static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, err = pinconf_generic_parse_dt_config(node, pctldev, &configs, &num_configs); + if (err) + return err; + if (num_configs) has_config = 1; @@ -520,21 +523,23 @@ static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, if (has_config && num_pins >= 1) maps_per_pin++; - if (!num_pins || !maps_per_pin) - return -EINVAL; + if (!num_pins || !maps_per_pin) { + err = -EINVAL; + goto exit; + } reserve = num_pins * maps_per_pin; err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, reserve); if (err < 0) - goto fail; + goto exit; for (i = 0; i < num_pins; i++) { err = of_property_read_u32_index(node, "pinmux", i, &pinfunc); if (err) - goto fail; + goto exit; pin = MTK_GET_PIN_NO(pinfunc); func = MTK_GET_PIN_FUNC(pinfunc); @@ -543,20 +548,21 @@ static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, func >= ARRAY_SIZE(mtk_gpio_functions)) { dev_err(pctl->dev, "invalid pins value.\n"); err = -EINVAL; - goto fail; + goto exit; } grp = mtk_pctrl_find_group_by_pin(pctl, pin); if (!grp) { dev_err(pctl->dev, "unable to match pin %d to group\n", pin); - return -EINVAL; + err = -EINVAL; + goto exit; } err = mtk_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map, reserved_maps, num_maps); if (err < 0) - goto fail; + goto exit; if (has_config) { err = pinctrl_utils_add_map_configs(pctldev, map, @@ -564,13 +570,14 @@ static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, configs, num_configs, PIN_MAP_TYPE_CONFIGS_GROUP); if (err < 0) - goto fail; + goto exit; } } - return 0; + err = 0; -fail: +exit: + kfree(configs); return err; } @@ -591,6 +598,7 @@ static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, &reserved_maps, num_maps); if (ret < 0) { pinctrl_utils_dt_free_map(pctldev, *map, *num_maps); + of_node_put(np); return ret; } } diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile index 554d8af14eeb..18270cd5ea43 100644 --- a/drivers/pinctrl/mvebu/Makefile +++ b/drivers/pinctrl/mvebu/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_PINCTRL_MVEBU) += pinctrl-mvebu.o +obj-y += pinctrl-mvebu.o obj-$(CONFIG_PINCTRL_DOVE) += pinctrl-dove.o obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c index 77d2221d379d..e4d473811bb3 100644 --- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c +++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c @@ -663,28 +663,20 @@ int mvebu_pinctrl_probe(struct platform_device *pdev) /* assign mpp modes to groups */ for (n = 0; n < soc->nmodes; n++) { struct mvebu_mpp_mode *mode = &soc->modes[n]; - struct mvebu_pinctrl_group *grp = - mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); + struct mvebu_mpp_ctrl_setting *set = &mode->settings[0]; + struct mvebu_pinctrl_group *grp; unsigned num_settings; - if (!grp) { - dev_warn(&pdev->dev, "unknown pinctrl group %d\n", - mode->pid); - continue; - } - - for (num_settings = 0; ;) { - struct mvebu_mpp_ctrl_setting *set = - &mode->settings[num_settings]; - + for (num_settings = 0; ; set++) { if (!set->name) break; - num_settings++; /* skip unsupported settings for this variant */ if (pctl->variant && !(pctl->variant & set->variant)) continue; + num_settings++; + /* find gpio/gpo/gpi settings */ if (strcmp(set->name, 
"gpio") == 0) set->flags = MVEBU_SETTING_GPI | @@ -695,6 +687,17 @@ int mvebu_pinctrl_probe(struct platform_device *pdev) set->flags = MVEBU_SETTING_GPI; } + /* skip modes with no settings for this variant */ + if (!num_settings) + continue; + + grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); + if (!grp) { + dev_warn(&pdev->dev, "unknown pinctrl group %d\n", + mode->pid); + continue; + } + grp->settings = mode->settings; grp->num_settings = num_settings; } diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 099a3442ff42..79e6159712c2 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -220,6 +220,7 @@ static void parse_dt_cfg(struct device_node *np, * parse the config properties into generic pinconfig values. * @np: node containing the pinconfig properties * @configs: array with nconfigs entries containing the generic pinconf values + * must be freed when no longer necessary. * @nconfigs: umber of configurations */ int pinconf_generic_parse_dt_config(struct device_node *np, diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c index fd342dffe4dc..8e9e8eab59ba 100644 --- a/drivers/pinctrl/pinctrl-adi2.c +++ b/drivers/pinctrl/pinctrl-adi2.c @@ -1102,32 +1102,24 @@ static struct platform_driver adi_gpio_driver = { }, }; +static struct platform_driver * const drivers[] = { + &adi_pinctrl_driver, + &adi_gpio_pint_driver, + &adi_gpio_driver, +}; + static int __init adi_pinctrl_setup(void) { int ret; - ret = platform_driver_register(&adi_pinctrl_driver); + ret = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); if (ret) return ret; - ret = platform_driver_register(&adi_gpio_pint_driver); - if (ret) - goto pint_error; - - ret = platform_driver_register(&adi_gpio_driver); - if (ret) - goto gpio_error; - #ifdef CONFIG_PM register_syscore_ops(&gpio_pm_syscore_ops); #endif - return ret; -gpio_error: - platform_driver_unregister(&adi_gpio_pint_driver); -pint_error: - platform_driver_unregister(&adi_pinctrl_driver); - - return ret; + return 0; } arch_initcall(adi_pinctrl_setup); diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 33edd07d9149..d5bdcebc6aa6 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -500,7 +500,8 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, if (!num_pins) { dev_err(pctldev->dev, "no pins found in node %s\n", of_node_full_name(np)); - return -EINVAL; + ret = -EINVAL; + goto exit; } /* @@ -514,19 +515,19 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, reserve); if (ret < 0) - return ret; + goto exit; for (i = 0; i < num_pins; i++) { const char *group, *func; ret = of_property_read_u32_index(np, "pinmux", i, &pinfunc); if (ret) - return ret; + goto exit; ret = atmel_pctl_xlate_pinfunc(pctldev, np, pinfunc, &group, &func); if (ret) - return ret; + goto exit; pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, group, func); @@ -537,11 +538,13 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, configs, num_configs, PIN_MAP_TYPE_CONFIGS_GROUP); if (ret < 0) - return ret; + goto exit; } } - return 0; +exit: + kfree(configs); + return ret; } static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, @@ -1000,7 +1003,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) atmel_pioctrl->irqs[i] = res->start; 
irq_set_chained_handler(res->start, atmel_gpio_irq_handler); irq_set_handler_data(res->start, atmel_pioctrl); - dev_dbg(dev, "bank %i: hwirq=%u\n", i, res->start); + dev_dbg(dev, "bank %i: irq=%pr\n", i, res); } atmel_pioctrl->irq_domain = irq_domain_add_linear(dev->of_node, diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 0d2fc0cff35e..47b625b1b789 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1828,20 +1828,20 @@ static struct platform_driver at91_pinctrl_driver = { .remove = at91_pinctrl_remove, }; +static struct platform_driver * const drivers[] = { + &at91_gpio_driver, + &at91_pinctrl_driver, +}; + static int __init at91_pinctrl_init(void) { - int ret; - - ret = platform_driver_register(&at91_gpio_driver); - if (ret) - return ret; - return platform_driver_register(&at91_pinctrl_driver); + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } arch_initcall(at91_pinctrl_init); static void __exit at91_pinctrl_exit(void) { - platform_driver_unregister(&at91_pinctrl_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(at91_pinctrl_exit); diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h index eb89ba045228..e137d139e494 100644 --- a/drivers/pinctrl/pinctrl-lantiq.h +++ b/drivers/pinctrl/pinctrl-lantiq.h @@ -162,6 +162,14 @@ enum ltq_pin { GPIO53, GPIO54, GPIO55, + GPIO56, + GPIO57, + GPIO58, + GPIO59, + GPIO60, /* 60 */ + GPIO61, + GPIO62, + GPIO63, GPIO64, GPIO65, diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index a0651128e23a..91288265e856 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -614,6 +614,40 @@ static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, } } +#define RK3228_PULL_OFFSET 0x100 + +static void rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + *regmap = info->regmap_base; + *reg = RK3228_PULL_OFFSET; + *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE; + *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4); + + *bit = (pin_num % RK3188_PULL_PINS_PER_REG); + *bit *= RK3188_PULL_BITS_PER_PIN; +} + +#define RK3228_DRV_GRF_OFFSET 0x200 + +static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + *regmap = info->regmap_base; + *reg = RK3228_DRV_GRF_OFFSET; + *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE; + *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4); + + *bit = (pin_num % RK3288_DRV_PINS_PER_REG); + *bit *= RK3288_DRV_BITS_PER_PIN; +} + #define RK3368_PULL_GRF_OFFSET 0x100 #define RK3368_PULL_PMU_OFFSET 0x10 @@ -1258,8 +1292,10 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np, func->groups[i] = child->name; grp = &info->groups[grp_index++]; ret = rockchip_pinctrl_parse_groups(child, grp, info, i++); - if (ret) + if (ret) { + of_node_put(child); return ret; + } } return 0; @@ -1304,6 +1340,7 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev, ret = rockchip_pinctrl_parse_functions(child, info, i++); if (ret) { dev_err(&pdev->dev, "failed to parse function\n"); + of_node_put(child); return ret; } } @@ -2143,6 +2180,23 @@ static struct rockchip_pin_ctrl rk3188_pin_ctrl = { .pull_calc_reg = rk3188_calc_pull_reg_and_bit, }; +static struct 
rockchip_pin_bank rk3228_pin_banks[] = { + PIN_BANK(0, 32, "gpio0"), + PIN_BANK(1, 32, "gpio1"), + PIN_BANK(2, 32, "gpio2"), + PIN_BANK(3, 32, "gpio3"), +}; + +static struct rockchip_pin_ctrl rk3228_pin_ctrl = { + .pin_banks = rk3228_pin_banks, + .nr_banks = ARRAY_SIZE(rk3228_pin_banks), + .label = "RK3228-GPIO", + .type = RK3288, + .grf_mux_offset = 0x0, + .pull_calc_reg = rk3228_calc_pull_reg_and_bit, + .drv_calc_reg = rk3228_calc_drv_reg_and_bit, +}; + static struct rockchip_pin_bank rk3288_pin_banks[] = { PIN_BANK_IOMUX_FLAGS(0, 24, "gpio0", IOMUX_SOURCE_PMU, IOMUX_SOURCE_PMU, @@ -2220,6 +2274,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = { .data = (void *)&rk3066b_pin_ctrl }, { .compatible = "rockchip,rk3188-pinctrl", .data = (void *)&rk3188_pin_ctrl }, + { .compatible = "rockchip,rk3228-pinctrl", + .data = (void *)&rk3228_pin_ctrl }, { .compatible = "rockchip,rk3288-pinctrl", .data = (void *)&rk3288_pin_ctrl }, { .compatible = "rockchip,rk3368-pinctrl", diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index ef04b962c3d5..d24e5f1d1525 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1484,10 +1484,7 @@ static void pcs_irq_free(struct pcs_device *pcs) static void pcs_free_resources(struct pcs_device *pcs) { pcs_irq_free(pcs); - - if (pcs->pctl) - pinctrl_unregister(pcs->pctl); - + pinctrl_unregister(pcs->pctl); pcs_free_funcs(pcs); pcs_free_pingroups(pcs); } diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c index 84a43e612952..bd3aa5a4fd6d 100644 --- a/drivers/pinctrl/pinctrl-tegra-xusb.c +++ b/drivers/pinctrl/pinctrl-tegra-xusb.c @@ -253,8 +253,10 @@ static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl, err = tegra_xusb_padctl_parse_subnode(padctl, np, maps, &reserved_maps, num_maps); - if (err < 0) + if (err < 0) { + of_node_put(np); return err; + } } return 0; diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c index 0fd7fd2b0f72..9da4da219a07 100644 --- a/drivers/pinctrl/pinctrl-tegra.c +++ b/drivers/pinctrl/pinctrl-tegra.c @@ -217,6 +217,7 @@ static int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, if (ret < 0) { pinctrl_utils_dt_free_map(pctldev, *map, *num_maps); + of_node_put(np); return ret; } } diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index ae724bdab3d3..7db74699fda4 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -7,6 +7,7 @@ * publishhed by the Free Software Foundation. 
* * Copyright (C) 2012 John Crispin <blogic@openwrt.org> + * Copyright (C) 2015 Martin Schiller <mschiller@tdt.de> */ #include <linux/err.h> @@ -24,7 +25,7 @@ #include <lantiq_soc.h> -/* we have 3 1/2 banks of 16 bit each */ +/* we have up to 4 banks of 16 bit each */ #define PINS 16 #define PORT3 3 #define PORT(x) (x / PINS) @@ -35,7 +36,7 @@ #define MUX_ALT1 0x2 /* - * each bank has this offset apart from the 1/2 bank that is mixed into the + * each bank has this offset apart from the 4th bank that is mixed into the * other 3 ranges */ #define REG_OFF 0x30 @@ -51,7 +52,7 @@ #define GPIO_PUDSEL(p) (GPIO_BASE(p) + 0x1c) #define GPIO_PUDEN(p) (GPIO_BASE(p) + 0x20) -/* the 1/2 port needs special offsets for some registers */ +/* the 4th port needs special offsets for some registers */ #define GPIO3_OD (GPIO_BASE(0) + 0x24) #define GPIO3_PUDSEL (GPIO_BASE(0) + 0x28) #define GPIO3_PUDEN (GPIO_BASE(0) + 0x2C) @@ -80,17 +81,18 @@ #define FUNC_MUX(f, m) \ { .func = f, .mux = XWAY_MUX_##m, } -#define XWAY_MAX_PIN 32 -#define XR9_MAX_PIN 56 - enum xway_mux { XWAY_MUX_GPIO = 0, XWAY_MUX_SPI, XWAY_MUX_ASC, + XWAY_MUX_USIF, XWAY_MUX_PCI, + XWAY_MUX_CBUS, XWAY_MUX_CGU, XWAY_MUX_EBU, + XWAY_MUX_EBU2, XWAY_MUX_JTAG, + XWAY_MUX_MCD, XWAY_MUX_EXIN, XWAY_MUX_TDM, XWAY_MUX_STP, @@ -103,9 +105,15 @@ enum xway_mux { XWAY_MUX_DFE, XWAY_MUX_SDIO, XWAY_MUX_GPHY, + XWAY_MUX_SSI, + XWAY_MUX_WIFI, XWAY_MUX_NONE = 0xffff, }; +/* --------- DEPRECATED: xr9 related code --------- */ +/* ---------- use xrx100/xrx200 instead ---------- */ +#define XR9_MAX_PIN 56 + static const struct ltq_mfp_pin xway_mfp[] = { /* pin f0 f1 f2 f3 */ MFP_XWAY(GPIO0, GPIO, EXIN, NONE, TDM), @@ -113,7 +121,7 @@ static const struct ltq_mfp_pin xway_mfp[] = { MFP_XWAY(GPIO2, GPIO, CGU, EXIN, GPHY), MFP_XWAY(GPIO3, GPIO, CGU, NONE, PCI), MFP_XWAY(GPIO4, GPIO, STP, NONE, ASC), - MFP_XWAY(GPIO5, GPIO, STP, NONE, GPHY), + MFP_XWAY(GPIO5, GPIO, STP, GPHY, NONE), MFP_XWAY(GPIO6, GPIO, STP, GPT, ASC), MFP_XWAY(GPIO7, GPIO, CGU, PCI, GPHY), MFP_XWAY(GPIO8, GPIO, CGU, NMI, NONE), @@ -152,10 +160,10 @@ static const struct ltq_mfp_pin xway_mfp[] = { MFP_XWAY(GPIO41, GPIO, NONE, NONE, NONE), MFP_XWAY(GPIO42, GPIO, MDIO, NONE, NONE), MFP_XWAY(GPIO43, GPIO, MDIO, NONE, NONE), - MFP_XWAY(GPIO44, GPIO, NONE, GPHY, SIN), + MFP_XWAY(GPIO44, GPIO, MII, SIN, GPHY), MFP_XWAY(GPIO45, GPIO, NONE, GPHY, SIN), MFP_XWAY(GPIO46, GPIO, NONE, NONE, EXIN), - MFP_XWAY(GPIO47, GPIO, NONE, GPHY, SIN), + MFP_XWAY(GPIO47, GPIO, MII, GPHY, SIN), MFP_XWAY(GPIO48, GPIO, EBU, NONE, NONE), MFP_XWAY(GPIO49, GPIO, EBU, NONE, NONE), MFP_XWAY(GPIO50, GPIO, NONE, NONE, NONE), @@ -166,42 +174,6 @@ static const struct ltq_mfp_pin xway_mfp[] = { MFP_XWAY(GPIO55, GPIO, NONE, NONE, NONE), }; -static const struct ltq_mfp_pin ase_mfp[] = { - /* pin f0 f1 f2 f3 */ - MFP_XWAY(GPIO0, GPIO, EXIN, MII, TDM), - MFP_XWAY(GPIO1, GPIO, STP, DFE, EBU), - MFP_XWAY(GPIO2, GPIO, STP, DFE, EPHY), - MFP_XWAY(GPIO3, GPIO, STP, EPHY, EBU), - MFP_XWAY(GPIO4, GPIO, GPT, EPHY, MII), - MFP_XWAY(GPIO5, GPIO, MII, ASC, GPT), - MFP_XWAY(GPIO6, GPIO, MII, ASC, EXIN), - MFP_XWAY(GPIO7, GPIO, SPI, MII, JTAG), - MFP_XWAY(GPIO8, GPIO, SPI, MII, JTAG), - MFP_XWAY(GPIO9, GPIO, SPI, MII, JTAG), - MFP_XWAY(GPIO10, GPIO, SPI, MII, JTAG), - MFP_XWAY(GPIO11, GPIO, EBU, CGU, JTAG), - MFP_XWAY(GPIO12, GPIO, EBU, MII, SDIO), - MFP_XWAY(GPIO13, GPIO, EBU, MII, CGU), - MFP_XWAY(GPIO14, GPIO, EBU, SPI, CGU), - MFP_XWAY(GPIO15, GPIO, EBU, SPI, SDIO), - MFP_XWAY(GPIO16, GPIO, NONE, NONE, NONE), - MFP_XWAY(GPIO17, GPIO, NONE, NONE, NONE), 
- MFP_XWAY(GPIO18, GPIO, NONE, NONE, NONE), - MFP_XWAY(GPIO19, GPIO, EBU, MII, SDIO), - MFP_XWAY(GPIO20, GPIO, EBU, MII, SDIO), - MFP_XWAY(GPIO21, GPIO, EBU, MII, SDIO), - MFP_XWAY(GPIO22, GPIO, EBU, MII, CGU), - MFP_XWAY(GPIO23, GPIO, EBU, MII, CGU), - MFP_XWAY(GPIO24, GPIO, EBU, NONE, MII), - MFP_XWAY(GPIO25, GPIO, EBU, MII, GPT), - MFP_XWAY(GPIO26, GPIO, EBU, MII, SDIO), - MFP_XWAY(GPIO27, GPIO, EBU, NONE, MII), - MFP_XWAY(GPIO28, GPIO, MII, EBU, SDIO), - MFP_XWAY(GPIO29, GPIO, EBU, MII, EXIN), - MFP_XWAY(GPIO30, GPIO, NONE, NONE, NONE), - MFP_XWAY(GPIO31, GPIO, NONE, NONE, NONE), -}; - static const unsigned pins_jtag[] = {GPIO15, GPIO16, GPIO17, GPIO19, GPIO35}; static const unsigned pins_asc0[] = {GPIO11, GPIO12}; static const unsigned pins_asc0_cts_rts[] = {GPIO9, GPIO10}; @@ -231,6 +203,8 @@ static const unsigned pins_nand_cle[] = {GPIO24}; static const unsigned pins_nand_rdy[] = {GPIO48}; static const unsigned pins_nand_rd[] = {GPIO49}; +static const unsigned xway_exin_pin_map[] = {GPIO0, GPIO1, GPIO2, GPIO39, GPIO46, GPIO9}; + static const unsigned pins_exin0[] = {GPIO0}; static const unsigned pins_exin1[] = {GPIO1}; static const unsigned pins_exin2[] = {GPIO2}; @@ -240,7 +214,7 @@ static const unsigned pins_exin5[] = {GPIO9}; static const unsigned pins_spi[] = {GPIO16, GPIO17, GPIO18}; static const unsigned pins_spi_cs1[] = {GPIO15}; -static const unsigned pins_spi_cs2[] = {GPIO21}; +static const unsigned pins_spi_cs2[] = {GPIO22}; static const unsigned pins_spi_cs3[] = {GPIO13}; static const unsigned pins_spi_cs4[] = {GPIO10}; static const unsigned pins_spi_cs5[] = {GPIO9}; @@ -264,25 +238,6 @@ static const unsigned pins_pci_req2[] = {GPIO31}; static const unsigned pins_pci_req3[] = {GPIO3}; static const unsigned pins_pci_req4[] = {GPIO37}; -static const unsigned ase_pins_jtag[] = {GPIO7, GPIO8, GPIO9, GPIO10, GPIO11}; -static const unsigned ase_pins_asc[] = {GPIO5, GPIO6}; -static const unsigned ase_pins_stp[] = {GPIO1, GPIO2, GPIO3}; -static const unsigned ase_pins_ephy[] = {GPIO2, GPIO3, GPIO4}; -static const unsigned ase_pins_dfe[] = {GPIO1, GPIO2}; - -static const unsigned ase_pins_spi[] = {GPIO8, GPIO9, GPIO10}; -static const unsigned ase_pins_spi_cs1[] = {GPIO7}; -static const unsigned ase_pins_spi_cs2[] = {GPIO15}; -static const unsigned ase_pins_spi_cs3[] = {GPIO14}; - -static const unsigned ase_pins_exin0[] = {GPIO6}; -static const unsigned ase_pins_exin1[] = {GPIO29}; -static const unsigned ase_pins_exin2[] = {GPIO0}; - -static const unsigned ase_pins_gpt1[] = {GPIO5}; -static const unsigned ase_pins_gpt2[] = {GPIO4}; -static const unsigned ase_pins_gpt3[] = {GPIO25}; - static const struct ltq_pin_group xway_grps[] = { GRP_MUX("exin0", EXIN, pins_exin0), GRP_MUX("exin1", EXIN, pins_exin1), @@ -338,24 +293,6 @@ static const struct ltq_pin_group xway_grps[] = { GRP_MUX("gphy1 led2", GPHY, pins_gphy1_led2), }; -static const struct ltq_pin_group ase_grps[] = { - GRP_MUX("exin0", EXIN, ase_pins_exin0), - GRP_MUX("exin1", EXIN, ase_pins_exin1), - GRP_MUX("exin2", EXIN, ase_pins_exin2), - GRP_MUX("jtag", JTAG, ase_pins_jtag), - GRP_MUX("stp", STP, ase_pins_stp), - GRP_MUX("asc", ASC, ase_pins_asc), - GRP_MUX("gpt1", GPT, ase_pins_gpt1), - GRP_MUX("gpt2", GPT, ase_pins_gpt2), - GRP_MUX("gpt3", GPT, ase_pins_gpt3), - GRP_MUX("ephy", EPHY, ase_pins_ephy), - GRP_MUX("dfe", DFE, ase_pins_dfe), - GRP_MUX("spi", SPI, ase_pins_spi), - GRP_MUX("spi_cs1", SPI, ase_pins_spi_cs1), - GRP_MUX("spi_cs2", SPI, ase_pins_spi_cs2), - GRP_MUX("spi_cs3", SPI, ase_pins_spi_cs3), -}; - static 
const char * const xway_pci_grps[] = {"gnt1", "gnt2", "gnt3", "req1", "req2", "req3"}; @@ -395,30 +332,6 @@ static const char * const xrx_pci_grps[] = {"gnt1", "gnt2", "req1", "req2", "req3", "req4"}; -/* ase */ -static const char * const ase_exin_grps[] = {"exin0", "exin1", "exin2"}; -static const char * const ase_gpt_grps[] = {"gpt1", "gpt2", "gpt3"}; -static const char * const ase_dfe_grps[] = {"dfe"}; -static const char * const ase_ephy_grps[] = {"ephy"}; -static const char * const ase_asc_grps[] = {"asc"}; -static const char * const ase_jtag_grps[] = {"jtag"}; -static const char * const ase_stp_grps[] = {"stp"}; -static const char * const ase_spi_grps[] = {"spi", "spi_cs1", - "spi_cs2", "spi_cs3"}; - -static const struct ltq_pmx_func danube_funcs[] = { - {"spi", ARRAY_AND_SIZE(xway_spi_grps)}, - {"asc", ARRAY_AND_SIZE(xway_asc_grps)}, - {"cgu", ARRAY_AND_SIZE(xway_cgu_grps)}, - {"jtag", ARRAY_AND_SIZE(xway_jtag_grps)}, - {"exin", ARRAY_AND_SIZE(xway_exin_grps)}, - {"stp", ARRAY_AND_SIZE(xway_stp_grps)}, - {"gpt", ARRAY_AND_SIZE(xway_gpt_grps)}, - {"nmi", ARRAY_AND_SIZE(xway_nmi_grps)}, - {"pci", ARRAY_AND_SIZE(xway_pci_grps)}, - {"ebu", ARRAY_AND_SIZE(xway_ebu_grps)}, -}; - static const struct ltq_pmx_func xrx_funcs[] = { {"spi", ARRAY_AND_SIZE(xway_spi_grps)}, {"asc", ARRAY_AND_SIZE(xway_asc_grps)}, @@ -434,17 +347,991 @@ static const struct ltq_pmx_func xrx_funcs[] = { {"gphy", ARRAY_AND_SIZE(xrx_gphy_grps)}, }; +/* --------- ase related code --------- */ +#define ASE_MAX_PIN 32 + +static const struct ltq_mfp_pin ase_mfp[] = { + /* pin f0 f1 f2 f3 */ + MFP_XWAY(GPIO0, GPIO, EXIN, MII, TDM), + MFP_XWAY(GPIO1, GPIO, STP, DFE, EBU), + MFP_XWAY(GPIO2, GPIO, STP, DFE, EPHY), + MFP_XWAY(GPIO3, GPIO, STP, EPHY, EBU), + MFP_XWAY(GPIO4, GPIO, GPT, EPHY, MII), + MFP_XWAY(GPIO5, GPIO, MII, ASC, GPT), + MFP_XWAY(GPIO6, GPIO, MII, ASC, EXIN), + MFP_XWAY(GPIO7, GPIO, SPI, MII, JTAG), + MFP_XWAY(GPIO8, GPIO, SPI, MII, JTAG), + MFP_XWAY(GPIO9, GPIO, SPI, MII, JTAG), + MFP_XWAY(GPIO10, GPIO, SPI, MII, JTAG), + MFP_XWAY(GPIO11, GPIO, EBU, CGU, JTAG), + MFP_XWAY(GPIO12, GPIO, EBU, MII, SDIO), + MFP_XWAY(GPIO13, GPIO, EBU, MII, CGU), + MFP_XWAY(GPIO14, GPIO, EBU, SPI, CGU), + MFP_XWAY(GPIO15, GPIO, EBU, SPI, SDIO), + MFP_XWAY(GPIO16, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO17, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO18, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO19, GPIO, EBU, MII, SDIO), + MFP_XWAY(GPIO20, GPIO, EBU, MII, SDIO), + MFP_XWAY(GPIO21, GPIO, EBU, MII, EBU2), + MFP_XWAY(GPIO22, GPIO, EBU, MII, CGU), + MFP_XWAY(GPIO23, GPIO, EBU, MII, CGU), + MFP_XWAY(GPIO24, GPIO, EBU, EBU2, MDIO), + MFP_XWAY(GPIO25, GPIO, EBU, MII, GPT), + MFP_XWAY(GPIO26, GPIO, EBU, MII, SDIO), + MFP_XWAY(GPIO27, GPIO, EBU, NONE, MDIO), + MFP_XWAY(GPIO28, GPIO, MII, EBU, SDIO), + MFP_XWAY(GPIO29, GPIO, EBU, MII, EXIN), + MFP_XWAY(GPIO30, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO31, GPIO, NONE, NONE, NONE), +}; + +static const unsigned ase_exin_pin_map[] = {GPIO6, GPIO29, GPIO0}; + +static const unsigned ase_pins_exin0[] = {GPIO6}; +static const unsigned ase_pins_exin1[] = {GPIO29}; +static const unsigned ase_pins_exin2[] = {GPIO0}; + +static const unsigned ase_pins_jtag[] = {GPIO7, GPIO8, GPIO9, GPIO10, GPIO11}; +static const unsigned ase_pins_asc[] = {GPIO5, GPIO6}; +static const unsigned ase_pins_stp[] = {GPIO1, GPIO2, GPIO3}; +static const unsigned ase_pins_mdio[] = {GPIO24, GPIO27}; +static const unsigned ase_pins_ephy_led0[] = {GPIO2}; +static const unsigned ase_pins_ephy_led1[] = {GPIO3}; +static const unsigned 
ase_pins_ephy_led2[] = {GPIO4}; +static const unsigned ase_pins_dfe_led0[] = {GPIO1}; +static const unsigned ase_pins_dfe_led1[] = {GPIO2}; + +static const unsigned ase_pins_spi[] = {GPIO8, GPIO9, GPIO10}; /* DEPRECATED */ +static const unsigned ase_pins_spi_di[] = {GPIO8}; +static const unsigned ase_pins_spi_do[] = {GPIO9}; +static const unsigned ase_pins_spi_clk[] = {GPIO10}; +static const unsigned ase_pins_spi_cs1[] = {GPIO7}; +static const unsigned ase_pins_spi_cs2[] = {GPIO15}; +static const unsigned ase_pins_spi_cs3[] = {GPIO14}; + +static const unsigned ase_pins_gpt1[] = {GPIO5}; +static const unsigned ase_pins_gpt2[] = {GPIO4}; +static const unsigned ase_pins_gpt3[] = {GPIO25}; + +static const unsigned ase_pins_clkout0[] = {GPIO23}; +static const unsigned ase_pins_clkout1[] = {GPIO22}; +static const unsigned ase_pins_clkout2[] = {GPIO14}; + +static const struct ltq_pin_group ase_grps[] = { + GRP_MUX("exin0", EXIN, ase_pins_exin0), + GRP_MUX("exin1", EXIN, ase_pins_exin1), + GRP_MUX("exin2", EXIN, ase_pins_exin2), + GRP_MUX("jtag", JTAG, ase_pins_jtag), + GRP_MUX("spi", SPI, ase_pins_spi), /* DEPRECATED */ + GRP_MUX("spi_di", SPI, ase_pins_spi_di), + GRP_MUX("spi_do", SPI, ase_pins_spi_do), + GRP_MUX("spi_clk", SPI, ase_pins_spi_clk), + GRP_MUX("spi_cs1", SPI, ase_pins_spi_cs1), + GRP_MUX("spi_cs2", SPI, ase_pins_spi_cs2), + GRP_MUX("spi_cs3", SPI, ase_pins_spi_cs3), + GRP_MUX("asc", ASC, ase_pins_asc), + GRP_MUX("stp", STP, ase_pins_stp), + GRP_MUX("gpt1", GPT, ase_pins_gpt1), + GRP_MUX("gpt2", GPT, ase_pins_gpt2), + GRP_MUX("gpt3", GPT, ase_pins_gpt3), + GRP_MUX("clkout0", CGU, ase_pins_clkout0), + GRP_MUX("clkout1", CGU, ase_pins_clkout1), + GRP_MUX("clkout2", CGU, ase_pins_clkout2), + GRP_MUX("mdio", MDIO, ase_pins_mdio), + GRP_MUX("dfe led0", DFE, ase_pins_dfe_led0), + GRP_MUX("dfe led1", DFE, ase_pins_dfe_led1), + GRP_MUX("ephy led0", EPHY, ase_pins_ephy_led0), + GRP_MUX("ephy led1", EPHY, ase_pins_ephy_led1), + GRP_MUX("ephy led2", EPHY, ase_pins_ephy_led2), +}; + +static const char * const ase_exin_grps[] = {"exin0", "exin1", "exin2"}; +static const char * const ase_gpt_grps[] = {"gpt1", "gpt2", "gpt3"}; +static const char * const ase_cgu_grps[] = {"clkout0", "clkout1", + "clkout2"}; +static const char * const ase_mdio_grps[] = {"mdio"}; +static const char * const ase_dfe_grps[] = {"dfe led0", "dfe led1"}; +static const char * const ase_ephy_grps[] = {"ephy led0", "ephy led1", + "ephy led2"}; +static const char * const ase_asc_grps[] = {"asc"}; +static const char * const ase_jtag_grps[] = {"jtag"}; +static const char * const ase_stp_grps[] = {"stp"}; +static const char * const ase_spi_grps[] = {"spi", /* DEPRECATED */ + "spi_di", "spi_do", + "spi_clk", "spi_cs1", + "spi_cs2", "spi_cs3"}; + static const struct ltq_pmx_func ase_funcs[] = { {"spi", ARRAY_AND_SIZE(ase_spi_grps)}, {"asc", ARRAY_AND_SIZE(ase_asc_grps)}, + {"cgu", ARRAY_AND_SIZE(ase_cgu_grps)}, {"jtag", ARRAY_AND_SIZE(ase_jtag_grps)}, {"exin", ARRAY_AND_SIZE(ase_exin_grps)}, {"stp", ARRAY_AND_SIZE(ase_stp_grps)}, {"gpt", ARRAY_AND_SIZE(ase_gpt_grps)}, + {"mdio", ARRAY_AND_SIZE(ase_mdio_grps)}, {"ephy", ARRAY_AND_SIZE(ase_ephy_grps)}, {"dfe", ARRAY_AND_SIZE(ase_dfe_grps)}, }; +/* --------- danube related code --------- */ +#define DANUBE_MAX_PIN 32 + +static const struct ltq_mfp_pin danube_mfp[] = { + /* pin f0 f1 f2 f3 */ + MFP_XWAY(GPIO0, GPIO, EXIN, SDIO, TDM), + MFP_XWAY(GPIO1, GPIO, EXIN, CBUS, MII), + MFP_XWAY(GPIO2, GPIO, CGU, EXIN, MII), + MFP_XWAY(GPIO3, GPIO, CGU, SDIO, PCI), + MFP_XWAY(GPIO4, GPIO, STP, 
DFE, ASC), + MFP_XWAY(GPIO5, GPIO, STP, MII, DFE), + MFP_XWAY(GPIO6, GPIO, STP, GPT, ASC), + MFP_XWAY(GPIO7, GPIO, CGU, CBUS, MII), + MFP_XWAY(GPIO8, GPIO, CGU, NMI, MII), + MFP_XWAY(GPIO9, GPIO, ASC, SPI, MII), + MFP_XWAY(GPIO10, GPIO, ASC, SPI, MII), + MFP_XWAY(GPIO11, GPIO, ASC, CBUS, SPI), + MFP_XWAY(GPIO12, GPIO, ASC, CBUS, MCD), + MFP_XWAY(GPIO13, GPIO, EBU, SPI, MII), + MFP_XWAY(GPIO14, GPIO, CGU, CBUS, MII), + MFP_XWAY(GPIO15, GPIO, SPI, SDIO, JTAG), + MFP_XWAY(GPIO16, GPIO, SPI, SDIO, JTAG), + MFP_XWAY(GPIO17, GPIO, SPI, SDIO, JTAG), + MFP_XWAY(GPIO18, GPIO, SPI, SDIO, JTAG), + MFP_XWAY(GPIO19, GPIO, PCI, SDIO, MII), + MFP_XWAY(GPIO20, GPIO, JTAG, SDIO, MII), + MFP_XWAY(GPIO21, GPIO, PCI, EBU, GPT), + MFP_XWAY(GPIO22, GPIO, SPI, MCD, MII), + MFP_XWAY(GPIO23, GPIO, EBU, PCI, STP), + MFP_XWAY(GPIO24, GPIO, EBU, TDM, PCI), + MFP_XWAY(GPIO25, GPIO, TDM, SDIO, ASC), + MFP_XWAY(GPIO26, GPIO, EBU, TDM, SDIO), + MFP_XWAY(GPIO27, GPIO, TDM, SDIO, ASC), + MFP_XWAY(GPIO28, GPIO, GPT, MII, SDIO), + MFP_XWAY(GPIO29, GPIO, PCI, CBUS, MII), + MFP_XWAY(GPIO30, GPIO, PCI, CBUS, MII), + MFP_XWAY(GPIO31, GPIO, EBU, PCI, MII), +}; + +static const unsigned danube_exin_pin_map[] = {GPIO0, GPIO1, GPIO2}; + +static const unsigned danube_pins_exin0[] = {GPIO0}; +static const unsigned danube_pins_exin1[] = {GPIO1}; +static const unsigned danube_pins_exin2[] = {GPIO2}; + +static const unsigned danube_pins_jtag[] = {GPIO15, GPIO16, GPIO17, GPIO18, GPIO20}; +static const unsigned danube_pins_asc0[] = {GPIO11, GPIO12}; +static const unsigned danube_pins_asc0_cts_rts[] = {GPIO9, GPIO10}; +static const unsigned danube_pins_stp[] = {GPIO4, GPIO5, GPIO6}; +static const unsigned danube_pins_nmi[] = {GPIO8}; + +static const unsigned danube_pins_dfe_led0[] = {GPIO4}; +static const unsigned danube_pins_dfe_led1[] = {GPIO5}; + +static const unsigned danube_pins_ebu_a24[] = {GPIO13}; +static const unsigned danube_pins_ebu_clk[] = {GPIO21}; +static const unsigned danube_pins_ebu_cs1[] = {GPIO23}; +static const unsigned danube_pins_ebu_a23[] = {GPIO24}; +static const unsigned danube_pins_ebu_wait[] = {GPIO26}; +static const unsigned danube_pins_ebu_a25[] = {GPIO31}; + +static const unsigned danube_pins_nand_ale[] = {GPIO13}; +static const unsigned danube_pins_nand_cs1[] = {GPIO23}; +static const unsigned danube_pins_nand_cle[] = {GPIO24}; + +static const unsigned danube_pins_spi[] = {GPIO16, GPIO17, GPIO18}; /* DEPRECATED */ +static const unsigned danube_pins_spi_di[] = {GPIO16}; +static const unsigned danube_pins_spi_do[] = {GPIO17}; +static const unsigned danube_pins_spi_clk[] = {GPIO18}; +static const unsigned danube_pins_spi_cs1[] = {GPIO15}; +static const unsigned danube_pins_spi_cs2[] = {GPIO21}; +static const unsigned danube_pins_spi_cs3[] = {GPIO13}; +static const unsigned danube_pins_spi_cs4[] = {GPIO10}; +static const unsigned danube_pins_spi_cs5[] = {GPIO9}; +static const unsigned danube_pins_spi_cs6[] = {GPIO11}; + +static const unsigned danube_pins_gpt1[] = {GPIO28}; +static const unsigned danube_pins_gpt2[] = {GPIO21}; +static const unsigned danube_pins_gpt3[] = {GPIO6}; + +static const unsigned danube_pins_clkout0[] = {GPIO8}; +static const unsigned danube_pins_clkout1[] = {GPIO7}; +static const unsigned danube_pins_clkout2[] = {GPIO3}; +static const unsigned danube_pins_clkout3[] = {GPIO2}; + +static const unsigned danube_pins_pci_gnt1[] = {GPIO30}; +static const unsigned danube_pins_pci_gnt2[] = {GPIO23}; +static const unsigned danube_pins_pci_gnt3[] = {GPIO19}; +static const unsigned 
danube_pins_pci_req1[] = {GPIO29}; +static const unsigned danube_pins_pci_req2[] = {GPIO31}; +static const unsigned danube_pins_pci_req3[] = {GPIO3}; + +static const struct ltq_pin_group danube_grps[] = { + GRP_MUX("exin0", EXIN, danube_pins_exin0), + GRP_MUX("exin1", EXIN, danube_pins_exin1), + GRP_MUX("exin2", EXIN, danube_pins_exin2), + GRP_MUX("jtag", JTAG, danube_pins_jtag), + GRP_MUX("ebu a23", EBU, danube_pins_ebu_a23), + GRP_MUX("ebu a24", EBU, danube_pins_ebu_a24), + GRP_MUX("ebu a25", EBU, danube_pins_ebu_a25), + GRP_MUX("ebu clk", EBU, danube_pins_ebu_clk), + GRP_MUX("ebu cs1", EBU, danube_pins_ebu_cs1), + GRP_MUX("ebu wait", EBU, danube_pins_ebu_wait), + GRP_MUX("nand ale", EBU, danube_pins_nand_ale), + GRP_MUX("nand cs1", EBU, danube_pins_nand_cs1), + GRP_MUX("nand cle", EBU, danube_pins_nand_cle), + GRP_MUX("spi", SPI, danube_pins_spi), /* DEPRECATED */ + GRP_MUX("spi_di", SPI, danube_pins_spi_di), + GRP_MUX("spi_do", SPI, danube_pins_spi_do), + GRP_MUX("spi_clk", SPI, danube_pins_spi_clk), + GRP_MUX("spi_cs1", SPI, danube_pins_spi_cs1), + GRP_MUX("spi_cs2", SPI, danube_pins_spi_cs2), + GRP_MUX("spi_cs3", SPI, danube_pins_spi_cs3), + GRP_MUX("spi_cs4", SPI, danube_pins_spi_cs4), + GRP_MUX("spi_cs5", SPI, danube_pins_spi_cs5), + GRP_MUX("spi_cs6", SPI, danube_pins_spi_cs6), + GRP_MUX("asc0", ASC, danube_pins_asc0), + GRP_MUX("asc0 cts rts", ASC, danube_pins_asc0_cts_rts), + GRP_MUX("stp", STP, danube_pins_stp), + GRP_MUX("nmi", NMI, danube_pins_nmi), + GRP_MUX("gpt1", GPT, danube_pins_gpt1), + GRP_MUX("gpt2", GPT, danube_pins_gpt2), + GRP_MUX("gpt3", GPT, danube_pins_gpt3), + GRP_MUX("clkout0", CGU, danube_pins_clkout0), + GRP_MUX("clkout1", CGU, danube_pins_clkout1), + GRP_MUX("clkout2", CGU, danube_pins_clkout2), + GRP_MUX("clkout3", CGU, danube_pins_clkout3), + GRP_MUX("gnt1", PCI, danube_pins_pci_gnt1), + GRP_MUX("gnt2", PCI, danube_pins_pci_gnt2), + GRP_MUX("gnt3", PCI, danube_pins_pci_gnt3), + GRP_MUX("req1", PCI, danube_pins_pci_req1), + GRP_MUX("req2", PCI, danube_pins_pci_req2), + GRP_MUX("req3", PCI, danube_pins_pci_req3), + GRP_MUX("dfe led0", DFE, danube_pins_dfe_led0), + GRP_MUX("dfe led1", DFE, danube_pins_dfe_led1), +}; + +static const char * const danube_pci_grps[] = {"gnt1", "gnt2", + "gnt3", "req1", + "req2", "req3"}; +static const char * const danube_spi_grps[] = {"spi", /* DEPRECATED */ + "spi_di", "spi_do", + "spi_clk", "spi_cs1", + "spi_cs2", "spi_cs3", + "spi_cs4", "spi_cs5", + "spi_cs6"}; +static const char * const danube_cgu_grps[] = {"clkout0", "clkout1", + "clkout2", "clkout3"}; +static const char * const danube_ebu_grps[] = {"ebu a23", "ebu a24", + "ebu a25", "ebu cs1", + "ebu wait", "ebu clk", + "nand ale", "nand cs1", + "nand cle"}; +static const char * const danube_dfe_grps[] = {"dfe led0", "dfe led1"}; +static const char * const danube_exin_grps[] = {"exin0", "exin1", "exin2"}; +static const char * const danube_gpt_grps[] = {"gpt1", "gpt2", "gpt3"}; +static const char * const danube_asc_grps[] = {"asc0", "asc0 cts rts"}; +static const char * const danube_jtag_grps[] = {"jtag"}; +static const char * const danube_stp_grps[] = {"stp"}; +static const char * const danube_nmi_grps[] = {"nmi"}; + +static const struct ltq_pmx_func danube_funcs[] = { + {"spi", ARRAY_AND_SIZE(danube_spi_grps)}, + {"asc", ARRAY_AND_SIZE(danube_asc_grps)}, + {"cgu", ARRAY_AND_SIZE(danube_cgu_grps)}, + {"jtag", ARRAY_AND_SIZE(danube_jtag_grps)}, + {"exin", ARRAY_AND_SIZE(danube_exin_grps)}, + {"stp", ARRAY_AND_SIZE(danube_stp_grps)}, + {"gpt", 
ARRAY_AND_SIZE(danube_gpt_grps)}, + {"nmi", ARRAY_AND_SIZE(danube_nmi_grps)}, + {"pci", ARRAY_AND_SIZE(danube_pci_grps)}, + {"ebu", ARRAY_AND_SIZE(danube_ebu_grps)}, + {"dfe", ARRAY_AND_SIZE(danube_dfe_grps)}, +}; + +/* --------- xrx100 related code --------- */ +#define XRX100_MAX_PIN 56 + +static const struct ltq_mfp_pin xrx100_mfp[] = { + /* pin f0 f1 f2 f3 */ + MFP_XWAY(GPIO0, GPIO, EXIN, SDIO, TDM), + MFP_XWAY(GPIO1, GPIO, EXIN, CBUS, SIN), + MFP_XWAY(GPIO2, GPIO, CGU, EXIN, NONE), + MFP_XWAY(GPIO3, GPIO, CGU, SDIO, PCI), + MFP_XWAY(GPIO4, GPIO, STP, DFE, ASC), + MFP_XWAY(GPIO5, GPIO, STP, NONE, DFE), + MFP_XWAY(GPIO6, GPIO, STP, GPT, ASC), + MFP_XWAY(GPIO7, GPIO, CGU, CBUS, NONE), + MFP_XWAY(GPIO8, GPIO, CGU, NMI, NONE), + MFP_XWAY(GPIO9, GPIO, ASC, SPI, EXIN), + MFP_XWAY(GPIO10, GPIO, ASC, SPI, EXIN), + MFP_XWAY(GPIO11, GPIO, ASC, CBUS, SPI), + MFP_XWAY(GPIO12, GPIO, ASC, CBUS, MCD), + MFP_XWAY(GPIO13, GPIO, EBU, SPI, NONE), + MFP_XWAY(GPIO14, GPIO, CGU, NONE, NONE), + MFP_XWAY(GPIO15, GPIO, SPI, SDIO, MCD), + MFP_XWAY(GPIO16, GPIO, SPI, SDIO, NONE), + MFP_XWAY(GPIO17, GPIO, SPI, SDIO, NONE), + MFP_XWAY(GPIO18, GPIO, SPI, SDIO, NONE), + MFP_XWAY(GPIO19, GPIO, PCI, SDIO, CGU), + MFP_XWAY(GPIO20, GPIO, NONE, SDIO, EBU), + MFP_XWAY(GPIO21, GPIO, PCI, EBU, GPT), + MFP_XWAY(GPIO22, GPIO, SPI, NONE, EBU), + MFP_XWAY(GPIO23, GPIO, EBU, PCI, STP), + MFP_XWAY(GPIO24, GPIO, EBU, TDM, PCI), + MFP_XWAY(GPIO25, GPIO, TDM, SDIO, ASC), + MFP_XWAY(GPIO26, GPIO, EBU, TDM, SDIO), + MFP_XWAY(GPIO27, GPIO, TDM, SDIO, ASC), + MFP_XWAY(GPIO28, GPIO, GPT, NONE, SDIO), + MFP_XWAY(GPIO29, GPIO, PCI, CBUS, NONE), + MFP_XWAY(GPIO30, GPIO, PCI, CBUS, NONE), + MFP_XWAY(GPIO31, GPIO, EBU, PCI, NONE), + MFP_XWAY(GPIO32, GPIO, MII, NONE, EBU), + MFP_XWAY(GPIO33, GPIO, MII, NONE, EBU), + MFP_XWAY(GPIO34, GPIO, SIN, SSI, NONE), + MFP_XWAY(GPIO35, GPIO, SIN, SSI, NONE), + MFP_XWAY(GPIO36, GPIO, SIN, SSI, NONE), + MFP_XWAY(GPIO37, GPIO, PCI, NONE, NONE), + MFP_XWAY(GPIO38, GPIO, PCI, NONE, NONE), + MFP_XWAY(GPIO39, GPIO, NONE, EXIN, NONE), + MFP_XWAY(GPIO40, GPIO, MII, TDM, NONE), + MFP_XWAY(GPIO41, GPIO, MII, TDM, NONE), + MFP_XWAY(GPIO42, GPIO, MDIO, NONE, NONE), + MFP_XWAY(GPIO43, GPIO, MDIO, NONE, NONE), + MFP_XWAY(GPIO44, GPIO, MII, SIN, NONE), + MFP_XWAY(GPIO45, GPIO, MII, NONE, SIN), + MFP_XWAY(GPIO46, GPIO, MII, NONE, EXIN), + MFP_XWAY(GPIO47, GPIO, MII, NONE, SIN), + MFP_XWAY(GPIO48, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO49, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO50, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO51, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO52, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO53, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO54, GPIO, NONE, NONE, NONE), + MFP_XWAY(GPIO55, GPIO, NONE, NONE, NONE), +}; + +static const unsigned xrx100_exin_pin_map[] = {GPIO0, GPIO1, GPIO2, GPIO39, GPIO10, GPIO9}; + +static const unsigned xrx100_pins_exin0[] = {GPIO0}; +static const unsigned xrx100_pins_exin1[] = {GPIO1}; +static const unsigned xrx100_pins_exin2[] = {GPIO2}; +static const unsigned xrx100_pins_exin3[] = {GPIO39}; +static const unsigned xrx100_pins_exin4[] = {GPIO10}; +static const unsigned xrx100_pins_exin5[] = {GPIO9}; + +static const unsigned xrx100_pins_asc0[] = {GPIO11, GPIO12}; +static const unsigned xrx100_pins_asc0_cts_rts[] = {GPIO9, GPIO10}; +static const unsigned xrx100_pins_stp[] = {GPIO4, GPIO5, GPIO6}; +static const unsigned xrx100_pins_nmi[] = {GPIO8}; +static const unsigned xrx100_pins_mdio[] = {GPIO42, GPIO43}; + +static const unsigned xrx100_pins_dfe_led0[] = {GPIO4}; +static const 
unsigned xrx100_pins_dfe_led1[] = {GPIO5}; + +static const unsigned xrx100_pins_ebu_a24[] = {GPIO13}; +static const unsigned xrx100_pins_ebu_clk[] = {GPIO21}; +static const unsigned xrx100_pins_ebu_cs1[] = {GPIO23}; +static const unsigned xrx100_pins_ebu_a23[] = {GPIO24}; +static const unsigned xrx100_pins_ebu_wait[] = {GPIO26}; +static const unsigned xrx100_pins_ebu_a25[] = {GPIO31}; + +static const unsigned xrx100_pins_nand_ale[] = {GPIO13}; +static const unsigned xrx100_pins_nand_cs1[] = {GPIO23}; +static const unsigned xrx100_pins_nand_cle[] = {GPIO24}; +static const unsigned xrx100_pins_nand_rdy[] = {GPIO48}; +static const unsigned xrx100_pins_nand_rd[] = {GPIO49}; + +static const unsigned xrx100_pins_spi_di[] = {GPIO16}; +static const unsigned xrx100_pins_spi_do[] = {GPIO17}; +static const unsigned xrx100_pins_spi_clk[] = {GPIO18}; +static const unsigned xrx100_pins_spi_cs1[] = {GPIO15}; +static const unsigned xrx100_pins_spi_cs2[] = {GPIO22}; +static const unsigned xrx100_pins_spi_cs3[] = {GPIO13}; +static const unsigned xrx100_pins_spi_cs4[] = {GPIO10}; +static const unsigned xrx100_pins_spi_cs5[] = {GPIO9}; +static const unsigned xrx100_pins_spi_cs6[] = {GPIO11}; + +static const unsigned xrx100_pins_gpt1[] = {GPIO28}; +static const unsigned xrx100_pins_gpt2[] = {GPIO21}; +static const unsigned xrx100_pins_gpt3[] = {GPIO6}; + +static const unsigned xrx100_pins_clkout0[] = {GPIO8}; +static const unsigned xrx100_pins_clkout1[] = {GPIO7}; +static const unsigned xrx100_pins_clkout2[] = {GPIO3}; +static const unsigned xrx100_pins_clkout3[] = {GPIO2}; + +static const unsigned xrx100_pins_pci_gnt1[] = {GPIO30}; +static const unsigned xrx100_pins_pci_gnt2[] = {GPIO23}; +static const unsigned xrx100_pins_pci_gnt3[] = {GPIO19}; +static const unsigned xrx100_pins_pci_gnt4[] = {GPIO38}; +static const unsigned xrx100_pins_pci_req1[] = {GPIO29}; +static const unsigned xrx100_pins_pci_req2[] = {GPIO31}; +static const unsigned xrx100_pins_pci_req3[] = {GPIO3}; +static const unsigned xrx100_pins_pci_req4[] = {GPIO37}; + +static const struct ltq_pin_group xrx100_grps[] = { + GRP_MUX("exin0", EXIN, xrx100_pins_exin0), + GRP_MUX("exin1", EXIN, xrx100_pins_exin1), + GRP_MUX("exin2", EXIN, xrx100_pins_exin2), + GRP_MUX("exin3", EXIN, xrx100_pins_exin3), + GRP_MUX("exin4", EXIN, xrx100_pins_exin4), + GRP_MUX("exin5", EXIN, xrx100_pins_exin5), + GRP_MUX("ebu a23", EBU, xrx100_pins_ebu_a23), + GRP_MUX("ebu a24", EBU, xrx100_pins_ebu_a24), + GRP_MUX("ebu a25", EBU, xrx100_pins_ebu_a25), + GRP_MUX("ebu clk", EBU, xrx100_pins_ebu_clk), + GRP_MUX("ebu cs1", EBU, xrx100_pins_ebu_cs1), + GRP_MUX("ebu wait", EBU, xrx100_pins_ebu_wait), + GRP_MUX("nand ale", EBU, xrx100_pins_nand_ale), + GRP_MUX("nand cs1", EBU, xrx100_pins_nand_cs1), + GRP_MUX("nand cle", EBU, xrx100_pins_nand_cle), + GRP_MUX("nand rdy", EBU, xrx100_pins_nand_rdy), + GRP_MUX("nand rd", EBU, xrx100_pins_nand_rd), + GRP_MUX("spi_di", SPI, xrx100_pins_spi_di), + GRP_MUX("spi_do", SPI, xrx100_pins_spi_do), + GRP_MUX("spi_clk", SPI, xrx100_pins_spi_clk), + GRP_MUX("spi_cs1", SPI, xrx100_pins_spi_cs1), + GRP_MUX("spi_cs2", SPI, xrx100_pins_spi_cs2), + GRP_MUX("spi_cs3", SPI, xrx100_pins_spi_cs3), + GRP_MUX("spi_cs4", SPI, xrx100_pins_spi_cs4), + GRP_MUX("spi_cs5", SPI, xrx100_pins_spi_cs5), + GRP_MUX("spi_cs6", SPI, xrx100_pins_spi_cs6), + GRP_MUX("asc0", ASC, xrx100_pins_asc0), + GRP_MUX("asc0 cts rts", ASC, xrx100_pins_asc0_cts_rts), + GRP_MUX("stp", STP, xrx100_pins_stp), + GRP_MUX("nmi", NMI, xrx100_pins_nmi), + GRP_MUX("gpt1", GPT, 
xrx100_pins_gpt1), + GRP_MUX("gpt2", GPT, xrx100_pins_gpt2), + GRP_MUX("gpt3", GPT, xrx100_pins_gpt3), + GRP_MUX("clkout0", CGU, xrx100_pins_clkout0), + GRP_MUX("clkout1", CGU, xrx100_pins_clkout1), + GRP_MUX("clkout2", CGU, xrx100_pins_clkout2), + GRP_MUX("clkout3", CGU, xrx100_pins_clkout3), + GRP_MUX("gnt1", PCI, xrx100_pins_pci_gnt1), + GRP_MUX("gnt2", PCI, xrx100_pins_pci_gnt2), + GRP_MUX("gnt3", PCI, xrx100_pins_pci_gnt3), + GRP_MUX("gnt4", PCI, xrx100_pins_pci_gnt4), + GRP_MUX("req1", PCI, xrx100_pins_pci_req1), + GRP_MUX("req2", PCI, xrx100_pins_pci_req2), + GRP_MUX("req3", PCI, xrx100_pins_pci_req3), + GRP_MUX("req4", PCI, xrx100_pins_pci_req4), + GRP_MUX("mdio", MDIO, xrx100_pins_mdio), + GRP_MUX("dfe led0", DFE, xrx100_pins_dfe_led0), + GRP_MUX("dfe led1", DFE, xrx100_pins_dfe_led1), +}; + +static const char * const xrx100_pci_grps[] = {"gnt1", "gnt2", + "gnt3", "gnt4", + "req1", "req2", + "req3", "req4"}; +static const char * const xrx100_spi_grps[] = {"spi_di", "spi_do", + "spi_clk", "spi_cs1", + "spi_cs2", "spi_cs3", + "spi_cs4", "spi_cs5", + "spi_cs6"}; +static const char * const xrx100_cgu_grps[] = {"clkout0", "clkout1", + "clkout2", "clkout3"}; +static const char * const xrx100_ebu_grps[] = {"ebu a23", "ebu a24", + "ebu a25", "ebu cs1", + "ebu wait", "ebu clk", + "nand ale", "nand cs1", + "nand cle", "nand rdy", + "nand rd"}; +static const char * const xrx100_exin_grps[] = {"exin0", "exin1", "exin2", + "exin3", "exin4", "exin5"}; +static const char * const xrx100_gpt_grps[] = {"gpt1", "gpt2", "gpt3"}; +static const char * const xrx100_asc_grps[] = {"asc0", "asc0 cts rts"}; +static const char * const xrx100_stp_grps[] = {"stp"}; +static const char * const xrx100_nmi_grps[] = {"nmi"}; +static const char * const xrx100_mdio_grps[] = {"mdio"}; +static const char * const xrx100_dfe_grps[] = {"dfe led0", "dfe led1"}; + +static const struct ltq_pmx_func xrx100_funcs[] = { + {"spi", ARRAY_AND_SIZE(xrx100_spi_grps)}, + {"asc", ARRAY_AND_SIZE(xrx100_asc_grps)}, + {"cgu", ARRAY_AND_SIZE(xrx100_cgu_grps)}, + {"exin", ARRAY_AND_SIZE(xrx100_exin_grps)}, + {"stp", ARRAY_AND_SIZE(xrx100_stp_grps)}, + {"gpt", ARRAY_AND_SIZE(xrx100_gpt_grps)}, + {"nmi", ARRAY_AND_SIZE(xrx100_nmi_grps)}, + {"pci", ARRAY_AND_SIZE(xrx100_pci_grps)}, + {"ebu", ARRAY_AND_SIZE(xrx100_ebu_grps)}, + {"mdio", ARRAY_AND_SIZE(xrx100_mdio_grps)}, + {"dfe", ARRAY_AND_SIZE(xrx100_dfe_grps)}, +}; + +/* --------- xrx200 related code --------- */ +#define XRX200_MAX_PIN 50 + +static const struct ltq_mfp_pin xrx200_mfp[] = { + /* pin f0 f1 f2 f3 */ + MFP_XWAY(GPIO0, GPIO, EXIN, SDIO, TDM), + MFP_XWAY(GPIO1, GPIO, EXIN, CBUS, SIN), + MFP_XWAY(GPIO2, GPIO, CGU, EXIN, GPHY), + MFP_XWAY(GPIO3, GPIO, CGU, SDIO, PCI), + MFP_XWAY(GPIO4, GPIO, STP, DFE, USIF), + MFP_XWAY(GPIO5, GPIO, STP, GPHY, DFE), + MFP_XWAY(GPIO6, GPIO, STP, GPT, USIF), + MFP_XWAY(GPIO7, GPIO, CGU, CBUS, GPHY), + MFP_XWAY(GPIO8, GPIO, CGU, NMI, NONE), + MFP_XWAY(GPIO9, GPIO, USIF, SPI, EXIN), + MFP_XWAY(GPIO10, GPIO, USIF, SPI, EXIN), + MFP_XWAY(GPIO11, GPIO, USIF, CBUS, SPI), + MFP_XWAY(GPIO12, GPIO, USIF, CBUS, MCD), + MFP_XWAY(GPIO13, GPIO, EBU, SPI, NONE), + MFP_XWAY(GPIO14, GPIO, CGU, CBUS, USIF), + MFP_XWAY(GPIO15, GPIO, SPI, SDIO, MCD), + MFP_XWAY(GPIO16, GPIO, SPI, SDIO, NONE), + MFP_XWAY(GPIO17, GPIO, SPI, SDIO, NONE), + MFP_XWAY(GPIO18, GPIO, SPI, SDIO, NONE), + MFP_XWAY(GPIO19, GPIO, PCI, SDIO, CGU), + MFP_XWAY(GPIO20, GPIO, NONE, SDIO, EBU), + MFP_XWAY(GPIO21, GPIO, PCI, EBU, GPT), + MFP_XWAY(GPIO22, GPIO, SPI, CGU, EBU), + MFP_XWAY(GPIO23, GPIO, 
EBU, PCI, STP), + MFP_XWAY(GPIO24, GPIO, EBU, TDM, PCI), + MFP_XWAY(GPIO25, GPIO, TDM, SDIO, USIF), + MFP_XWAY(GPIO26, GPIO, EBU, TDM, SDIO), + MFP_XWAY(GPIO27, GPIO, TDM, SDIO, USIF), + MFP_XWAY(GPIO28, GPIO, GPT, PCI, SDIO), + MFP_XWAY(GPIO29, GPIO, PCI, CBUS, EXIN), + MFP_XWAY(GPIO30, GPIO, PCI, CBUS, NONE), + MFP_XWAY(GPIO31, GPIO, EBU, PCI, NONE), + MFP_XWAY(GPIO32, GPIO, MII, NONE, EBU), + MFP_XWAY(GPIO33, GPIO, MII, NONE, EBU), + MFP_XWAY(GPIO34, GPIO, SIN, SSI, NONE), + MFP_XWAY(GPIO35, GPIO, SIN, SSI, NONE), + MFP_XWAY(GPIO36, GPIO, SIN, SSI, EXIN), + MFP_XWAY(GPIO37, GPIO, USIF, NONE, PCI), + MFP_XWAY(GPIO38, GPIO, PCI, USIF, NONE), + MFP_XWAY(GPIO39, GPIO, USIF, EXIN, NONE), + MFP_XWAY(GPIO40, GPIO, MII, TDM, NONE), + MFP_XWAY(GPIO41, GPIO, MII, TDM, NONE), + MFP_XWAY(GPIO42, GPIO, MDIO, NONE, NONE), + MFP_XWAY(GPIO43, GPIO, MDIO, NONE, NONE), + MFP_XWAY(GPIO44, GPIO, MII, SIN, GPHY), + MFP_XWAY(GPIO45, GPIO, MII, GPHY, SIN), + MFP_XWAY(GPIO46, GPIO, MII, NONE, EXIN), + MFP_XWAY(GPIO47, GPIO, MII, GPHY, SIN), + MFP_XWAY(GPIO48, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO49, GPIO, EBU, NONE, NONE), +}; + +static const unsigned xrx200_exin_pin_map[] = {GPIO0, GPIO1, GPIO2, GPIO39, GPIO10, GPIO9}; + +static const unsigned xrx200_pins_exin0[] = {GPIO0}; +static const unsigned xrx200_pins_exin1[] = {GPIO1}; +static const unsigned xrx200_pins_exin2[] = {GPIO2}; +static const unsigned xrx200_pins_exin3[] = {GPIO39}; +static const unsigned xrx200_pins_exin4[] = {GPIO10}; +static const unsigned xrx200_pins_exin5[] = {GPIO9}; + +static const unsigned xrx200_pins_usif_uart_rx[] = {GPIO11}; +static const unsigned xrx200_pins_usif_uart_tx[] = {GPIO12}; +static const unsigned xrx200_pins_usif_uart_rts[] = {GPIO9}; +static const unsigned xrx200_pins_usif_uart_cts[] = {GPIO10}; +static const unsigned xrx200_pins_usif_uart_dtr[] = {GPIO4}; +static const unsigned xrx200_pins_usif_uart_dsr[] = {GPIO6}; +static const unsigned xrx200_pins_usif_uart_dcd[] = {GPIO25}; +static const unsigned xrx200_pins_usif_uart_ri[] = {GPIO27}; + +static const unsigned xrx200_pins_usif_spi_di[] = {GPIO11}; +static const unsigned xrx200_pins_usif_spi_do[] = {GPIO12}; +static const unsigned xrx200_pins_usif_spi_clk[] = {GPIO38}; +static const unsigned xrx200_pins_usif_spi_cs0[] = {GPIO37}; +static const unsigned xrx200_pins_usif_spi_cs1[] = {GPIO39}; +static const unsigned xrx200_pins_usif_spi_cs2[] = {GPIO14}; + +static const unsigned xrx200_pins_stp[] = {GPIO4, GPIO5, GPIO6}; +static const unsigned xrx200_pins_nmi[] = {GPIO8}; +static const unsigned xrx200_pins_mdio[] = {GPIO42, GPIO43}; + +static const unsigned xrx200_pins_dfe_led0[] = {GPIO4}; +static const unsigned xrx200_pins_dfe_led1[] = {GPIO5}; + +static const unsigned xrx200_pins_gphy0_led0[] = {GPIO5}; +static const unsigned xrx200_pins_gphy0_led1[] = {GPIO7}; +static const unsigned xrx200_pins_gphy0_led2[] = {GPIO2}; +static const unsigned xrx200_pins_gphy1_led0[] = {GPIO44}; +static const unsigned xrx200_pins_gphy1_led1[] = {GPIO45}; +static const unsigned xrx200_pins_gphy1_led2[] = {GPIO47}; + +static const unsigned xrx200_pins_ebu_a24[] = {GPIO13}; +static const unsigned xrx200_pins_ebu_clk[] = {GPIO21}; +static const unsigned xrx200_pins_ebu_cs1[] = {GPIO23}; +static const unsigned xrx200_pins_ebu_a23[] = {GPIO24}; +static const unsigned xrx200_pins_ebu_wait[] = {GPIO26}; +static const unsigned xrx200_pins_ebu_a25[] = {GPIO31}; + +static const unsigned xrx200_pins_nand_ale[] = {GPIO13}; +static const unsigned xrx200_pins_nand_cs1[] = {GPIO23}; +static const 
unsigned xrx200_pins_nand_cle[] = {GPIO24}; +static const unsigned xrx200_pins_nand_rdy[] = {GPIO48}; +static const unsigned xrx200_pins_nand_rd[] = {GPIO49}; + +static const unsigned xrx200_pins_spi_di[] = {GPIO16}; +static const unsigned xrx200_pins_spi_do[] = {GPIO17}; +static const unsigned xrx200_pins_spi_clk[] = {GPIO18}; +static const unsigned xrx200_pins_spi_cs1[] = {GPIO15}; +static const unsigned xrx200_pins_spi_cs2[] = {GPIO22}; +static const unsigned xrx200_pins_spi_cs3[] = {GPIO13}; +static const unsigned xrx200_pins_spi_cs4[] = {GPIO10}; +static const unsigned xrx200_pins_spi_cs5[] = {GPIO9}; +static const unsigned xrx200_pins_spi_cs6[] = {GPIO11}; + +static const unsigned xrx200_pins_gpt1[] = {GPIO28}; +static const unsigned xrx200_pins_gpt2[] = {GPIO21}; +static const unsigned xrx200_pins_gpt3[] = {GPIO6}; + +static const unsigned xrx200_pins_clkout0[] = {GPIO8}; +static const unsigned xrx200_pins_clkout1[] = {GPIO7}; +static const unsigned xrx200_pins_clkout2[] = {GPIO3}; +static const unsigned xrx200_pins_clkout3[] = {GPIO2}; + +static const unsigned xrx200_pins_pci_gnt1[] = {GPIO28}; +static const unsigned xrx200_pins_pci_gnt2[] = {GPIO23}; +static const unsigned xrx200_pins_pci_gnt3[] = {GPIO19}; +static const unsigned xrx200_pins_pci_gnt4[] = {GPIO38}; +static const unsigned xrx200_pins_pci_req1[] = {GPIO29}; +static const unsigned xrx200_pins_pci_req2[] = {GPIO31}; +static const unsigned xrx200_pins_pci_req3[] = {GPIO3}; +static const unsigned xrx200_pins_pci_req4[] = {GPIO37}; + +static const struct ltq_pin_group xrx200_grps[] = { + GRP_MUX("exin0", EXIN, xrx200_pins_exin0), + GRP_MUX("exin1", EXIN, xrx200_pins_exin1), + GRP_MUX("exin2", EXIN, xrx200_pins_exin2), + GRP_MUX("exin3", EXIN, xrx200_pins_exin3), + GRP_MUX("exin4", EXIN, xrx200_pins_exin4), + GRP_MUX("exin5", EXIN, xrx200_pins_exin5), + GRP_MUX("ebu a23", EBU, xrx200_pins_ebu_a23), + GRP_MUX("ebu a24", EBU, xrx200_pins_ebu_a24), + GRP_MUX("ebu a25", EBU, xrx200_pins_ebu_a25), + GRP_MUX("ebu clk", EBU, xrx200_pins_ebu_clk), + GRP_MUX("ebu cs1", EBU, xrx200_pins_ebu_cs1), + GRP_MUX("ebu wait", EBU, xrx200_pins_ebu_wait), + GRP_MUX("nand ale", EBU, xrx200_pins_nand_ale), + GRP_MUX("nand cs1", EBU, xrx200_pins_nand_cs1), + GRP_MUX("nand cle", EBU, xrx200_pins_nand_cle), + GRP_MUX("nand rdy", EBU, xrx200_pins_nand_rdy), + GRP_MUX("nand rd", EBU, xrx200_pins_nand_rd), + GRP_MUX("spi_di", SPI, xrx200_pins_spi_di), + GRP_MUX("spi_do", SPI, xrx200_pins_spi_do), + GRP_MUX("spi_clk", SPI, xrx200_pins_spi_clk), + GRP_MUX("spi_cs1", SPI, xrx200_pins_spi_cs1), + GRP_MUX("spi_cs2", SPI, xrx200_pins_spi_cs2), + GRP_MUX("spi_cs3", SPI, xrx200_pins_spi_cs3), + GRP_MUX("spi_cs4", SPI, xrx200_pins_spi_cs4), + GRP_MUX("spi_cs5", SPI, xrx200_pins_spi_cs5), + GRP_MUX("spi_cs6", SPI, xrx200_pins_spi_cs6), + GRP_MUX("usif uart_rx", USIF, xrx200_pins_usif_uart_rx), + GRP_MUX("usif uart_rx", USIF, xrx200_pins_usif_uart_tx), + GRP_MUX("usif uart_rts", USIF, xrx200_pins_usif_uart_rts), + GRP_MUX("usif uart_cts", USIF, xrx200_pins_usif_uart_cts), + GRP_MUX("usif uart_dtr", USIF, xrx200_pins_usif_uart_dtr), + GRP_MUX("usif uart_dsr", USIF, xrx200_pins_usif_uart_dsr), + GRP_MUX("usif uart_dcd", USIF, xrx200_pins_usif_uart_dcd), + GRP_MUX("usif uart_ri", USIF, xrx200_pins_usif_uart_ri), + GRP_MUX("usif spi_di", USIF, xrx200_pins_usif_spi_di), + GRP_MUX("usif spi_do", USIF, xrx200_pins_usif_spi_do), + GRP_MUX("usif spi_clk", USIF, xrx200_pins_usif_spi_clk), + GRP_MUX("usif spi_cs0", USIF, xrx200_pins_usif_spi_cs0), + GRP_MUX("usif 
spi_cs1", USIF, xrx200_pins_usif_spi_cs1), + GRP_MUX("usif spi_cs2", USIF, xrx200_pins_usif_spi_cs2), + GRP_MUX("stp", STP, xrx200_pins_stp), + GRP_MUX("nmi", NMI, xrx200_pins_nmi), + GRP_MUX("gpt1", GPT, xrx200_pins_gpt1), + GRP_MUX("gpt2", GPT, xrx200_pins_gpt2), + GRP_MUX("gpt3", GPT, xrx200_pins_gpt3), + GRP_MUX("clkout0", CGU, xrx200_pins_clkout0), + GRP_MUX("clkout1", CGU, xrx200_pins_clkout1), + GRP_MUX("clkout2", CGU, xrx200_pins_clkout2), + GRP_MUX("clkout3", CGU, xrx200_pins_clkout3), + GRP_MUX("gnt1", PCI, xrx200_pins_pci_gnt1), + GRP_MUX("gnt2", PCI, xrx200_pins_pci_gnt2), + GRP_MUX("gnt3", PCI, xrx200_pins_pci_gnt3), + GRP_MUX("gnt4", PCI, xrx200_pins_pci_gnt4), + GRP_MUX("req1", PCI, xrx200_pins_pci_req1), + GRP_MUX("req2", PCI, xrx200_pins_pci_req2), + GRP_MUX("req3", PCI, xrx200_pins_pci_req3), + GRP_MUX("req4", PCI, xrx200_pins_pci_req4), + GRP_MUX("mdio", MDIO, xrx200_pins_mdio), + GRP_MUX("dfe led0", DFE, xrx200_pins_dfe_led0), + GRP_MUX("dfe led1", DFE, xrx200_pins_dfe_led1), + GRP_MUX("gphy0 led0", GPHY, xrx200_pins_gphy0_led0), + GRP_MUX("gphy0 led1", GPHY, xrx200_pins_gphy0_led1), + GRP_MUX("gphy0 led2", GPHY, xrx200_pins_gphy0_led2), + GRP_MUX("gphy1 led0", GPHY, xrx200_pins_gphy1_led0), + GRP_MUX("gphy1 led1", GPHY, xrx200_pins_gphy1_led1), + GRP_MUX("gphy1 led2", GPHY, xrx200_pins_gphy1_led2), +}; + +static const char * const xrx200_pci_grps[] = {"gnt1", "gnt2", + "gnt3", "gnt4", + "req1", "req2", + "req3", "req4"}; +static const char * const xrx200_spi_grps[] = {"spi_di", "spi_do", + "spi_clk", "spi_cs1", + "spi_cs2", "spi_cs3", + "spi_cs4", "spi_cs5", + "spi_cs6"}; +static const char * const xrx200_cgu_grps[] = {"clkout0", "clkout1", + "clkout2", "clkout3"}; +static const char * const xrx200_ebu_grps[] = {"ebu a23", "ebu a24", + "ebu a25", "ebu cs1", + "ebu wait", "ebu clk", + "nand ale", "nand cs1", + "nand cle", "nand rdy", + "nand rd"}; +static const char * const xrx200_exin_grps[] = {"exin0", "exin1", "exin2", + "exin3", "exin4", "exin5"}; +static const char * const xrx200_gpt_grps[] = {"gpt1", "gpt2", "gpt3"}; +static const char * const xrx200_usif_grps[] = {"usif uart_rx", "usif uart_tx", + "usif uart_rts", "usif uart_cts", + "usif uart_dtr", "usif uart_dsr", + "usif uart_dcd", "usif uart_ri", + "usif spi_di", "usif spi_do", + "usif spi_clk", "usif spi_cs0", + "usif spi_cs1", "usif spi_cs2"}; +static const char * const xrx200_stp_grps[] = {"stp"}; +static const char * const xrx200_nmi_grps[] = {"nmi"}; +static const char * const xrx200_mdio_grps[] = {"mdio"}; +static const char * const xrx200_dfe_grps[] = {"dfe led0", "dfe led1"}; +static const char * const xrx200_gphy_grps[] = {"gphy0 led0", "gphy0 led1", + "gphy0 led2", "gphy1 led0", + "gphy1 led1", "gphy1 led2"}; + +static const struct ltq_pmx_func xrx200_funcs[] = { + {"spi", ARRAY_AND_SIZE(xrx200_spi_grps)}, + {"usif", ARRAY_AND_SIZE(xrx200_usif_grps)}, + {"cgu", ARRAY_AND_SIZE(xrx200_cgu_grps)}, + {"exin", ARRAY_AND_SIZE(xrx200_exin_grps)}, + {"stp", ARRAY_AND_SIZE(xrx200_stp_grps)}, + {"gpt", ARRAY_AND_SIZE(xrx200_gpt_grps)}, + {"nmi", ARRAY_AND_SIZE(xrx200_nmi_grps)}, + {"pci", ARRAY_AND_SIZE(xrx200_pci_grps)}, + {"ebu", ARRAY_AND_SIZE(xrx200_ebu_grps)}, + {"mdio", ARRAY_AND_SIZE(xrx200_mdio_grps)}, + {"dfe", ARRAY_AND_SIZE(xrx200_dfe_grps)}, + {"gphy", ARRAY_AND_SIZE(xrx200_gphy_grps)}, +}; + +/* --------- xrx300 related code --------- */ +#define XRX300_MAX_PIN 64 + +static const struct ltq_mfp_pin xrx300_mfp[] = { + /* pin f0 f1 f2 f3 */ + MFP_XWAY(GPIO0, GPIO, EXIN, EPHY, NONE), + 
MFP_XWAY(GPIO1, GPIO, NONE, EXIN, NONE), + MFP_XWAY(GPIO2, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO3, GPIO, CGU, NONE, NONE), + MFP_XWAY(GPIO4, GPIO, STP, DFE, NONE), + MFP_XWAY(GPIO5, GPIO, STP, EPHY, DFE), + MFP_XWAY(GPIO6, GPIO, STP, NONE, NONE), + MFP_XWAY(GPIO7, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO8, GPIO, CGU, GPHY, EPHY), + MFP_XWAY(GPIO9, GPIO, WIFI, NONE, EXIN), + MFP_XWAY(GPIO10, GPIO, USIF, SPI, EXIN), + MFP_XWAY(GPIO11, GPIO, USIF, WIFI, SPI), + MFP_XWAY(GPIO12, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO13, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO14, GPIO, CGU, USIF, EPHY), + MFP_XWAY(GPIO15, GPIO, SPI, NONE, MCD), + MFP_XWAY(GPIO16, GPIO, SPI, EXIN, NONE), + MFP_XWAY(GPIO17, GPIO, SPI, NONE, NONE), + MFP_XWAY(GPIO18, GPIO, SPI, NONE, NONE), + MFP_XWAY(GPIO19, GPIO, USIF, NONE, EPHY), + MFP_XWAY(GPIO20, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO21, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO22, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO23, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO24, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO25, GPIO, TDM, NONE, NONE), + MFP_XWAY(GPIO26, GPIO, TDM, NONE, NONE), + MFP_XWAY(GPIO27, GPIO, TDM, NONE, NONE), + MFP_XWAY(GPIO28, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO29, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO30, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO31, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO32, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO33, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO34, GPIO, NONE, SSI, NONE), + MFP_XWAY(GPIO35, GPIO, NONE, SSI, NONE), + MFP_XWAY(GPIO36, GPIO, NONE, SSI, NONE), + MFP_XWAY(GPIO37, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO38, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO39, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO40, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO41, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO42, GPIO, MDIO, NONE, NONE), + MFP_XWAY(GPIO43, GPIO, MDIO, NONE, NONE), + MFP_XWAY(GPIO44, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO45, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO46, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO47, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO48, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO49, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO50, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO51, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO52, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO53, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO54, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO55, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO56, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO57, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO58, GPIO, EBU, TDM, NONE), + MFP_XWAY(GPIO59, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO60, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO61, GPIO, EBU, NONE, NONE), + MFP_XWAY(GPIO62, NONE, NONE, NONE, NONE), + MFP_XWAY(GPIO63, NONE, NONE, NONE, NONE), +}; + +static const unsigned xrx300_exin_pin_map[] = {GPIO0, GPIO1, GPIO16, GPIO10, GPIO9}; + +static const unsigned xrx300_pins_exin0[] = {GPIO0}; +static const unsigned xrx300_pins_exin1[] = {GPIO1}; +static const unsigned xrx300_pins_exin2[] = {GPIO16}; +/* EXIN3 is not available on xrX300 */ +static const unsigned xrx300_pins_exin4[] = {GPIO10}; +static const unsigned xrx300_pins_exin5[] = {GPIO9}; + +static const unsigned xrx300_pins_usif_uart_rx[] = {GPIO11}; +static const unsigned xrx300_pins_usif_uart_tx[] = {GPIO10}; + +static const unsigned xrx300_pins_usif_spi_di[] = {GPIO11}; +static const unsigned xrx300_pins_usif_spi_do[] = {GPIO10}; +static const unsigned xrx300_pins_usif_spi_clk[] = {GPIO19}; +static const unsigned xrx300_pins_usif_spi_cs0[] = {GPIO14}; + +static const unsigned xrx300_pins_stp[] = {GPIO4, 
GPIO5, GPIO6}; +static const unsigned xrx300_pins_mdio[] = {GPIO42, GPIO43}; + +static const unsigned xrx300_pins_dfe_led0[] = {GPIO4}; +static const unsigned xrx300_pins_dfe_led1[] = {GPIO5}; + +static const unsigned xrx300_pins_ephy0_led0[] = {GPIO5}; +static const unsigned xrx300_pins_ephy0_led1[] = {GPIO8}; +static const unsigned xrx300_pins_ephy1_led0[] = {GPIO14}; +static const unsigned xrx300_pins_ephy1_led1[] = {GPIO19}; + +static const unsigned xrx300_pins_nand_ale[] = {GPIO13}; +static const unsigned xrx300_pins_nand_cs1[] = {GPIO23}; +static const unsigned xrx300_pins_nand_cle[] = {GPIO24}; +static const unsigned xrx300_pins_nand_rdy[] = {GPIO48}; +static const unsigned xrx300_pins_nand_rd[] = {GPIO49}; +static const unsigned xrx300_pins_nand_d1[] = {GPIO50}; +static const unsigned xrx300_pins_nand_d0[] = {GPIO51}; +static const unsigned xrx300_pins_nand_d2[] = {GPIO52}; +static const unsigned xrx300_pins_nand_d7[] = {GPIO53}; +static const unsigned xrx300_pins_nand_d6[] = {GPIO54}; +static const unsigned xrx300_pins_nand_d5[] = {GPIO55}; +static const unsigned xrx300_pins_nand_d4[] = {GPIO56}; +static const unsigned xrx300_pins_nand_d3[] = {GPIO57}; +static const unsigned xrx300_pins_nand_cs0[] = {GPIO58}; +static const unsigned xrx300_pins_nand_wr[] = {GPIO59}; +static const unsigned xrx300_pins_nand_wp[] = {GPIO60}; +static const unsigned xrx300_pins_nand_se[] = {GPIO61}; + +static const unsigned xrx300_pins_spi_di[] = {GPIO16}; +static const unsigned xrx300_pins_spi_do[] = {GPIO17}; +static const unsigned xrx300_pins_spi_clk[] = {GPIO18}; +static const unsigned xrx300_pins_spi_cs1[] = {GPIO15}; +/* SPI_CS2 is not available on xrX300 */ +/* SPI_CS3 is not available on xrX300 */ +static const unsigned xrx300_pins_spi_cs4[] = {GPIO10}; +/* SPI_CS5 is not available on xrX300 */ +static const unsigned xrx300_pins_spi_cs6[] = {GPIO11}; + +/* CLKOUT0 is not available on xrX300 */ +/* CLKOUT1 is not available on xrX300 */ +static const unsigned xrx300_pins_clkout2[] = {GPIO3}; + +static const struct ltq_pin_group xrx300_grps[] = { + GRP_MUX("exin0", EXIN, xrx300_pins_exin0), + GRP_MUX("exin1", EXIN, xrx300_pins_exin1), + GRP_MUX("exin2", EXIN, xrx300_pins_exin2), + GRP_MUX("exin4", EXIN, xrx300_pins_exin4), + GRP_MUX("exin5", EXIN, xrx300_pins_exin5), + GRP_MUX("nand ale", EBU, xrx300_pins_nand_ale), + GRP_MUX("nand cs1", EBU, xrx300_pins_nand_cs1), + GRP_MUX("nand cle", EBU, xrx300_pins_nand_cle), + GRP_MUX("nand rdy", EBU, xrx300_pins_nand_rdy), + GRP_MUX("nand rd", EBU, xrx300_pins_nand_rd), + GRP_MUX("nand d1", EBU, xrx300_pins_nand_d1), + GRP_MUX("nand d0", EBU, xrx300_pins_nand_d0), + GRP_MUX("nand d2", EBU, xrx300_pins_nand_d2), + GRP_MUX("nand d7", EBU, xrx300_pins_nand_d7), + GRP_MUX("nand d6", EBU, xrx300_pins_nand_d6), + GRP_MUX("nand d5", EBU, xrx300_pins_nand_d5), + GRP_MUX("nand d4", EBU, xrx300_pins_nand_d4), + GRP_MUX("nand d3", EBU, xrx300_pins_nand_d3), + GRP_MUX("nand cs0", EBU, xrx300_pins_nand_cs0), + GRP_MUX("nand wr", EBU, xrx300_pins_nand_wr), + GRP_MUX("nand wp", EBU, xrx300_pins_nand_wp), + GRP_MUX("nand se", EBU, xrx300_pins_nand_se), + GRP_MUX("spi_di", SPI, xrx300_pins_spi_di), + GRP_MUX("spi_do", SPI, xrx300_pins_spi_do), + GRP_MUX("spi_clk", SPI, xrx300_pins_spi_clk), + GRP_MUX("spi_cs1", SPI, xrx300_pins_spi_cs1), + GRP_MUX("spi_cs4", SPI, xrx300_pins_spi_cs4), + GRP_MUX("spi_cs6", SPI, xrx300_pins_spi_cs6), + GRP_MUX("usif uart_rx", USIF, xrx300_pins_usif_uart_rx), + GRP_MUX("usif uart_tx", USIF, xrx300_pins_usif_uart_tx), + GRP_MUX("usif spi_di", 
USIF, xrx300_pins_usif_spi_di), + GRP_MUX("usif spi_do", USIF, xrx300_pins_usif_spi_do), + GRP_MUX("usif spi_clk", USIF, xrx300_pins_usif_spi_clk), + GRP_MUX("usif spi_cs0", USIF, xrx300_pins_usif_spi_cs0), + GRP_MUX("stp", STP, xrx300_pins_stp), + GRP_MUX("clkout2", CGU, xrx300_pins_clkout2), + GRP_MUX("mdio", MDIO, xrx300_pins_mdio), + GRP_MUX("dfe led0", DFE, xrx300_pins_dfe_led0), + GRP_MUX("dfe led1", DFE, xrx300_pins_dfe_led1), + GRP_MUX("ephy0 led0", GPHY, xrx300_pins_ephy0_led0), + GRP_MUX("ephy0 led1", GPHY, xrx300_pins_ephy0_led1), + GRP_MUX("ephy1 led0", GPHY, xrx300_pins_ephy1_led0), + GRP_MUX("ephy1 led1", GPHY, xrx300_pins_ephy1_led1), +}; + +static const char * const xrx300_spi_grps[] = {"spi_di", "spi_do", + "spi_clk", "spi_cs1", + "spi_cs4", "spi_cs6"}; +static const char * const xrx300_cgu_grps[] = {"clkout2"}; +static const char * const xrx300_ebu_grps[] = {"nand ale", "nand cs1", + "nand cle", "nand rdy", + "nand rd", "nand d1", + "nand d0", "nand d2", + "nand d7", "nand d6", + "nand d5", "nand d4", + "nand d3", "nand cs0", + "nand wr", "nand wp", + "nand se"}; +static const char * const xrx300_exin_grps[] = {"exin0", "exin1", "exin2", + "exin4", "exin5"}; +static const char * const xrx300_usif_grps[] = {"usif uart_rx", "usif uart_tx", + "usif spi_di", "usif spi_do", + "usif spi_clk", "usif spi_cs0"}; +static const char * const xrx300_stp_grps[] = {"stp"}; +static const char * const xrx300_mdio_grps[] = {"mdio"}; +static const char * const xrx300_dfe_grps[] = {"dfe led0", "dfe led1"}; +static const char * const xrx300_gphy_grps[] = {"ephy0 led0", "ephy0 led1", + "ephy1 led0", "ephy1 led1"}; + +static const struct ltq_pmx_func xrx300_funcs[] = { + {"spi", ARRAY_AND_SIZE(xrx300_spi_grps)}, + {"usif", ARRAY_AND_SIZE(xrx300_usif_grps)}, + {"cgu", ARRAY_AND_SIZE(xrx300_cgu_grps)}, + {"exin", ARRAY_AND_SIZE(xrx300_exin_grps)}, + {"stp", ARRAY_AND_SIZE(xrx300_stp_grps)}, + {"ebu", ARRAY_AND_SIZE(xrx300_ebu_grps)}, + {"mdio", ARRAY_AND_SIZE(xrx300_mdio_grps)}, + {"dfe", ARRAY_AND_SIZE(xrx300_dfe_grps)}, + {"ephy", ARRAY_AND_SIZE(xrx300_gphy_grps)}, +}; + /* --------- pinconf related code --------- */ static int xway_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin, @@ -676,6 +1563,10 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val) { struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev); + if (PORT(pin) == PORT3) + gpio_setbit(info->membase[0], GPIO3_OD, PORT_PIN(pin)); + else + gpio_setbit(info->membase[0], GPIO_OD(pin), PORT_PIN(pin)); gpio_setbit(info->membase[0], GPIO_DIR(pin), PORT_PIN(pin)); xway_gpio_set(chip, pin, val); @@ -695,10 +1586,7 @@ static struct gpio_chip xway_chip = { /* --------- register the pinctrl layer --------- */ -static const unsigned xway_exin_pin_map[] = {GPIO0, GPIO1, GPIO2, GPIO39, GPIO46, GPIO9}; -static const unsigned ase_exin_pins_map[] = {GPIO6, GPIO29, GPIO0}; - -static struct pinctrl_xway_soc { +struct pinctrl_xway_soc { int pin_count; const struct ltq_mfp_pin *mfp; const struct ltq_pin_group *grps; @@ -707,22 +1595,54 @@ static struct pinctrl_xway_soc { unsigned int num_funcs; const unsigned *exin; unsigned int num_exin; -} soc_cfg[] = { - /* legacy xway */ - {XWAY_MAX_PIN, xway_mfp, - xway_grps, ARRAY_SIZE(xway_grps), - danube_funcs, ARRAY_SIZE(danube_funcs), - xway_exin_pin_map, 3}, - /* xway xr9 series */ - {XR9_MAX_PIN, xway_mfp, - xway_grps, ARRAY_SIZE(xway_grps), - xrx_funcs, ARRAY_SIZE(xrx_funcs), - xway_exin_pin_map, 6}, - /* xway ase series */ - {XWAY_MAX_PIN, ase_mfp, - ase_grps, 
ARRAY_SIZE(ase_grps), - ase_funcs, ARRAY_SIZE(ase_funcs), - ase_exin_pins_map, 3}, +}; + +/* xway xr9 series (DEPRECATED: Use XWAY xRX100/xRX200 Family) */ +static struct pinctrl_xway_soc xr9_pinctrl = { + XR9_MAX_PIN, xway_mfp, + xway_grps, ARRAY_SIZE(xway_grps), + xrx_funcs, ARRAY_SIZE(xrx_funcs), + xway_exin_pin_map, 6 +}; + +/* XWAY AMAZON Family */ +static struct pinctrl_xway_soc ase_pinctrl = { + ASE_MAX_PIN, ase_mfp, + ase_grps, ARRAY_SIZE(ase_grps), + ase_funcs, ARRAY_SIZE(ase_funcs), + ase_exin_pin_map, 3 +}; + +/* XWAY DANUBE Family */ +static struct pinctrl_xway_soc danube_pinctrl = { + DANUBE_MAX_PIN, danube_mfp, + danube_grps, ARRAY_SIZE(danube_grps), + danube_funcs, ARRAY_SIZE(danube_funcs), + danube_exin_pin_map, 3 +}; + +/* XWAY xRX100 Family */ +static struct pinctrl_xway_soc xrx100_pinctrl = { + XRX100_MAX_PIN, xrx100_mfp, + xrx100_grps, ARRAY_SIZE(xrx100_grps), + xrx100_funcs, ARRAY_SIZE(xrx100_funcs), + xrx100_exin_pin_map, 6 +}; + +/* XWAY xRX200 Family */ +static struct pinctrl_xway_soc xrx200_pinctrl = { + XRX200_MAX_PIN, xrx200_mfp, + xrx200_grps, ARRAY_SIZE(xrx200_grps), + xrx200_funcs, ARRAY_SIZE(xrx200_funcs), + xrx200_exin_pin_map, 6 +}; + +/* XWAY xRX300 Family */ +static struct pinctrl_xway_soc xrx300_pinctrl = { + XRX300_MAX_PIN, xrx300_mfp, + xrx300_grps, ARRAY_SIZE(xrx300_grps), + xrx300_funcs, ARRAY_SIZE(xrx300_funcs), + xrx300_exin_pin_map, 5 }; static struct pinctrl_gpio_range xway_gpio_range = { @@ -731,9 +1651,14 @@ static struct pinctrl_gpio_range xway_gpio_range = { }; static const struct of_device_id xway_match[] = { - { .compatible = "lantiq,pinctrl-xway", .data = &soc_cfg[0]}, - { .compatible = "lantiq,pinctrl-xr9", .data = &soc_cfg[1]}, - { .compatible = "lantiq,pinctrl-ase", .data = &soc_cfg[2]}, + { .compatible = "lantiq,pinctrl-xway", .data = &danube_pinctrl}, /*DEPRECATED*/ + { .compatible = "lantiq,pinctrl-xr9", .data = &xr9_pinctrl}, /*DEPRECATED*/ + { .compatible = "lantiq,pinctrl-ase", .data = &ase_pinctrl}, /*DEPRECATED*/ + { .compatible = "lantiq,ase-pinctrl", .data = &ase_pinctrl}, + { .compatible = "lantiq,danube-pinctrl", .data = &danube_pinctrl}, + { .compatible = "lantiq,xrx100-pinctrl", .data = &xrx100_pinctrl}, + { .compatible = "lantiq,xrx200-pinctrl", .data = &xrx200_pinctrl}, + { .compatible = "lantiq,xrx300-pinctrl", .data = &xrx300_pinctrl}, {}, }; MODULE_DEVICE_TABLE(of, xway_match); @@ -755,7 +1680,7 @@ static int pinmux_xway_probe(struct platform_device *pdev) if (match) xway_soc = (const struct pinctrl_xway_soc *) match->data; else - xway_soc = &soc_cfg[0]; + xway_soc = &danube_pinctrl; /* find out how many pads we have */ xway_chip.ngpio = xway_soc->pin_count; diff --git a/drivers/pinctrl/pxa/Kconfig b/drivers/pinctrl/pxa/Kconfig new file mode 100644 index 000000000000..990667ff772c --- /dev/null +++ b/drivers/pinctrl/pxa/Kconfig @@ -0,0 +1,17 @@ +if (ARCH_PXA || COMPILE_TEST) + +config PINCTRL_PXA + bool + select PINMUX + select PINCONF + select GENERIC_PINCONF + +config PINCTRL_PXA27X + tristate "Marvell PXA27x pin controller driver" + select PINCTRL_PXA + default y if PXA27x + help + This is the pinctrl, pinmux, pinconf driver for the Marvell + PXA2xx block found in the pxa25x and pxa27x platforms. 
+ +endif diff --git a/drivers/pinctrl/pxa/Makefile b/drivers/pinctrl/pxa/Makefile new file mode 100644 index 000000000000..f1d56af2bfc0 --- /dev/null +++ b/drivers/pinctrl/pxa/Makefile @@ -0,0 +1,2 @@ +# Marvell PXA pin control drivers +obj-$(CONFIG_PINCTRL_PXA27X) += pinctrl-pxa2xx.o pinctrl-pxa27x.o diff --git a/drivers/pinctrl/pxa/pinctrl-pxa27x.c b/drivers/pinctrl/pxa/pinctrl-pxa27x.c new file mode 100644 index 000000000000..2e2c3709ef05 --- /dev/null +++ b/drivers/pinctrl/pxa/pinctrl-pxa27x.c @@ -0,0 +1,566 @@ +/* + * Marvell PXA27x family pin control + * + * Copyright (C) 2015 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + */ +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-pxa2xx.h" + +static const struct pxa_desc_pin pxa27x_pins[] = { + PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(0)), + PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(1)), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(9), + PXA_FUNCTION(0, 3, "FFCTS"), + PXA_FUNCTION(1, 1, "HZ_CLK"), + PXA_FUNCTION(1, 3, "CHOUT<0>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(10), + PXA_FUNCTION(0, 1, "FFDCD"), + PXA_FUNCTION(0, 3, "USB_P3_5"), + PXA_FUNCTION(1, 1, "HZ_CLK"), + PXA_FUNCTION(1, 3, "CHOUT<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(11), + PXA_FUNCTION(0, 1, "EXT_SYNC<0>"), + PXA_FUNCTION(0, 2, "SSPRXD2"), + PXA_FUNCTION(0, 3, "USB_P3_1"), + PXA_FUNCTION(1, 1, "CHOUT<0>"), + PXA_FUNCTION(1, 1, "PWM_OUT<2>"), + PXA_FUNCTION(1, 3, "48_MHz")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(12), + PXA_FUNCTION(0, 1, "EXT_SYNC<1>"), + PXA_FUNCTION(0, 2, "CIF_DD<7>"), + PXA_FUNCTION(1, 1, "CHOUT<1>"), + PXA_FUNCTION(1, 1, "PWM_OUT<3>"), + PXA_FUNCTION(1, 3, "48_MHz")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(13), + PXA_FUNCTION(0, 1, "CLK_EXT"), + PXA_FUNCTION(0, 2, "KP_DKIN<7>"), + PXA_FUNCTION(0, 3, "KP_MKIN<7>"), + PXA_FUNCTION(1, 1, "SSPTXD2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(14), + PXA_FUNCTION(0, 1, "L_VSYNC"), + PXA_FUNCTION(0, 2, "SSPSFRM2"), + PXA_FUNCTION(1, 1, "SSPSFRM2"), + PXA_FUNCTION(1, 3, "UCLK")), + PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(15)), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(16), + PXA_FUNCTION(0, 1, "KP_MKIN<5>"), + PXA_FUNCTION(1, 2, "PWM_OUT<0>"), + PXA_FUNCTION(1, 3, "FFTXD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(17), + PXA_FUNCTION(0, 1, "KP_MKIN<6>"), + PXA_FUNCTION(0, 2, "CIF_DD<6>"), + PXA_FUNCTION(1, 2, "PWM_OUT<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(18), + PXA_FUNCTION(0, 1, "RDY")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(19), + PXA_FUNCTION(0, 1, "SSPSCLK2"), + PXA_FUNCTION(0, 3, "FFRXD"), + PXA_FUNCTION(1, 1, "SSPSCLK2"), + PXA_FUNCTION(1, 2, "L_CS"), + PXA_FUNCTION(1, 3, "nURST")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(20), + PXA_FUNCTION(0, 1, "DREQ<0>"), + PXA_FUNCTION(0, 2, "MBREQ"), + PXA_FUNCTION(1, 1, "nSDCS<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(21), + PXA_FUNCTION(1, 1, "nSDCS<3>"), + PXA_FUNCTION(1, 2, "DVAL<0>"), + PXA_FUNCTION(1, 3, "MBGNT")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(22), + PXA_FUNCTION(0, 1, "SSPEXTCLK2"), + PXA_FUNCTION(0, 2, "SSPSCLKEN2"), + PXA_FUNCTION(0, 3, "SSPSCLK2"), + PXA_FUNCTION(1, 1, "KP_MKOUT<7>"), + PXA_FUNCTION(1, 2, "SSPSYSCLK2"), + PXA_FUNCTION(1, 3, "SSPSCLK2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(23), + PXA_FUNCTION(0, 2, "SSPSCLK"), + PXA_FUNCTION(1, 1, "CIF_MCLK"), + PXA_FUNCTION(1, 1, "SSPSCLK")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(24), + PXA_FUNCTION(0, 1, "CIF_FV"), 
+ PXA_FUNCTION(0, 2, "SSPSFRM"), + PXA_FUNCTION(1, 1, "CIF_FV"), + PXA_FUNCTION(1, 2, "SSPSFRM")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(25), + PXA_FUNCTION(0, 1, "CIF_LV"), + PXA_FUNCTION(1, 1, "CIF_LV"), + PXA_FUNCTION(1, 2, "SSPTXD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(26), + PXA_FUNCTION(0, 1, "SSPRXD"), + PXA_FUNCTION(0, 2, "CIF_PCLK"), + PXA_FUNCTION(0, 3, "FFCTS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(27), + PXA_FUNCTION(0, 1, "SSPEXTCLK"), + PXA_FUNCTION(0, 2, "SSPSCLKEN"), + PXA_FUNCTION(0, 3, "CIF_DD<0>"), + PXA_FUNCTION(1, 1, "SSPSYSCLK"), + PXA_FUNCTION(1, 3, "FFRTS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(28), + PXA_FUNCTION(0, 1, "AC97_BITCLK"), + PXA_FUNCTION(0, 2, "I2S_BITCLK"), + PXA_FUNCTION(0, 3, "SSPSFRM"), + PXA_FUNCTION(1, 1, "I2S_BITCLK"), + PXA_FUNCTION(1, 3, "SSPSFRM")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(29), + PXA_FUNCTION(0, 1, "AC97_SDATA_IN_0"), + PXA_FUNCTION(0, 2, "I2S_SDATA_IN"), + PXA_FUNCTION(0, 3, "SSPSCLK"), + PXA_FUNCTION(1, 1, "SSPRXD2"), + PXA_FUNCTION(1, 3, "SSPSCLK")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(30), + PXA_FUNCTION(1, 1, "I2S_SDATA_OUT"), + PXA_FUNCTION(1, 2, "AC97_SDATA_OUT"), + PXA_FUNCTION(1, 3, "USB_P3_2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(31), + PXA_FUNCTION(1, 1, "I2S_SYNC"), + PXA_FUNCTION(1, 2, "AC97_SYNC"), + PXA_FUNCTION(1, 3, "USB_P3_6")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(32), + PXA_FUNCTION(1, 1, "MSSCLK"), + PXA_FUNCTION(1, 2, "MMCLK")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(33), + PXA_FUNCTION(0, 1, "FFRXD"), + PXA_FUNCTION(0, 2, "FFDSR"), + PXA_FUNCTION(1, 1, "DVAL<1>"), + PXA_FUNCTION(1, 2, "nCS<5>"), + PXA_FUNCTION(1, 3, "MBGNT")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(34), + PXA_FUNCTION(0, 1, "FFRXD"), + PXA_FUNCTION(0, 2, "KP_MKIN<3>"), + PXA_FUNCTION(0, 3, "SSPSCLK3"), + PXA_FUNCTION(1, 1, "USB_P2_2"), + PXA_FUNCTION(1, 3, "SSPSCLK3")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(35), + PXA_FUNCTION(0, 1, "FFCTS"), + PXA_FUNCTION(0, 2, "USB_P2_1"), + PXA_FUNCTION(0, 3, "SSPSFRM3"), + PXA_FUNCTION(1, 2, "KP_MKOUT<6>"), + PXA_FUNCTION(1, 3, "SSPTXD3")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(36), + PXA_FUNCTION(0, 1, "FFDCD"), + PXA_FUNCTION(0, 2, "SSPSCLK2"), + PXA_FUNCTION(0, 3, "KP_MKIN<7>"), + PXA_FUNCTION(1, 1, "USB_P2_4"), + PXA_FUNCTION(1, 2, "SSPSCLK2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(37), + PXA_FUNCTION(0, 1, "FFDSR"), + PXA_FUNCTION(0, 2, "SSPSFRM2"), + PXA_FUNCTION(0, 3, "KP_MKIN<3>"), + PXA_FUNCTION(1, 1, "USB_P2_8"), + PXA_FUNCTION(1, 2, "SSPSFRM2"), + PXA_FUNCTION(1, 3, "FFTXD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(38), + PXA_FUNCTION(0, 1, "FFRI"), + PXA_FUNCTION(0, 2, "KP_MKIN<4>"), + PXA_FUNCTION(0, 3, "USB_P2_3"), + PXA_FUNCTION(1, 1, "SSPTXD3"), + PXA_FUNCTION(1, 2, "SSPTXD2"), + PXA_FUNCTION(1, 3, "PWM_OUT<0>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(39), + PXA_FUNCTION(0, 1, "KP_MKIN<4>"), + PXA_FUNCTION(0, 3, "SSPSFRM3"), + PXA_FUNCTION(1, 1, "USB_P2_6"), + PXA_FUNCTION(1, 2, "FFTXD"), + PXA_FUNCTION(1, 3, "SSPSFRM3")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(40), + PXA_FUNCTION(0, 1, "SSPRXD2"), + PXA_FUNCTION(0, 3, "USB_P2_5"), + PXA_FUNCTION(1, 1, "KP_MKOUT<6>"), + PXA_FUNCTION(1, 2, "FFDTR"), + PXA_FUNCTION(1, 3, "SSPSCLK3")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(41), + PXA_FUNCTION(0, 1, "FFRXD"), + PXA_FUNCTION(0, 2, "USB_P2_7"), + PXA_FUNCTION(0, 3, "SSPRXD3"), + PXA_FUNCTION(1, 1, "KP_MKOUT<7>"), + PXA_FUNCTION(1, 2, "FFRTS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(42), + PXA_FUNCTION(0, 1, "BTRXD"), + PXA_FUNCTION(0, 2, "ICP_RXD"), + PXA_FUNCTION(1, 3, "CIF_MCLK")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(43), + PXA_FUNCTION(0, 3, "CIF_FV"), + PXA_FUNCTION(1, 1, "ICP_TXD"), + PXA_FUNCTION(1, 2, 
"BTTXD"), + PXA_FUNCTION(1, 3, "CIF_FV")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(44), + PXA_FUNCTION(0, 1, "BTCTS"), + PXA_FUNCTION(0, 3, "CIF_LV"), + PXA_FUNCTION(1, 3, "CIF_LV")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(45), + PXA_FUNCTION(0, 3, "CIF_PCLK"), + PXA_FUNCTION(1, 1, "AC97_SYSCLK"), + PXA_FUNCTION(1, 2, "BTRTS"), + PXA_FUNCTION(1, 3, "SSPSYSCLK3")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(46), + PXA_FUNCTION(0, 1, "ICP_RXD"), + PXA_FUNCTION(0, 2, "STD_RXD"), + PXA_FUNCTION(1, 2, "PWM_OUT<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(47), + PXA_FUNCTION(0, 1, "CIF_DD<0>"), + PXA_FUNCTION(1, 1, "STD_TXD"), + PXA_FUNCTION(1, 2, "ICP_TXD"), + PXA_FUNCTION(1, 3, "PWM_OUT<3>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(48), + PXA_FUNCTION(0, 1, "CIF_DD<5>"), + PXA_FUNCTION(1, 1, "BB_OB_DAT<1>"), + PXA_FUNCTION(1, 2, "nPOE")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(49), + PXA_FUNCTION(1, 2, "nPWE")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(50), + PXA_FUNCTION(0, 1, "CIF_DD<3>"), + PXA_FUNCTION(0, 3, "SSPSCLK2"), + PXA_FUNCTION(1, 1, "BB_OB_DAT<2>"), + PXA_FUNCTION(1, 2, "nPIOR"), + PXA_FUNCTION(1, 3, "SSPSCLK2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(51), + PXA_FUNCTION(0, 1, "CIF_DD<2>"), + PXA_FUNCTION(1, 1, "BB_OB_DAT<3>"), + PXA_FUNCTION(1, 2, "nPIOW")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(52), + PXA_FUNCTION(0, 1, "CIF_DD<4>"), + PXA_FUNCTION(0, 2, "SSPSCLK3"), + PXA_FUNCTION(1, 1, "BB_OB_CLK"), + PXA_FUNCTION(1, 2, "SSPSCLK3")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(53), + PXA_FUNCTION(0, 1, "FFRXD"), + PXA_FUNCTION(0, 2, "USB_P2_3"), + PXA_FUNCTION(1, 1, "BB_OB_STB"), + PXA_FUNCTION(1, 2, "CIF_MCLK"), + PXA_FUNCTION(1, 3, "SSPSYSCLK")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(54), + PXA_FUNCTION(0, 2, "BB_OB_WAIT"), + PXA_FUNCTION(0, 3, "CIF_PCLK"), + PXA_FUNCTION(1, 2, "nPCE<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(55), + PXA_FUNCTION(0, 1, "CIF_DD<1>"), + PXA_FUNCTION(0, 2, "BB_IB_DAT<1>"), + PXA_FUNCTION(1, 2, "nPREG")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(56), + PXA_FUNCTION(0, 1, "nPWAIT"), + PXA_FUNCTION(0, 2, "BB_IB_DAT<2>"), + PXA_FUNCTION(1, 1, "USB_P3_4")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(57), + PXA_FUNCTION(0, 1, "nIOS16"), + PXA_FUNCTION(0, 2, "BB_IB_DAT<3>"), + PXA_FUNCTION(1, 3, "SSPTXD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(58), + PXA_FUNCTION(0, 2, "LDD<0>"), + PXA_FUNCTION(1, 2, "LDD<0>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(59), + PXA_FUNCTION(0, 2, "LDD<1>"), + PXA_FUNCTION(1, 2, "LDD<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(60), + PXA_FUNCTION(0, 2, "LDD<2>"), + PXA_FUNCTION(1, 2, "LDD<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(61), + PXA_FUNCTION(0, 2, "LDD<3>"), + PXA_FUNCTION(1, 2, "LDD<3>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(62), + PXA_FUNCTION(0, 2, "LDD<4>"), + PXA_FUNCTION(1, 2, "LDD<4>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(63), + PXA_FUNCTION(0, 2, "LDD<5>"), + PXA_FUNCTION(1, 2, "LDD<5>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(64), + PXA_FUNCTION(0, 2, "LDD<6>"), + PXA_FUNCTION(1, 2, "LDD<6>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(65), + PXA_FUNCTION(0, 2, "LDD<7>"), + PXA_FUNCTION(1, 2, "LDD<7>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(66), + PXA_FUNCTION(0, 2, "LDD<8>"), + PXA_FUNCTION(1, 2, "LDD<8>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(67), + PXA_FUNCTION(0, 2, "LDD<9>"), + PXA_FUNCTION(1, 2, "LDD<9>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(68), + PXA_FUNCTION(0, 2, "LDD<10>"), + PXA_FUNCTION(1, 2, "LDD<10>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(69), + PXA_FUNCTION(0, 2, "LDD<11>"), + PXA_FUNCTION(1, 2, "LDD<11>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(70), + PXA_FUNCTION(0, 2, "LDD<12>"), + PXA_FUNCTION(1, 2, "LDD<12>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(71), + PXA_FUNCTION(0, 2, "LDD<13>"), + 
PXA_FUNCTION(1, 2, "LDD<13>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(72), + PXA_FUNCTION(0, 2, "LDD<14>"), + PXA_FUNCTION(1, 2, "LDD<14>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(73), + PXA_FUNCTION(0, 2, "LDD<15>"), + PXA_FUNCTION(1, 2, "LDD<15>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(74), + PXA_FUNCTION(1, 2, "L_FCLK_RD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(75), + PXA_FUNCTION(1, 2, "L_LCLK_A0")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(76), + PXA_FUNCTION(1, 2, "L_PCLK_WR")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(77), + PXA_FUNCTION(1, 2, "L_BIAS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(78), + PXA_FUNCTION(1, 1, "nPCE<2>"), + PXA_FUNCTION(1, 2, "nCS<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(79), + PXA_FUNCTION(1, 1, "PSKTSEL"), + PXA_FUNCTION(1, 2, "nCS<3>"), + PXA_FUNCTION(1, 3, "PWM_OUT<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(80), + PXA_FUNCTION(0, 1, "DREQ<1>"), + PXA_FUNCTION(0, 2, "MBREQ"), + PXA_FUNCTION(1, 2, "nCS<4>"), + PXA_FUNCTION(1, 3, "PWM_OUT<3>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(81), + PXA_FUNCTION(0, 2, "CIF_DD<0>"), + PXA_FUNCTION(1, 1, "SSPTXD3"), + PXA_FUNCTION(1, 2, "BB_OB_DAT<0>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(82), + PXA_FUNCTION(0, 1, "SSPRXD3"), + PXA_FUNCTION(0, 2, "BB_IB_DAT<0>"), + PXA_FUNCTION(0, 3, "CIF_DD<5>"), + PXA_FUNCTION(1, 3, "FFDTR")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(83), + PXA_FUNCTION(0, 1, "SSPSFRM3"), + PXA_FUNCTION(0, 2, "BB_IB_CLK"), + PXA_FUNCTION(0, 3, "CIF_DD<5>"), + PXA_FUNCTION(1, 1, "SSPSFRM3"), + PXA_FUNCTION(1, 2, "FFTXD"), + PXA_FUNCTION(1, 3, "FFRTS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(84), + PXA_FUNCTION(0, 1, "SSPCLK3"), + PXA_FUNCTION(0, 2, "BB_IB_STB"), + PXA_FUNCTION(0, 3, "CIF_FV"), + PXA_FUNCTION(1, 1, "SSPCLK3"), + PXA_FUNCTION(1, 3, "CIF_FV")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(85), + PXA_FUNCTION(0, 1, "FFRXD"), + PXA_FUNCTION(0, 2, "DREQ<2>"), + PXA_FUNCTION(0, 3, "CIF_LV"), + PXA_FUNCTION(1, 1, "nPCE<1>"), + PXA_FUNCTION(1, 2, "BB_IB_WAIT"), + PXA_FUNCTION(1, 3, "CIF_LV")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(86), + PXA_FUNCTION(0, 1, "SSPRXD2"), + PXA_FUNCTION(0, 2, "LDD<16>"), + PXA_FUNCTION(0, 3, "USB_P3_5"), + PXA_FUNCTION(1, 1, "nPCE<1>"), + PXA_FUNCTION(1, 2, "LDD<16>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(87), + PXA_FUNCTION(0, 1, "nPCE<2>"), + PXA_FUNCTION(0, 2, "LDD<17>"), + PXA_FUNCTION(0, 3, "USB_P3_1"), + PXA_FUNCTION(1, 1, "SSPTXD2"), + PXA_FUNCTION(1, 2, "LDD<17>"), + PXA_FUNCTION(1, 3, "SSPSFRM2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(88), + PXA_FUNCTION(0, 1, "USBHPWR<1>"), + PXA_FUNCTION(0, 2, "SSPRXD2"), + PXA_FUNCTION(0, 3, "SSPSFRM2"), + PXA_FUNCTION(1, 2, "SSPTXD2"), + PXA_FUNCTION(1, 3, "SSPSFRM2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(89), + PXA_FUNCTION(0, 1, "SSPRXD3"), + PXA_FUNCTION(0, 3, "FFRI"), + PXA_FUNCTION(1, 1, "AC97_SYSCLK"), + PXA_FUNCTION(1, 2, "USBHPEN<1>"), + PXA_FUNCTION(1, 3, "SSPTXD2")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(90), + PXA_FUNCTION(0, 1, "KP_MKIN<5>"), + PXA_FUNCTION(0, 3, "USB_P3_5"), + PXA_FUNCTION(1, 1, "CIF_DD<4>"), + PXA_FUNCTION(1, 2, "nURST")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(91), + PXA_FUNCTION(0, 1, "KP_MKIN<6>"), + PXA_FUNCTION(0, 3, "USB_P3_1"), + PXA_FUNCTION(1, 1, "CIF_DD<5>"), + PXA_FUNCTION(1, 2, "UCLK")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(92), + PXA_FUNCTION(0, 1, "MMDAT<0>"), + PXA_FUNCTION(1, 1, "MMDAT<0>"), + PXA_FUNCTION(1, 2, "MSBS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(93), + PXA_FUNCTION(0, 1, "KP_DKIN<0>"), + PXA_FUNCTION(0, 2, "CIF_DD<6>"), + PXA_FUNCTION(1, 1, "AC97_SDATA_OUT")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(94), + PXA_FUNCTION(0, 1, "KP_DKIN<1>"), + PXA_FUNCTION(0, 2, "CIF_DD<5>"), + PXA_FUNCTION(1, 1, "AC97_SYNC")), + 
PXA_GPIO_PIN(PXA_PINCTRL_PIN(95), + PXA_FUNCTION(0, 1, "KP_DKIN<2>"), + PXA_FUNCTION(0, 2, "CIF_DD<4>"), + PXA_FUNCTION(0, 3, "KP_MKIN<6>"), + PXA_FUNCTION(1, 1, "AC97_RESET_n")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(96), + PXA_FUNCTION(0, 1, "KP_DKIN<3>"), + PXA_FUNCTION(0, 2, "MBREQ"), + PXA_FUNCTION(0, 3, "FFRXD"), + PXA_FUNCTION(1, 2, "DVAL<1>"), + PXA_FUNCTION(1, 3, "KP_MKOUT<6>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(97), + PXA_FUNCTION(0, 1, "KP_DKIN<4>"), + PXA_FUNCTION(0, 2, "DREQ<1>"), + PXA_FUNCTION(0, 3, "KP_MKIN<3>"), + PXA_FUNCTION(1, 2, "MBGNT")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(98), + PXA_FUNCTION(0, 1, "KP_DKIN<5>"), + PXA_FUNCTION(0, 2, "CIF_DD<0>"), + PXA_FUNCTION(0, 3, "KP_MKIN<4>"), + PXA_FUNCTION(1, 1, "AC97_SYSCLK"), + PXA_FUNCTION(1, 3, "FFRTS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(99), + PXA_FUNCTION(0, 1, "KP_DKIN<6>"), + PXA_FUNCTION(0, 2, "AC97_SDATA_IN_1"), + PXA_FUNCTION(0, 3, "KP_MKIN<5>"), + PXA_FUNCTION(1, 3, "FFTXD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(100), + PXA_FUNCTION(0, 1, "KP_MKIN<0>"), + PXA_FUNCTION(0, 2, "DREQ<2>"), + PXA_FUNCTION(0, 3, "FFCTS")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(101), + PXA_FUNCTION(0, 1, "KP_MKIN<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(102), + PXA_FUNCTION(0, 1, "KP_MKIN<2>"), + PXA_FUNCTION(0, 3, "FFRXD"), + PXA_FUNCTION(1, 1, "nPCE<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(103), + PXA_FUNCTION(0, 1, "CIF_DD<3>"), + PXA_FUNCTION(1, 2, "KP_MKOUT<0>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(104), + PXA_FUNCTION(0, 1, "CIF_DD<2>"), + PXA_FUNCTION(1, 1, "PSKTSEL"), + PXA_FUNCTION(1, 2, "KP_MKOUT<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(105), + PXA_FUNCTION(0, 1, "CIF_DD<1>"), + PXA_FUNCTION(1, 1, "nPCE<2>"), + PXA_FUNCTION(1, 2, "KP_MKOUT<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(106), + PXA_FUNCTION(0, 1, "CIF_DD<9>"), + PXA_FUNCTION(1, 2, "KP_MKOUT<3>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(107), + PXA_FUNCTION(0, 1, "CIF_DD<8>"), + PXA_FUNCTION(1, 2, "KP_MKOUT<4>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(108), + PXA_FUNCTION(0, 1, "CIF_DD<7>"), + PXA_FUNCTION(1, 1, "CHOUT<0>"), + PXA_FUNCTION(1, 2, "KP_MKOUT<5>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(109), + PXA_FUNCTION(0, 1, "MMDAT<1>"), + PXA_FUNCTION(0, 2, "MSSDIO"), + PXA_FUNCTION(1, 1, "MMDAT<1>"), + PXA_FUNCTION(1, 2, "MSSDIO")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(110), + PXA_FUNCTION(0, 1, "MMDAT<2>"), + PXA_FUNCTION(1, 1, "MMDAT<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(111), + PXA_FUNCTION(0, 1, "MMDAT<3>"), + PXA_FUNCTION(1, 1, "MMDAT<3>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(112), + PXA_FUNCTION(0, 1, "MMCMD"), + PXA_FUNCTION(0, 2, "nMSINS"), + PXA_FUNCTION(1, 1, "MMCMD")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(113), + PXA_FUNCTION(0, 3, "USB_P3_3"), + PXA_FUNCTION(1, 1, "I2S_SYSCLK"), + PXA_FUNCTION(1, 2, "AC97_RESET_n")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(114), + PXA_FUNCTION(0, 1, "CIF_DD<1>"), + PXA_FUNCTION(1, 1, "UEN"), + PXA_FUNCTION(1, 2, "UVS0")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(115), + PXA_FUNCTION(0, 1, "DREQ<0>"), + PXA_FUNCTION(0, 2, "CIF_DD<3>"), + PXA_FUNCTION(0, 3, "MBREQ"), + PXA_FUNCTION(1, 1, "UEN"), + PXA_FUNCTION(1, 2, "nUVS1"), + PXA_FUNCTION(1, 3, "PWM_OUT<1>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(116), + PXA_FUNCTION(0, 1, "CIF_DD<2>"), + PXA_FUNCTION(0, 2, "AC97_SDATA_IN_0"), + PXA_FUNCTION(0, 3, "UDET"), + PXA_FUNCTION(1, 1, "DVAL<0>"), + PXA_FUNCTION(1, 2, "nUVS2"), + PXA_FUNCTION(1, 3, "MBGNT")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(117), + PXA_FUNCTION(0, 1, "SCL"), + PXA_FUNCTION(1, 1, "SCL")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(118), + PXA_FUNCTION(0, 1, "SDA"), + PXA_FUNCTION(1, 1, "SDA")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(119), 
+ PXA_FUNCTION(0, 1, "USBHPWR<2>")), + PXA_GPIO_PIN(PXA_PINCTRL_PIN(120), + PXA_FUNCTION(1, 2, "USBHPEN<2>")), +}; + +static int pxa27x_pinctrl_probe(struct platform_device *pdev) +{ + int ret, i; + void __iomem *base_af[8]; + void __iomem *base_dir[4]; + void __iomem *base_sleep[4]; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base_af[0] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base_af[0])) + return PTR_ERR(base_af[0]); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + base_dir[0] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base_dir[0])) + return PTR_ERR(base_dir[0]); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + base_dir[3] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base_dir[3])) + return PTR_ERR(base_dir[3]); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + base_sleep[0] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base_sleep[0])) + return PTR_ERR(base_sleep[0]); + + for (i = 0; i < ARRAY_SIZE(base_af); i++) + base_af[i] = base_af[0] + sizeof(base_af[0]) * i; + for (i = 0; i < 3; i++) + base_dir[i] = base_dir[0] + sizeof(base_dir[0]) * i; + for (i = 0; i < ARRAY_SIZE(base_sleep); i++) + base_sleep[i] = base_sleep[0] + sizeof(base_af[0]) * i; + + ret = pxa2xx_pinctrl_init(pdev, pxa27x_pins, ARRAY_SIZE(pxa27x_pins), + base_af, base_dir, base_sleep); + return ret; +} + +static const struct of_device_id pxa27x_pinctrl_match[] = { + { .compatible = "marvell,pxa27x-pinctrl", }, + {} +}; +MODULE_DEVICE_TABLE(of, pxa27x_pinctrl_match); + +static struct platform_driver pxa27x_pinctrl_driver = { + .probe = pxa27x_pinctrl_probe, + .driver = { + .name = "pxa27x-pinctrl", + .of_match_table = pxa27x_pinctrl_match, + }, +}; +module_platform_driver(pxa27x_pinctrl_driver); + +MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>"); +MODULE_DESCRIPTION("Marvell PXA27x pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c new file mode 100644 index 000000000000..d90e205cf809 --- /dev/null +++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c @@ -0,0 +1,436 @@ +/* + * Marvell PXA2xx family pin control + * + * Copyright (C) 2015 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + */ + +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/module.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "../pinctrl-utils.h" +#include "pinctrl-pxa2xx.h" + +static int pxa2xx_pctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->ngroups; +} + +static const char *pxa2xx_pctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned tgroup) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_group *group = pctl->groups + tgroup; + + return group->name; +} + +static int pxa2xx_pctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned tgroup, + const unsigned **pins, + unsigned *num_pins) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_group *group = pctl->groups + tgroup; + + *pins = (unsigned *)&group->pin; + *num_pins = 1; + + return 0; +} + +static const struct pinctrl_ops pxa2xx_pctl_ops = { +#ifdef CONFIG_OF + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinctrl_utils_dt_free_map, +#endif + .get_groups_count = pxa2xx_pctrl_get_groups_count, + .get_group_name = pxa2xx_pctrl_get_group_name, + .get_group_pins = pxa2xx_pctrl_get_group_pins, +}; + +static struct pxa_desc_function * +pxa_desc_by_func_group(struct pxa_pinctrl *pctl, const char *pin_name, + const char *func_name) +{ + int i; + struct pxa_desc_function *df; + + for (i = 0; i < pctl->npins; i++) { + const struct pxa_desc_pin *pin = pctl->ppins + i; + + if (!strcmp(pin->pin.name, pin_name)) + for (df = pin->functions; df->name; df++) + if (!strcmp(df->name, func_name)) + return df; + } + + return NULL; +} + +static int pxa2xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned pin, + bool input) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + unsigned long flags; + uint32_t val; + void __iomem *gpdr; + + gpdr = pctl->base_gpdr[pin / 32]; + dev_dbg(pctl->dev, "set_direction(pin=%d): dir=%d\n", + pin, !input); + + spin_lock_irqsave(&pctl->lock, flags); + + val = readl_relaxed(gpdr); + val = (val & ~BIT(pin % 32)) | (input ? 
0 : BIT(pin % 32)); + writel_relaxed(val, gpdr); + + spin_unlock_irqrestore(&pctl->lock, flags); + + return 0; +} + +static const char *pxa2xx_pmx_get_func_name(struct pinctrl_dev *pctldev, + unsigned function) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_function *pf = pctl->functions + function; + + return pf->name; +} + +static int pxa2xx_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->nfuncs; +} + +static int pxa2xx_pmx_get_func_groups(struct pinctrl_dev *pctldev, + unsigned function, + const char * const **groups, + unsigned * const num_groups) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_function *pf = pctl->functions + function; + + *groups = pf->groups; + *num_groups = pf->ngroups; + + return 0; +} + +static int pxa2xx_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned function, + unsigned tgroup) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_group *group = pctl->groups + tgroup; + struct pxa_desc_function *df; + int pin, shift; + unsigned long flags; + void __iomem *gafr, *gpdr; + u32 val; + + + df = pxa_desc_by_func_group(pctl, group->name, + (pctl->functions + function)->name); + if (!df) + return -EINVAL; + + pin = group->pin; + gafr = pctl->base_gafr[pin / 16]; + gpdr = pctl->base_gpdr[pin / 32]; + shift = (pin % 16) << 1; + dev_dbg(pctl->dev, "set_mux(pin=%d): af=%d dir=%d\n", + pin, df->muxval >> 1, df->muxval & 0x1); + + spin_lock_irqsave(&pctl->lock, flags); + + val = readl_relaxed(gafr); + val = (val & ~(0x3 << shift)) | ((df->muxval >> 1) << shift); + writel_relaxed(val, gafr); + + val = readl_relaxed(gpdr); + val = (val & ~BIT(pin % 32)) | ((df->muxval & 1) ? BIT(pin % 32) : 0); + writel_relaxed(val, gpdr); + + spin_unlock_irqrestore(&pctl->lock, flags); + + return 0; +} +static const struct pinmux_ops pxa2xx_pinmux_ops = { + .get_functions_count = pxa2xx_get_functions_count, + .get_function_name = pxa2xx_pmx_get_func_name, + .get_function_groups = pxa2xx_pmx_get_func_groups, + .set_mux = pxa2xx_pmx_set_mux, + .gpio_set_direction = pxa2xx_pmx_gpio_set_direction, +}; + +static int pxa2xx_pconf_group_get(struct pinctrl_dev *pctldev, + unsigned group, + unsigned long *config) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_group *g = pctl->groups + group; + unsigned long flags; + unsigned pin = g->pin; + void __iomem *pgsr = pctl->base_pgsr[pin / 32]; + u32 val; + + spin_lock_irqsave(&pctl->lock, flags); + val = readl_relaxed(pgsr) & BIT(pin % 32); + *config = val ? 
PIN_CONFIG_LOW_POWER_MODE : 0; + spin_unlock_irqrestore(&pctl->lock, flags); + + dev_dbg(pctl->dev, "get sleep gpio state(pin=%d) %d\n", + pin, !!val); + return 0; +} + +static int pxa2xx_pconf_group_set(struct pinctrl_dev *pctldev, + unsigned group, + unsigned long *configs, + unsigned num_configs) +{ + struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pxa_pinctrl_group *g = pctl->groups + group; + unsigned long flags; + unsigned pin = g->pin; + void __iomem *pgsr = pctl->base_pgsr[pin / 32]; + int i, is_set = 0; + u32 val; + + for (i = 0; i < num_configs; i++) { + switch (pinconf_to_config_param(configs[i])) { + case PIN_CONFIG_LOW_POWER_MODE: + is_set = pinconf_to_config_argument(configs[i]); + break; + default: + return -EINVAL; + } + } + + dev_dbg(pctl->dev, "set sleep gpio state(pin=%d) %d\n", + pin, is_set); + + spin_lock_irqsave(&pctl->lock, flags); + val = readl_relaxed(pgsr); + val = (val & ~BIT(pin % 32)) | (is_set ? BIT(pin % 32) : 0); + writel_relaxed(val, pgsr); + spin_unlock_irqrestore(&pctl->lock, flags); + + return 0; +} + +static const struct pinconf_ops pxa2xx_pconf_ops = { + .pin_config_group_get = pxa2xx_pconf_group_get, + .pin_config_group_set = pxa2xx_pconf_group_set, + .is_generic = true, +}; + +static struct pinctrl_desc pxa2xx_pinctrl_desc = { + .confops = &pxa2xx_pconf_ops, + .pctlops = &pxa2xx_pctl_ops, + .pmxops = &pxa2xx_pinmux_ops, +}; + +static const struct pxa_pinctrl_function * +pxa2xx_find_function(struct pxa_pinctrl *pctl, const char *fname, + const struct pxa_pinctrl_function *functions) +{ + const struct pxa_pinctrl_function *func; + + for (func = functions; func->name; func++) + if (!strcmp(fname, func->name)) + return func; + + return NULL; +} + +static int pxa2xx_build_functions(struct pxa_pinctrl *pctl) +{ + int i; + struct pxa_pinctrl_function *functions; + struct pxa_desc_function *df; + + /* + * Each pin can have at most 6 alternate functions, and 2 gpio functions + * which are common to each pin. As there are more than 2 pins without + * alternate function, 6 * npins is an absolute high limit of the number + * of functions. 
+ */ + functions = devm_kcalloc(pctl->dev, pctl->npins * 6, + sizeof(*functions), GFP_KERNEL); + if (!functions) + return -ENOMEM; + + for (i = 0; i < pctl->npins; i++) + for (df = pctl->ppins[i].functions; df->name; df++) + if (!pxa2xx_find_function(pctl, df->name, functions)) + (functions + pctl->nfuncs++)->name = df->name; + pctl->functions = devm_kmemdup(pctl->dev, functions, + pctl->nfuncs * sizeof(*functions), + GFP_KERNEL); + if (!pctl->functions) + return -ENOMEM; + + devm_kfree(pctl->dev, functions); + return 0; +} + +static int pxa2xx_build_groups(struct pxa_pinctrl *pctl) +{ + int i, j, ngroups; + struct pxa_pinctrl_function *func; + struct pxa_desc_function *df; + char **gtmp; + + gtmp = devm_kmalloc_array(pctl->dev, pctl->npins, sizeof(*gtmp), + GFP_KERNEL); + if (!gtmp) + return -ENOMEM; + + for (i = 0; i < pctl->nfuncs; i++) { + ngroups = 0; + for (j = 0; j < pctl->npins; j++) + for (df = pctl->ppins[j].functions; df->name; + df++) + if (!strcmp(pctl->functions[i].name, + df->name)) + gtmp[ngroups++] = (char *) + pctl->ppins[j].pin.name; + func = pctl->functions + i; + func->ngroups = ngroups; + func->groups = + devm_kmalloc_array(pctl->dev, ngroups, + sizeof(char *), GFP_KERNEL); + if (!func->groups) + return -ENOMEM; + + memcpy(func->groups, gtmp, ngroups * sizeof(*gtmp)); + } + + devm_kfree(pctl->dev, gtmp); + return 0; +} + +static int pxa2xx_build_state(struct pxa_pinctrl *pctl, + const struct pxa_desc_pin *ppins, int npins) +{ + struct pxa_pinctrl_group *group; + struct pinctrl_pin_desc *pins; + int ret, i; + + pctl->npins = npins; + pctl->ppins = ppins; + pctl->ngroups = npins; + + pctl->desc.npins = npins; + pins = devm_kcalloc(pctl->dev, npins, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + + pctl->desc.pins = pins; + for (i = 0; i < npins; i++) + pins[i] = ppins[i].pin; + + pctl->groups = devm_kmalloc_array(pctl->dev, pctl->ngroups, + sizeof(*pctl->groups), GFP_KERNEL); + if (!pctl->groups) + return -ENOMEM; + + for (i = 0; i < npins; i++) { + group = pctl->groups + i; + group->name = ppins[i].pin.name; + group->pin = ppins[i].pin.number; + } + + ret = pxa2xx_build_functions(pctl); + if (ret) + return ret; + + ret = pxa2xx_build_groups(pctl); + if (ret) + return ret; + + return 0; +} + +int pxa2xx_pinctrl_init(struct platform_device *pdev, + const struct pxa_desc_pin *ppins, int npins, + void __iomem *base_gafr[], void __iomem *base_gpdr[], + void __iomem *base_pgsr[]) +{ + struct pxa_pinctrl *pctl; + int ret, i, maxpin = 0; + + for (i = 0; i < npins; i++) + maxpin = max_t(int, ppins[i].pin.number, maxpin); + + pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); + if (!pctl) + return -ENOMEM; + pctl->base_gafr = devm_kcalloc(&pdev->dev, roundup(maxpin, 16), + sizeof(*pctl->base_gafr), GFP_KERNEL); + pctl->base_gpdr = devm_kcalloc(&pdev->dev, roundup(maxpin, 32), + sizeof(*pctl->base_gpdr), GFP_KERNEL); + pctl->base_pgsr = devm_kcalloc(&pdev->dev, roundup(maxpin, 32), + sizeof(*pctl->base_pgsr), GFP_KERNEL); + if (!pctl->base_gafr || !pctl->base_gpdr || !pctl->base_pgsr) + return -ENOMEM; + + platform_set_drvdata(pdev, pctl); + spin_lock_init(&pctl->lock); + + pctl->dev = &pdev->dev; + pctl->desc = pxa2xx_pinctrl_desc; + pctl->desc.name = dev_name(&pdev->dev); + pctl->desc.owner = THIS_MODULE; + + for (i = 0; i < roundup(maxpin, 16); i += 16) + pctl->base_gafr[i / 16] = base_gafr[i / 16]; + for (i = 0; i < roundup(maxpin, 32); i += 32) { + pctl->base_gpdr[i / 32] = base_gpdr[i / 32]; + pctl->base_pgsr[i / 32] = base_pgsr[i / 32]; + } + + ret = 
pxa2xx_build_state(pctl, ppins, npins); + if (ret) + return ret; + + pctl->pctl_dev = pinctrl_register(&pctl->desc, &pdev->dev, pctl); + if (IS_ERR(pctl->pctl_dev)) { + dev_err(&pdev->dev, "couldn't register pinctrl driver\n"); + return PTR_ERR(pctl->pctl_dev); + } + + dev_info(&pdev->dev, "initialized pxa2xx pinctrl driver\n"); + + return 0; +} + +int pxa2xx_pinctrl_exit(struct platform_device *pdev) +{ + struct pxa_pinctrl *pctl = platform_get_drvdata(pdev); + + pinctrl_unregister(pctl->pctl_dev); + return 0; +} diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.h b/drivers/pinctrl/pxa/pinctrl-pxa2xx.h new file mode 100644 index 000000000000..8be1e0b79751 --- /dev/null +++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.h @@ -0,0 +1,92 @@ +/* + * Marvell PXA2xx family pin control + * + * Copyright (C) 2015 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + */ + +#ifndef __PINCTRL_PXA_H +#define __PINCTRL_PXA_H + +#define PXA_FUNCTION(_dir, _af, _name) \ + { \ + .name = _name, \ + .muxval = (_dir | (_af << 1)), \ + } + +#define PXA_PIN(_pin, funcs...) \ + { \ + .pin = _pin, \ + .functions = (struct pxa_desc_function[]){ \ + funcs, { } }, \ + } + +#define PXA_GPIO_PIN(_pin, funcs...) \ + { \ + .pin = _pin, \ + .functions = (struct pxa_desc_function[]){ \ + PXA_FUNCTION(0, 0, "gpio_in"), \ + PXA_FUNCTION(1, 0, "gpio_out"), \ + funcs, { } }, \ + } + +#define PXA_GPIO_ONLY_PIN(_pin) \ + { \ + .pin = _pin, \ + .functions = (struct pxa_desc_function[]){ \ + PXA_FUNCTION(0, 0, "gpio_in"), \ + PXA_FUNCTION(1, 0, "gpio_out"), \ + { } }, \ + } + +#define PXA_PINCTRL_PIN(pin) \ + PINCTRL_PIN(pin, "P" #pin) + +struct pxa_desc_function { + const char *name; + u8 muxval; +}; + +struct pxa_desc_pin { + struct pinctrl_pin_desc pin; + struct pxa_desc_function *functions; +}; + +struct pxa_pinctrl_group { + const char *name; + unsigned pin; +}; + +struct pxa_pinctrl_function { + const char *name; + const char **groups; + unsigned ngroups; +}; + +struct pxa_pinctrl { + spinlock_t lock; + void __iomem **base_gafr; + void __iomem **base_gpdr; + void __iomem **base_pgsr; + struct device *dev; + struct pinctrl_desc desc; + struct pinctrl_dev *pctl_dev; + unsigned npins; + const struct pxa_desc_pin *ppins; + unsigned ngroups; + struct pxa_pinctrl_group *groups; + unsigned nfuncs; + struct pxa_pinctrl_function *functions; + char *name; +}; + +int pxa2xx_pinctrl_init(struct platform_device *pdev, + const struct pxa_desc_pin *ppins, int npins, + void __iomem *base_gafr[], void __iomem *base_gpdr[], + void __iomem *base_gpsr[]); + +#endif /* __PINCTRL_PXA_H */ diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 383263a92e59..eeac8cba8a21 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -63,6 +63,14 @@ config PINCTRL_MSM8916 This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm TLMM block found on the Qualcomm 8916 platform. +config PINCTRL_MSM8996 + tristate "Qualcomm MSM8996 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm MSM8996 platform. 
+ config PINCTRL_QDF2XXX tristate "Qualcomm Technologies QDF2xxx pin controller driver" depends on GPIOLIB && ACPI diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 13b190e72c21..dfb50a9fe04a 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o +obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c new file mode 100644 index 000000000000..c257927bea05 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c @@ -0,0 +1,1942 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define REG_BASE 0x0 +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_BASE + REG_SIZE * id, \ + .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \ + .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } +static const struct pinctrl_pin_desc msm8996_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + 
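	/*
+	 * Editorial annotation, not part of the original patch: pins 0-149 are
+	 * the TLMM GPIOs; the PINGROUP() initializer above places each pin's
+	 * registers in a 0x1000-byte window (ctl at REG_BASE + REG_SIZE * id,
+	 * io at +0x4, interrupt config at +0x8, interrupt status at +0xc).
+	 * Pins 150-156 are the SDC1/SDC2 pad-control lines, for which the
+	 * SDC_QDSD_PINGROUP() initializer exposes only pull and drive-strength
+	 * bits and no mux, I/O or interrupt fields (those are set to -1).
+	 */
+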
PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, 
"GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, "GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "SDC1_CLK"), + PINCTRL_PIN(151, "SDC1_CMD"), + PINCTRL_PIN(152, "SDC1_DATA"), + PINCTRL_PIN(153, "SDC2_CLK"), + PINCTRL_PIN(154, "SDC2_CMD"), + PINCTRL_PIN(155, "SDC2_DATA"), + PINCTRL_PIN(156, "SDC1_RCLK"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); 
+DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); +DECLARE_MSM_GPIO_PINS(133); +DECLARE_MSM_GPIO_PINS(134); +DECLARE_MSM_GPIO_PINS(135); +DECLARE_MSM_GPIO_PINS(136); +DECLARE_MSM_GPIO_PINS(137); +DECLARE_MSM_GPIO_PINS(138); +DECLARE_MSM_GPIO_PINS(139); +DECLARE_MSM_GPIO_PINS(140); +DECLARE_MSM_GPIO_PINS(141); +DECLARE_MSM_GPIO_PINS(142); +DECLARE_MSM_GPIO_PINS(143); +DECLARE_MSM_GPIO_PINS(144); +DECLARE_MSM_GPIO_PINS(145); +DECLARE_MSM_GPIO_PINS(146); +DECLARE_MSM_GPIO_PINS(147); +DECLARE_MSM_GPIO_PINS(148); +DECLARE_MSM_GPIO_PINS(149); + +static const unsigned int sdc1_clk_pins[] = { 150 }; +static const unsigned int sdc1_cmd_pins[] = { 151 }; +static const unsigned int sdc1_data_pins[] = { 152 }; +static const unsigned int sdc2_clk_pins[] = { 153 }; +static const unsigned int sdc2_cmd_pins[] = { 154 }; +static const unsigned int sdc2_data_pins[] = { 155 }; +static const unsigned int sdc1_rclk_pins[] = { 156 }; + +enum msm8996_functions { + msm_mux_adsp_ext, + msm_mux_atest_bbrx0, + msm_mux_atest_bbrx1, + msm_mux_atest_char, + msm_mux_atest_char0, + msm_mux_atest_char1, + msm_mux_atest_char2, + msm_mux_atest_char3, + msm_mux_atest_gpsadc0, + msm_mux_atest_gpsadc1, + msm_mux_atest_tsens, + msm_mux_atest_tsens2, + msm_mux_atest_usb1, + msm_mux_atest_usb10, + msm_mux_atest_usb11, + msm_mux_atest_usb12, + msm_mux_atest_usb13, + msm_mux_atest_usb2, + msm_mux_atest_usb20, + msm_mux_atest_usb21, + msm_mux_atest_usb22, + msm_mux_atest_usb23, + msm_mux_audio_ref, + msm_mux_bimc_dte0, + msm_mux_bimc_dte1, + msm_mux_blsp10_spi, + msm_mux_blsp11_i2c_scl_b, + msm_mux_blsp11_i2c_sda_b, + msm_mux_blsp11_uart_rx_b, + msm_mux_blsp11_uart_tx_b, + msm_mux_blsp1_spi, + msm_mux_blsp2_spi, + msm_mux_blsp_i2c1, + msm_mux_blsp_i2c10, + msm_mux_blsp_i2c11, + msm_mux_blsp_i2c12, + msm_mux_blsp_i2c2, + msm_mux_blsp_i2c3, + msm_mux_blsp_i2c4, + msm_mux_blsp_i2c5, + msm_mux_blsp_i2c6, + msm_mux_blsp_i2c7, + msm_mux_blsp_i2c8, + msm_mux_blsp_i2c9, + msm_mux_blsp_spi1, + msm_mux_blsp_spi10, + msm_mux_blsp_spi11, + msm_mux_blsp_spi12, + 
msm_mux_blsp_spi2, + msm_mux_blsp_spi3, + msm_mux_blsp_spi4, + msm_mux_blsp_spi5, + msm_mux_blsp_spi6, + msm_mux_blsp_spi7, + msm_mux_blsp_spi8, + msm_mux_blsp_spi9, + msm_mux_blsp_uart1, + msm_mux_blsp_uart10, + msm_mux_blsp_uart11, + msm_mux_blsp_uart12, + msm_mux_blsp_uart2, + msm_mux_blsp_uart3, + msm_mux_blsp_uart4, + msm_mux_blsp_uart5, + msm_mux_blsp_uart6, + msm_mux_blsp_uart7, + msm_mux_blsp_uart8, + msm_mux_blsp_uart9, + msm_mux_blsp_uim1, + msm_mux_blsp_uim10, + msm_mux_blsp_uim11, + msm_mux_blsp_uim12, + msm_mux_blsp_uim2, + msm_mux_blsp_uim3, + msm_mux_blsp_uim4, + msm_mux_blsp_uim5, + msm_mux_blsp_uim6, + msm_mux_blsp_uim7, + msm_mux_blsp_uim8, + msm_mux_blsp_uim9, + msm_mux_btfm_slimbus, + msm_mux_cam_mclk, + msm_mux_cci_async, + msm_mux_cci_i2c, + msm_mux_cci_timer0, + msm_mux_cci_timer1, + msm_mux_cci_timer2, + msm_mux_cci_timer3, + msm_mux_cci_timer4, + msm_mux_cri_trng, + msm_mux_cri_trng0, + msm_mux_cri_trng1, + msm_mux_dac_calib0, + msm_mux_dac_calib1, + msm_mux_dac_calib10, + msm_mux_dac_calib11, + msm_mux_dac_calib12, + msm_mux_dac_calib13, + msm_mux_dac_calib14, + msm_mux_dac_calib15, + msm_mux_dac_calib16, + msm_mux_dac_calib17, + msm_mux_dac_calib18, + msm_mux_dac_calib19, + msm_mux_dac_calib2, + msm_mux_dac_calib20, + msm_mux_dac_calib21, + msm_mux_dac_calib22, + msm_mux_dac_calib23, + msm_mux_dac_calib24, + msm_mux_dac_calib25, + msm_mux_dac_calib26, + msm_mux_dac_calib3, + msm_mux_dac_calib4, + msm_mux_dac_calib5, + msm_mux_dac_calib6, + msm_mux_dac_calib7, + msm_mux_dac_calib8, + msm_mux_dac_calib9, + msm_mux_dac_gpio, + msm_mux_dbg_out, + msm_mux_ddr_bist, + msm_mux_edp_hot, + msm_mux_edp_lcd, + msm_mux_gcc_gp1_clk_a, + msm_mux_gcc_gp1_clk_b, + msm_mux_gcc_gp2_clk_a, + msm_mux_gcc_gp2_clk_b, + msm_mux_gcc_gp3_clk_a, + msm_mux_gcc_gp3_clk_b, + msm_mux_gsm_tx, + msm_mux_hdmi_cec, + msm_mux_hdmi_ddc, + msm_mux_hdmi_hot, + msm_mux_hdmi_rcv, + msm_mux_isense_dbg, + msm_mux_ldo_en, + msm_mux_ldo_update, + msm_mux_lpass_slimbus, + msm_mux_m_voc, + msm_mux_mdp_vsync, + msm_mux_mdp_vsync_p_b, + msm_mux_mdp_vsync_s_b, + msm_mux_modem_tsync, + msm_mux_mss_lte, + msm_mux_nav_dr, + msm_mux_nav_pps, + msm_mux_pa_indicator, + msm_mux_pci_e0, + msm_mux_pci_e1, + msm_mux_pci_e2, + msm_mux_pll_bypassnl, + msm_mux_pll_reset, + msm_mux_pri_mi2s, + msm_mux_prng_rosc, + msm_mux_pwr_crypto, + msm_mux_pwr_modem, + msm_mux_pwr_nav, + msm_mux_qdss_cti, + msm_mux_qdss_cti_trig_in_a, + msm_mux_qdss_cti_trig_in_b, + msm_mux_qdss_cti_trig_out_a, + msm_mux_qdss_cti_trig_out_b, + msm_mux_qdss_stm0, + msm_mux_qdss_stm1, + msm_mux_qdss_stm10, + msm_mux_qdss_stm11, + msm_mux_qdss_stm12, + msm_mux_qdss_stm13, + msm_mux_qdss_stm14, + msm_mux_qdss_stm15, + msm_mux_qdss_stm16, + msm_mux_qdss_stm17, + msm_mux_qdss_stm18, + msm_mux_qdss_stm19, + msm_mux_qdss_stm2, + msm_mux_qdss_stm20, + msm_mux_qdss_stm21, + msm_mux_qdss_stm22, + msm_mux_qdss_stm23, + msm_mux_qdss_stm24, + msm_mux_qdss_stm25, + msm_mux_qdss_stm26, + msm_mux_qdss_stm27, + msm_mux_qdss_stm28, + msm_mux_qdss_stm29, + msm_mux_qdss_stm3, + msm_mux_qdss_stm30, + msm_mux_qdss_stm31, + msm_mux_qdss_stm4, + msm_mux_qdss_stm5, + msm_mux_qdss_stm6, + msm_mux_qdss_stm7, + msm_mux_qdss_stm8, + msm_mux_qdss_stm9, + msm_mux_qdss_traceclk_a, + msm_mux_qdss_traceclk_b, + msm_mux_qdss_tracectl_a, + msm_mux_qdss_tracectl_b, + msm_mux_qdss_tracedata_11, + msm_mux_qdss_tracedata_12, + msm_mux_qdss_tracedata_a, + msm_mux_qdss_tracedata_b, + msm_mux_qspi0, + msm_mux_qspi1, + msm_mux_qspi2, + msm_mux_qspi3, + msm_mux_qspi_clk, + msm_mux_qspi_cs, + 
msm_mux_qua_mi2s, + msm_mux_sd_card, + msm_mux_sd_write, + msm_mux_sdc40, + msm_mux_sdc41, + msm_mux_sdc42, + msm_mux_sdc43, + msm_mux_sdc4_clk, + msm_mux_sdc4_cmd, + msm_mux_sec_mi2s, + msm_mux_spkr_i2s, + msm_mux_ssbi1, + msm_mux_ssbi2, + msm_mux_ssc_irq, + msm_mux_ter_mi2s, + msm_mux_tsense_pwm1, + msm_mux_tsense_pwm2, + msm_mux_tsif1_clk, + msm_mux_tsif1_data, + msm_mux_tsif1_en, + msm_mux_tsif1_error, + msm_mux_tsif1_sync, + msm_mux_tsif2_clk, + msm_mux_tsif2_data, + msm_mux_tsif2_en, + msm_mux_tsif2_error, + msm_mux_tsif2_sync, + msm_mux_uim1, + msm_mux_uim2, + msm_mux_uim3, + msm_mux_uim4, + msm_mux_uim_batt, + msm_mux_vfr_1, + msm_mux_gpio, + msm_mux_NA, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146", + "gpio147", "gpio148", "gpio149" +}; + + +static const char * const blsp_uart1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; +static const char * const blsp_spi1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; +static const char * const blsp_i2c1_groups[] = { + "gpio2", "gpio3", +}; +static const char * const blsp_uim1_groups[] = { + "gpio0", "gpio1", +}; +static const char * const atest_tsens_groups[] = { + "gpio3", +}; +static const char * const bimc_dte1_groups[] = { + "gpio3", "gpio5", +}; +static const char * const blsp_spi8_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", +}; +static const char * const blsp_uart8_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", +}; +static const char * const blsp_uim8_groups[] = { + "gpio4", "gpio5", +}; +static const char * const qdss_cti_trig_out_b_groups[] = { + "gpio4", +}; +static const char * const dac_calib0_groups[] = { + "gpio4", "gpio41", +}; +static const char * const bimc_dte0_groups[] = { + "gpio4", "gpio6", +}; +static const char * const qdss_cti_trig_in_b_groups[] = { + "gpio5", +}; +static const char * const dac_calib1_groups[] = { + "gpio5", "gpio42", +}; +static const char * const dac_calib2_groups[] = { + "gpio6", "gpio43", +}; +static const char * const atest_tsens2_groups[] = { + 
"gpio7", +}; +static const char * const blsp_spi10_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; +static const char * const blsp_uart10_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; +static const char * const blsp_uim10_groups[] = { + "gpio8", "gpio9", +}; +static const char * const atest_bbrx1_groups[] = { + "gpio8", +}; +static const char * const atest_usb12_groups[] = { + "gpio9", +}; +static const char * const mdp_vsync_groups[] = { + "gpio10", "gpio11", "gpio12", +}; +static const char * const edp_lcd_groups[] = { + "gpio10", +}; +static const char * const blsp_i2c10_groups[] = { + "gpio10", "gpio11", +}; +static const char * const atest_usb11_groups[] = { + "gpio10", +}; +static const char * const atest_gpsadc0_groups[] = { + "gpio11", +}; +static const char * const edp_hot_groups[] = { + "gpio11", +}; +static const char * const atest_usb10_groups[] = { + "gpio11", +}; +static const char * const m_voc_groups[] = { + "gpio12", +}; +static const char * const dac_gpio_groups[] = { + "gpio12", +}; +static const char * const atest_char_groups[] = { + "gpio12", +}; +static const char * const cam_mclk_groups[] = { + "gpio13", "gpio14", "gpio15", "gpio16", +}; +static const char * const pll_bypassnl_groups[] = { + "gpio13", +}; +static const char * const qdss_stm7_groups[] = { + "gpio13", +}; +static const char * const blsp_i2c8_groups[] = { + "gpio6", "gpio7", +}; +static const char * const atest_usb1_groups[] = { + "gpio7", +}; +static const char * const atest_usb13_groups[] = { + "gpio8", +}; +static const char * const atest_bbrx0_groups[] = { + "gpio9", +}; +static const char * const atest_gpsadc1_groups[] = { + "gpio10", +}; +static const char * const qdss_tracedata_b_groups[] = { + "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", + "gpio21", "gpio22", "gpio23", "gpio26", "gpio29", "gpio57", "gpio58", + "gpio92", "gpio93", +}; +static const char * const pll_reset_groups[] = { + "gpio14", +}; +static const char * const qdss_stm6_groups[] = { + "gpio14", +}; +static const char * const qdss_stm5_groups[] = { + "gpio15", +}; +static const char * const qdss_stm4_groups[] = { + "gpio16", +}; +static const char * const atest_usb2_groups[] = { + "gpio16", +}; +static const char * const dac_calib3_groups[] = { + "gpio17", "gpio44", +}; +static const char * const cci_i2c_groups[] = { + "gpio17", "gpio18", "gpio19", "gpio20", +}; +static const char * const qdss_stm3_groups[] = { + "gpio17", +}; +static const char * const atest_usb23_groups[] = { + "gpio17", +}; +static const char * const atest_char3_groups[] = { + "gpio17", +}; +static const char * const dac_calib4_groups[] = { + "gpio18", "gpio45", +}; +static const char * const qdss_stm2_groups[] = { + "gpio18", +}; +static const char * const atest_usb22_groups[] = { + "gpio18", +}; +static const char * const atest_char2_groups[] = { + "gpio18", +}; +static const char * const dac_calib5_groups[] = { + "gpio19", "gpio46", +}; +static const char * const qdss_stm1_groups[] = { + "gpio19", +}; +static const char * const atest_usb21_groups[] = { + "gpio19", +}; +static const char * const atest_char1_groups[] = { + "gpio19", +}; +static const char * const dac_calib6_groups[] = { + "gpio20", "gpio47", +}; +static const char * const dbg_out_groups[] = { + "gpio20", +}; +static const char * const qdss_stm0_groups[] = { + "gpio20", +}; +static const char * const atest_usb20_groups[] = { + "gpio20", +}; +static const char * const atest_char0_groups[] = { + "gpio20", +}; +static const char * const 
dac_calib7_groups[] = { + "gpio21", "gpio48", +}; +static const char * const cci_timer0_groups[] = { + "gpio21", +}; +static const char * const qdss_stm13_groups[] = { + "gpio21", +}; +static const char * const dac_calib8_groups[] = { + "gpio22", "gpio49", +}; +static const char * const cci_timer1_groups[] = { + "gpio22", +}; +static const char * const qdss_stm12_groups[] = { + "gpio22", +}; +static const char * const dac_calib9_groups[] = { + "gpio23", "gpio50", +}; +static const char * const cci_timer2_groups[] = { + "gpio23", +}; +static const char * const qdss_stm11_groups[] = { + "gpio23", +}; +static const char * const dac_calib10_groups[] = { + "gpio24", "gpio51", +}; +static const char * const cci_timer3_groups[] = { + "gpio24", +}; +static const char * const cci_async_groups[] = { + "gpio24", "gpio25", "gpio26", +}; +static const char * const blsp1_spi_groups[] = { + "gpio24", "gpio27", "gpio28", "gpio90", +}; +static const char * const qdss_stm10_groups[] = { + "gpio24", +}; +static const char * const qdss_cti_trig_in_a_groups[] = { + "gpio24", +}; +static const char * const dac_calib11_groups[] = { + "gpio25", "gpio52", +}; +static const char * const cci_timer4_groups[] = { + "gpio25", +}; +static const char * const blsp_spi6_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28", +}; +static const char * const blsp_uart6_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28", +}; +static const char * const blsp_uim6_groups[] = { + "gpio25", "gpio26", +}; +static const char * const blsp2_spi_groups[] = { + "gpio25", "gpio29", "gpio30", +}; +static const char * const qdss_stm9_groups[] = { + "gpio25", +}; +static const char * const qdss_cti_trig_out_a_groups[] = { + "gpio25", +}; +static const char * const dac_calib12_groups[] = { + "gpio26", "gpio53", +}; +static const char * const qdss_stm8_groups[] = { + "gpio26", +}; +static const char * const dac_calib13_groups[] = { + "gpio27", "gpio54", +}; +static const char * const blsp_i2c6_groups[] = { + "gpio27", "gpio28", +}; +static const char * const qdss_tracectl_a_groups[] = { + "gpio27", +}; +static const char * const dac_calib14_groups[] = { + "gpio28", "gpio55", +}; +static const char * const qdss_traceclk_a_groups[] = { + "gpio28", +}; +static const char * const dac_calib15_groups[] = { + "gpio29", "gpio56", +}; +static const char * const dac_calib16_groups[] = { + "gpio30", "gpio57", +}; +static const char * const hdmi_rcv_groups[] = { + "gpio30", +}; +static const char * const dac_calib17_groups[] = { + "gpio31", "gpio58", +}; +static const char * const pwr_modem_groups[] = { + "gpio31", +}; +static const char * const hdmi_cec_groups[] = { + "gpio31", +}; +static const char * const pwr_nav_groups[] = { + "gpio32", +}; +static const char * const dac_calib18_groups[] = { + "gpio32", "gpio59", +}; +static const char * const hdmi_ddc_groups[] = { + "gpio32", "gpio33", +}; +static const char * const pwr_crypto_groups[] = { + "gpio33", +}; +static const char * const dac_calib19_groups[] = { + "gpio33", "gpio60", +}; +static const char * const dac_calib20_groups[] = { + "gpio34", "gpio61", +}; +static const char * const hdmi_hot_groups[] = { + "gpio34", +}; +static const char * const dac_calib21_groups[] = { + "gpio35", "gpio62", +}; +static const char * const pci_e0_groups[] = { + "gpio35", "gpio36", +}; +static const char * const dac_calib22_groups[] = { + "gpio36", "gpio63", +}; +static const char * const dac_calib23_groups[] = { + "gpio37", "gpio64", +}; +static const char * const blsp_i2c2_groups[] = { + "gpio43", "gpio44", +}; 
+static const char * const blsp_spi3_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48", +}; +static const char * const blsp_uart3_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48", +}; +static const char * const blsp_uim3_groups[] = { + "gpio45", "gpio46", +}; +static const char * const blsp_i2c3_groups[] = { + "gpio47", "gpio48", +}; +static const char * const dac_calib24_groups[] = { + "gpio38", "gpio65", +}; +static const char * const dac_calib25_groups[] = { + "gpio39", "gpio66", +}; +static const char * const tsif1_sync_groups[] = { + "gpio39", +}; +static const char * const sd_write_groups[] = { + "gpio40", +}; +static const char * const tsif1_error_groups[] = { + "gpio40", +}; +static const char * const blsp_spi2_groups[] = { + "gpio41", "gpio42", "gpio43", "gpio44", +}; +static const char * const blsp_uart2_groups[] = { + "gpio41", "gpio42", "gpio43", "gpio44", +}; +static const char * const blsp_uim2_groups[] = { + "gpio41", "gpio42", +}; +static const char * const qdss_cti_groups[] = { + "gpio41", "gpio42", "gpio100", "gpio101", +}; +static const char * const uim3_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", +}; +static const char * const blsp_spi9_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", +}; +static const char * const blsp_uart9_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", +}; +static const char * const blsp_uim9_groups[] = { + "gpio49", "gpio50", +}; +static const char * const blsp10_spi_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", "gpio88", +}; +static const char * const blsp_i2c9_groups[] = { + "gpio51", "gpio52", +}; +static const char * const blsp_spi7_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56", +}; +static const char * const blsp_uart7_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56", +}; +static const char * const blsp_uim7_groups[] = { + "gpio53", "gpio54", +}; +static const char * const qdss_tracedata_a_groups[] = { + "gpio53", "gpio54", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", + "gpio74", "gpio75", "gpio76", "gpio77", "gpio85", "gpio86", "gpio87", + "gpio89", "gpio90", +}; +static const char * const blsp_i2c7_groups[] = { + "gpio55", "gpio56", +}; +static const char * const qua_mi2s_groups[] = { + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", +}; +static const char * const gcc_gp1_clk_a_groups[] = { + "gpio57", +}; +static const char * const uim4_groups[] = { + "gpio58", "gpio59", "gpio60", "gpio61", +}; +static const char * const blsp_spi11_groups[] = { + "gpio58", "gpio59", "gpio60", "gpio61", +}; +static const char * const blsp_uart11_groups[] = { + "gpio58", "gpio59", "gpio60", "gpio61", +}; +static const char * const blsp_uim11_groups[] = { + "gpio58", "gpio59", +}; +static const char * const gcc_gp2_clk_a_groups[] = { + "gpio58", +}; +static const char * const gcc_gp3_clk_a_groups[] = { + "gpio59", +}; +static const char * const blsp_i2c11_groups[] = { + "gpio60", "gpio61", +}; +static const char * const cri_trng0_groups[] = { + "gpio60", +}; +static const char * const cri_trng1_groups[] = { + "gpio61", +}; +static const char * const cri_trng_groups[] = { + "gpio62", +}; +static const char * const qdss_stm18_groups[] = { + "gpio63", +}; +static const char * const pri_mi2s_groups[] = { + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", +}; +static const char * const qdss_stm17_groups[] = { + "gpio64", +}; +static const char * const blsp_spi4_groups[] = { + "gpio65", "gpio66", "gpio67", "gpio68", +}; +static const char * const blsp_uart4_groups[] = { + "gpio65", 
"gpio66", "gpio67", "gpio68", +}; +static const char * const blsp_uim4_groups[] = { + "gpio65", "gpio66", +}; +static const char * const qdss_stm16_groups[] = { + "gpio65", +}; +static const char * const qdss_stm15_groups[] = { + "gpio66", +}; +static const char * const dac_calib26_groups[] = { + "gpio67", +}; +static const char * const blsp_i2c4_groups[] = { + "gpio67", "gpio68", +}; +static const char * const qdss_stm14_groups[] = { + "gpio67", +}; +static const char * const spkr_i2s_groups[] = { + "gpio69", "gpio70", "gpio71", "gpio72", +}; +static const char * const audio_ref_groups[] = { + "gpio69", +}; +static const char * const lpass_slimbus_groups[] = { + "gpio70", "gpio71", "gpio72", +}; +static const char * const isense_dbg_groups[] = { + "gpio70", +}; +static const char * const tsense_pwm1_groups[] = { + "gpio71", +}; +static const char * const tsense_pwm2_groups[] = { + "gpio71", +}; +static const char * const btfm_slimbus_groups[] = { + "gpio73", "gpio74", +}; +static const char * const ter_mi2s_groups[] = { + "gpio74", "gpio75", "gpio76", "gpio77", "gpio78", +}; +static const char * const qdss_stm22_groups[] = { + "gpio74", +}; +static const char * const qdss_stm21_groups[] = { + "gpio75", +}; +static const char * const qdss_stm20_groups[] = { + "gpio76", +}; +static const char * const qdss_stm19_groups[] = { + "gpio77", +}; +static const char * const ssc_irq_groups[] = { + "gpio78", "gpio79", "gpio80", "gpio117", "gpio118", "gpio119", + "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125", +}; +static const char * const gcc_gp1_clk_b_groups[] = { + "gpio78", +}; +static const char * const sec_mi2s_groups[] = { + "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", +}; +static const char * const blsp_spi5_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio84", +}; +static const char * const blsp_uart5_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio84", +}; +static const char * const blsp_uim5_groups[] = { + "gpio81", "gpio82", +}; +static const char * const gcc_gp2_clk_b_groups[] = { + "gpio81", +}; +static const char * const gcc_gp3_clk_b_groups[] = { + "gpio82", +}; +static const char * const blsp_i2c5_groups[] = { + "gpio83", "gpio84", +}; +static const char * const blsp_spi12_groups[] = { + "gpio85", "gpio86", "gpio87", "gpio88", +}; +static const char * const blsp_uart12_groups[] = { + "gpio85", "gpio86", "gpio87", "gpio88", +}; +static const char * const blsp_uim12_groups[] = { + "gpio85", "gpio86", +}; +static const char * const qdss_stm25_groups[] = { + "gpio85", +}; +static const char * const qdss_stm31_groups[] = { + "gpio86", +}; +static const char * const blsp_i2c12_groups[] = { + "gpio87", "gpio88", +}; +static const char * const qdss_stm30_groups[] = { + "gpio87", +}; +static const char * const qdss_stm29_groups[] = { + "gpio88", +}; +static const char * const tsif1_clk_groups[] = { + "gpio89", +}; +static const char * const qdss_stm28_groups[] = { + "gpio89", +}; +static const char * const tsif1_en_groups[] = { + "gpio90", +}; +static const char * const tsif1_data_groups[] = { + "gpio91", +}; +static const char * const sdc4_cmd_groups[] = { + "gpio91", +}; +static const char * const qdss_stm27_groups[] = { + "gpio91", +}; +static const char * const qdss_traceclk_b_groups[] = { + "gpio91", +}; +static const char * const tsif2_error_groups[] = { + "gpio92", +}; +static const char * const sdc43_groups[] = { + "gpio92", +}; +static const char * const vfr_1_groups[] = { + "gpio92", +}; +static const char * const qdss_stm26_groups[] = { + "gpio92", +}; 
+static const char * const tsif2_clk_groups[] = { + "gpio93", +}; +static const char * const sdc4_clk_groups[] = { + "gpio93", +}; +static const char * const qdss_stm24_groups[] = { + "gpio93", +}; +static const char * const tsif2_en_groups[] = { + "gpio94", +}; +static const char * const sdc42_groups[] = { + "gpio94", +}; +static const char * const qdss_stm23_groups[] = { + "gpio94", +}; +static const char * const qdss_tracectl_b_groups[] = { + "gpio94", +}; +static const char * const sd_card_groups[] = { + "gpio95", +}; +static const char * const tsif2_data_groups[] = { + "gpio95", +}; +static const char * const sdc41_groups[] = { + "gpio95", +}; +static const char * const tsif2_sync_groups[] = { + "gpio96", +}; +static const char * const sdc40_groups[] = { + "gpio96", +}; +static const char * const mdp_vsync_p_b_groups[] = { + "gpio97", +}; +static const char * const ldo_en_groups[] = { + "gpio97", +}; +static const char * const mdp_vsync_s_b_groups[] = { + "gpio98", +}; +static const char * const ldo_update_groups[] = { + "gpio98", +}; +static const char * const blsp11_uart_tx_b_groups[] = { + "gpio100", +}; +static const char * const blsp11_uart_rx_b_groups[] = { + "gpio101", +}; +static const char * const blsp11_i2c_sda_b_groups[] = { + "gpio102", +}; +static const char * const prng_rosc_groups[] = { + "gpio102", +}; +static const char * const blsp11_i2c_scl_b_groups[] = { + "gpio103", +}; +static const char * const uim2_groups[] = { + "gpio105", "gpio106", "gpio107", "gpio108", +}; +static const char * const uim1_groups[] = { + "gpio109", "gpio110", "gpio111", "gpio112", +}; +static const char * const uim_batt_groups[] = { + "gpio113", +}; +static const char * const pci_e2_groups[] = { + "gpio114", "gpio115", "gpio116", +}; +static const char * const pa_indicator_groups[] = { + "gpio116", +}; +static const char * const adsp_ext_groups[] = { + "gpio118", +}; +static const char * const ddr_bist_groups[] = { + "gpio121", "gpio122", "gpio123", "gpio124", +}; +static const char * const qdss_tracedata_11_groups[] = { + "gpio123", +}; +static const char * const qdss_tracedata_12_groups[] = { + "gpio124", +}; +static const char * const modem_tsync_groups[] = { + "gpio128", +}; +static const char * const nav_dr_groups[] = { + "gpio128", +}; +static const char * const nav_pps_groups[] = { + "gpio128", +}; +static const char * const pci_e1_groups[] = { + "gpio130", "gpio131", "gpio132", +}; +static const char * const gsm_tx_groups[] = { + "gpio134", "gpio135", +}; +static const char * const qspi_cs_groups[] = { + "gpio138", "gpio141", +}; +static const char * const ssbi2_groups[] = { + "gpio139", +}; +static const char * const ssbi1_groups[] = { + "gpio140", +}; +static const char * const mss_lte_groups[] = { + "gpio144", "gpio145", +}; +static const char * const qspi_clk_groups[] = { + "gpio145", +}; +static const char * const qspi0_groups[] = { + "gpio146", +}; +static const char * const qspi1_groups[] = { + "gpio147", +}; +static const char * const qspi2_groups[] = { + "gpio148", +}; +static const char * const qspi3_groups[] = { + "gpio149", +}; + +static const struct msm_function msm8996_functions[] = { + FUNCTION(adsp_ext), + FUNCTION(atest_bbrx0), + FUNCTION(atest_bbrx1), + FUNCTION(atest_char), + FUNCTION(atest_char0), + FUNCTION(atest_char1), + FUNCTION(atest_char2), + FUNCTION(atest_char3), + FUNCTION(atest_gpsadc0), + FUNCTION(atest_gpsadc1), + FUNCTION(atest_tsens), + FUNCTION(atest_tsens2), + FUNCTION(atest_usb1), + FUNCTION(atest_usb10), + FUNCTION(atest_usb11), + 
FUNCTION(atest_usb12), + FUNCTION(atest_usb13), + FUNCTION(atest_usb2), + FUNCTION(atest_usb20), + FUNCTION(atest_usb21), + FUNCTION(atest_usb22), + FUNCTION(atest_usb23), + FUNCTION(audio_ref), + FUNCTION(bimc_dte0), + FUNCTION(bimc_dte1), + FUNCTION(blsp10_spi), + FUNCTION(blsp11_i2c_scl_b), + FUNCTION(blsp11_i2c_sda_b), + FUNCTION(blsp11_uart_rx_b), + FUNCTION(blsp11_uart_tx_b), + FUNCTION(blsp1_spi), + FUNCTION(blsp2_spi), + FUNCTION(blsp_i2c1), + FUNCTION(blsp_i2c10), + FUNCTION(blsp_i2c11), + FUNCTION(blsp_i2c12), + FUNCTION(blsp_i2c2), + FUNCTION(blsp_i2c3), + FUNCTION(blsp_i2c4), + FUNCTION(blsp_i2c5), + FUNCTION(blsp_i2c6), + FUNCTION(blsp_i2c7), + FUNCTION(blsp_i2c8), + FUNCTION(blsp_i2c9), + FUNCTION(blsp_spi1), + FUNCTION(blsp_spi10), + FUNCTION(blsp_spi11), + FUNCTION(blsp_spi12), + FUNCTION(blsp_spi2), + FUNCTION(blsp_spi3), + FUNCTION(blsp_spi4), + FUNCTION(blsp_spi5), + FUNCTION(blsp_spi6), + FUNCTION(blsp_spi7), + FUNCTION(blsp_spi8), + FUNCTION(blsp_spi9), + FUNCTION(blsp_uart1), + FUNCTION(blsp_uart10), + FUNCTION(blsp_uart11), + FUNCTION(blsp_uart12), + FUNCTION(blsp_uart2), + FUNCTION(blsp_uart3), + FUNCTION(blsp_uart4), + FUNCTION(blsp_uart5), + FUNCTION(blsp_uart6), + FUNCTION(blsp_uart7), + FUNCTION(blsp_uart8), + FUNCTION(blsp_uart9), + FUNCTION(blsp_uim1), + FUNCTION(blsp_uim10), + FUNCTION(blsp_uim11), + FUNCTION(blsp_uim12), + FUNCTION(blsp_uim2), + FUNCTION(blsp_uim3), + FUNCTION(blsp_uim4), + FUNCTION(blsp_uim5), + FUNCTION(blsp_uim6), + FUNCTION(blsp_uim7), + FUNCTION(blsp_uim8), + FUNCTION(blsp_uim9), + FUNCTION(btfm_slimbus), + FUNCTION(cam_mclk), + FUNCTION(cci_async), + FUNCTION(cci_i2c), + FUNCTION(cci_timer0), + FUNCTION(cci_timer1), + FUNCTION(cci_timer2), + FUNCTION(cci_timer3), + FUNCTION(cci_timer4), + FUNCTION(cri_trng), + FUNCTION(cri_trng0), + FUNCTION(cri_trng1), + FUNCTION(dac_calib0), + FUNCTION(dac_calib1), + FUNCTION(dac_calib10), + FUNCTION(dac_calib11), + FUNCTION(dac_calib12), + FUNCTION(dac_calib13), + FUNCTION(dac_calib14), + FUNCTION(dac_calib15), + FUNCTION(dac_calib16), + FUNCTION(dac_calib17), + FUNCTION(dac_calib18), + FUNCTION(dac_calib19), + FUNCTION(dac_calib2), + FUNCTION(dac_calib20), + FUNCTION(dac_calib21), + FUNCTION(dac_calib22), + FUNCTION(dac_calib23), + FUNCTION(dac_calib24), + FUNCTION(dac_calib25), + FUNCTION(dac_calib26), + FUNCTION(dac_calib3), + FUNCTION(dac_calib4), + FUNCTION(dac_calib5), + FUNCTION(dac_calib6), + FUNCTION(dac_calib7), + FUNCTION(dac_calib8), + FUNCTION(dac_calib9), + FUNCTION(dac_gpio), + FUNCTION(dbg_out), + FUNCTION(ddr_bist), + FUNCTION(edp_hot), + FUNCTION(edp_lcd), + FUNCTION(gcc_gp1_clk_a), + FUNCTION(gcc_gp1_clk_b), + FUNCTION(gcc_gp2_clk_a), + FUNCTION(gcc_gp2_clk_b), + FUNCTION(gcc_gp3_clk_a), + FUNCTION(gcc_gp3_clk_b), + FUNCTION(gpio), + FUNCTION(gsm_tx), + FUNCTION(hdmi_cec), + FUNCTION(hdmi_ddc), + FUNCTION(hdmi_hot), + FUNCTION(hdmi_rcv), + FUNCTION(isense_dbg), + FUNCTION(ldo_en), + FUNCTION(ldo_update), + FUNCTION(lpass_slimbus), + FUNCTION(m_voc), + FUNCTION(mdp_vsync), + FUNCTION(mdp_vsync_p_b), + FUNCTION(mdp_vsync_s_b), + FUNCTION(modem_tsync), + FUNCTION(mss_lte), + FUNCTION(nav_dr), + FUNCTION(nav_pps), + FUNCTION(pa_indicator), + FUNCTION(pci_e0), + FUNCTION(pci_e1), + FUNCTION(pci_e2), + FUNCTION(pll_bypassnl), + FUNCTION(pll_reset), + FUNCTION(pri_mi2s), + FUNCTION(prng_rosc), + FUNCTION(pwr_crypto), + FUNCTION(pwr_modem), + FUNCTION(pwr_nav), + FUNCTION(qdss_cti), + FUNCTION(qdss_cti_trig_in_a), + FUNCTION(qdss_cti_trig_in_b), + FUNCTION(qdss_cti_trig_out_a), + 
FUNCTION(qdss_cti_trig_out_b), + FUNCTION(qdss_stm0), + FUNCTION(qdss_stm1), + FUNCTION(qdss_stm10), + FUNCTION(qdss_stm11), + FUNCTION(qdss_stm12), + FUNCTION(qdss_stm13), + FUNCTION(qdss_stm14), + FUNCTION(qdss_stm15), + FUNCTION(qdss_stm16), + FUNCTION(qdss_stm17), + FUNCTION(qdss_stm18), + FUNCTION(qdss_stm19), + FUNCTION(qdss_stm2), + FUNCTION(qdss_stm20), + FUNCTION(qdss_stm21), + FUNCTION(qdss_stm22), + FUNCTION(qdss_stm23), + FUNCTION(qdss_stm24), + FUNCTION(qdss_stm25), + FUNCTION(qdss_stm26), + FUNCTION(qdss_stm27), + FUNCTION(qdss_stm28), + FUNCTION(qdss_stm29), + FUNCTION(qdss_stm3), + FUNCTION(qdss_stm30), + FUNCTION(qdss_stm31), + FUNCTION(qdss_stm4), + FUNCTION(qdss_stm5), + FUNCTION(qdss_stm6), + FUNCTION(qdss_stm7), + FUNCTION(qdss_stm8), + FUNCTION(qdss_stm9), + FUNCTION(qdss_traceclk_a), + FUNCTION(qdss_traceclk_b), + FUNCTION(qdss_tracectl_a), + FUNCTION(qdss_tracectl_b), + FUNCTION(qdss_tracedata_11), + FUNCTION(qdss_tracedata_12), + FUNCTION(qdss_tracedata_a), + FUNCTION(qdss_tracedata_b), + FUNCTION(qspi0), + FUNCTION(qspi1), + FUNCTION(qspi2), + FUNCTION(qspi3), + FUNCTION(qspi_clk), + FUNCTION(qspi_cs), + FUNCTION(qua_mi2s), + FUNCTION(sd_card), + FUNCTION(sd_write), + FUNCTION(sdc40), + FUNCTION(sdc41), + FUNCTION(sdc42), + FUNCTION(sdc43), + FUNCTION(sdc4_clk), + FUNCTION(sdc4_cmd), + FUNCTION(sec_mi2s), + FUNCTION(spkr_i2s), + FUNCTION(ssbi1), + FUNCTION(ssbi2), + FUNCTION(ssc_irq), + FUNCTION(ter_mi2s), + FUNCTION(tsense_pwm1), + FUNCTION(tsense_pwm2), + FUNCTION(tsif1_clk), + FUNCTION(tsif1_data), + FUNCTION(tsif1_en), + FUNCTION(tsif1_error), + FUNCTION(tsif1_sync), + FUNCTION(tsif2_clk), + FUNCTION(tsif2_data), + FUNCTION(tsif2_en), + FUNCTION(tsif2_error), + FUNCTION(tsif2_sync), + FUNCTION(uim1), + FUNCTION(uim2), + FUNCTION(uim3), + FUNCTION(uim4), + FUNCTION(uim_batt), + FUNCTION(vfr_1), +}; + +static const struct msm_pingroup msm8996_groups[] = { + PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA, NA, NA), + PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA, NA, NA), + PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA), + PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, atest_tsens, + bimc_dte1, NA, NA, NA), + PINGROUP(4, blsp_spi8, blsp_uart8, blsp_uim8, NA, qdss_cti_trig_out_b, + dac_calib0, bimc_dte0, NA, NA), + PINGROUP(5, blsp_spi8, blsp_uart8, blsp_uim8, NA, qdss_cti_trig_in_b, + dac_calib1, bimc_dte1, NA, NA), + PINGROUP(6, blsp_spi8, blsp_uart8, blsp_i2c8, NA, dac_calib2, + bimc_dte0, NA, NA, NA), + PINGROUP(7, blsp_spi8, blsp_uart8, blsp_i2c8, NA, atest_tsens2, + atest_usb1, NA, NA, NA), + PINGROUP(8, blsp_spi10, blsp_uart10, blsp_uim10, NA, atest_bbrx1, + atest_usb13, NA, NA, NA), + PINGROUP(9, blsp_spi10, blsp_uart10, blsp_uim10, atest_bbrx0, + atest_usb12, NA, NA, NA, NA), + PINGROUP(10, mdp_vsync, blsp_spi10, blsp_uart10, blsp_i2c10, + atest_gpsadc1, atest_usb11, NA, NA, NA), + PINGROUP(11, mdp_vsync, blsp_spi10, blsp_uart10, blsp_i2c10, + atest_gpsadc0, atest_usb10, NA, NA, NA), + PINGROUP(12, mdp_vsync, m_voc, dac_gpio, atest_char, NA, NA, NA, NA, + NA), + PINGROUP(13, cam_mclk, pll_bypassnl, qdss_stm7, qdss_tracedata_b, NA, + NA, NA, NA, NA), + PINGROUP(14, cam_mclk, pll_reset, qdss_stm6, qdss_tracedata_b, NA, NA, + NA, NA, NA), + PINGROUP(15, cam_mclk, qdss_stm5, qdss_tracedata_b, NA, NA, NA, NA, NA, + NA), + PINGROUP(16, cam_mclk, qdss_stm4, qdss_tracedata_b, NA, atest_usb2, NA, + NA, NA, NA), + PINGROUP(17, cci_i2c, qdss_stm3, qdss_tracedata_b, dac_calib3, + atest_usb23, atest_char3, NA, NA, 
NA), + PINGROUP(18, cci_i2c, qdss_stm2, qdss_tracedata_b, dac_calib4, + atest_usb22, atest_char2, NA, NA, NA), + PINGROUP(19, cci_i2c, qdss_stm1, qdss_tracedata_b, dac_calib5, + atest_usb21, atest_char1, NA, NA, NA), + PINGROUP(20, cci_i2c, dbg_out, qdss_stm0, dac_calib6, atest_usb20, + atest_char0, NA, NA, NA), + PINGROUP(21, cci_timer0, qdss_stm13, qdss_tracedata_b, dac_calib7, NA, + NA, NA, NA, NA), + PINGROUP(22, cci_timer1, qdss_stm12, qdss_tracedata_b, dac_calib8, NA, + NA, NA, NA, NA), + PINGROUP(23, cci_timer2, blsp1_spi, qdss_stm11, qdss_tracedata_b, + dac_calib9, NA, NA, NA, NA), + PINGROUP(24, cci_timer3, cci_async, blsp1_spi, qdss_stm10, + qdss_cti_trig_in_a, dac_calib10, NA, NA, NA), + PINGROUP(25, cci_timer4, cci_async, blsp_spi6, blsp_uart6, blsp_uim6, + blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11), + PINGROUP(26, cci_async, blsp_spi6, blsp_uart6, blsp_uim6, qdss_stm8, + qdss_tracedata_b, dac_calib12, NA, NA), + PINGROUP(27, blsp_spi6, blsp_uart6, blsp_i2c6, blsp1_spi, + qdss_tracectl_a, dac_calib13, NA, NA, NA), + PINGROUP(28, blsp_spi6, blsp_uart6, blsp_i2c6, blsp1_spi, + qdss_traceclk_a, dac_calib14, NA, NA, NA), + PINGROUP(29, blsp2_spi, NA, qdss_tracedata_b, dac_calib15, NA, NA, NA, + NA, NA), + PINGROUP(30, hdmi_rcv, blsp2_spi, dac_calib16, NA, NA, NA, NA, NA, NA), + PINGROUP(31, hdmi_cec, pwr_modem, dac_calib17, NA, NA, NA, NA, NA, NA), + PINGROUP(32, hdmi_ddc, pwr_nav, NA, dac_calib18, NA, NA, NA, NA, NA), + PINGROUP(33, hdmi_ddc, pwr_crypto, NA, dac_calib19, NA, NA, NA, NA, NA), + PINGROUP(34, hdmi_hot, NA, dac_calib20, NA, NA, NA, NA, NA, NA), + PINGROUP(35, pci_e0, NA, dac_calib21, NA, NA, NA, NA, NA, NA), + PINGROUP(36, pci_e0, NA, dac_calib22, NA, NA, NA, NA, NA, NA), + PINGROUP(37, NA, dac_calib23, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(38, NA, dac_calib24, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(39, tsif1_sync, NA, dac_calib25, NA, NA, NA, NA, NA, NA), + PINGROUP(40, sd_write, tsif1_error, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(41, blsp_spi2, blsp_uart2, blsp_uim2, NA, qdss_cti, + dac_calib0, NA, NA, NA), + PINGROUP(42, blsp_spi2, blsp_uart2, blsp_uim2, NA, qdss_cti, + dac_calib1, NA, NA, NA), + PINGROUP(43, blsp_spi2, blsp_uart2, blsp_i2c2, NA, dac_calib2, NA, NA, + NA, NA), + PINGROUP(44, blsp_spi2, blsp_uart2, blsp_i2c2, NA, dac_calib3, NA, NA, + NA, NA), + PINGROUP(45, blsp_spi3, blsp_uart3, blsp_uim3, NA, dac_calib4, NA, NA, + NA, NA), + PINGROUP(46, blsp_spi3, blsp_uart3, blsp_uim3, NA, dac_calib5, NA, NA, + NA, NA), + PINGROUP(47, blsp_spi3, blsp_uart3, blsp_i2c3, dac_calib6, NA, NA, NA, + NA, NA), + PINGROUP(48, blsp_spi3, blsp_uart3, blsp_i2c3, dac_calib7, NA, NA, NA, + NA, NA), + PINGROUP(49, uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, + dac_calib8, NA, NA, NA), + PINGROUP(50, uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, + dac_calib9, NA, NA, NA), + PINGROUP(51, uim3, blsp_spi9, blsp_uart9, blsp_i2c9, blsp10_spi, + dac_calib10, NA, NA, NA), + PINGROUP(52, uim3, blsp_spi9, blsp_uart9, blsp_i2c9, + blsp10_spi, dac_calib11, NA, NA, NA), + PINGROUP(53, blsp_spi7, blsp_uart7, blsp_uim7, NA, qdss_tracedata_a, + dac_calib12, NA, NA, NA), + PINGROUP(54, blsp_spi7, blsp_uart7, blsp_uim7, NA, NA, + qdss_tracedata_a, dac_calib13, NA, NA), + PINGROUP(55, blsp_spi7, blsp_uart7, blsp_i2c7, NA, dac_calib14, NA, NA, + NA, NA), + PINGROUP(56, blsp_spi7, blsp_uart7, blsp_i2c7, NA, dac_calib15, NA, NA, + NA, NA), + PINGROUP(57, qua_mi2s, gcc_gp1_clk_a, NA, qdss_tracedata_b, + dac_calib16, NA, NA, NA, NA), + PINGROUP(58, qua_mi2s, uim4, 
blsp_spi11, blsp_uart11, blsp_uim11, + gcc_gp2_clk_a, NA, qdss_tracedata_b, dac_calib17), + PINGROUP(59, qua_mi2s, uim4, blsp_spi11, blsp_uart11, blsp_uim11, + gcc_gp3_clk_a, NA, dac_calib18, NA), + PINGROUP(60, qua_mi2s, uim4, blsp_spi11, blsp_uart11, blsp_i2c11, + cri_trng0, NA, dac_calib19, NA), + PINGROUP(61, qua_mi2s, uim4, blsp_spi11, blsp_uart11, + blsp_i2c11, cri_trng1, NA, dac_calib20, NA), + PINGROUP(62, qua_mi2s, cri_trng, NA, dac_calib21, NA, NA, NA, NA, NA), + PINGROUP(63, qua_mi2s, NA, NA, qdss_stm18, qdss_tracedata_a, + dac_calib22, NA, NA, NA), + PINGROUP(64, pri_mi2s, NA, qdss_stm17, qdss_tracedata_a, dac_calib23, + NA, NA, NA, NA), + PINGROUP(65, pri_mi2s, blsp_spi4, blsp_uart4, blsp_uim4, NA, + qdss_stm16, qdss_tracedata_a, dac_calib24, NA), + PINGROUP(66, pri_mi2s, blsp_spi4, blsp_uart4, blsp_uim4, NA, + qdss_stm15, qdss_tracedata_a, dac_calib25, NA), + PINGROUP(67, pri_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, qdss_stm14, + qdss_tracedata_a, dac_calib26, NA, NA), + PINGROUP(68, pri_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA, + NA, NA), + PINGROUP(69, spkr_i2s, audio_ref, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(70, lpass_slimbus, spkr_i2s, isense_dbg, NA, NA, NA, NA, NA, + NA), + PINGROUP(71, lpass_slimbus, spkr_i2s, tsense_pwm1, tsense_pwm2, NA, NA, + NA, NA, NA), + PINGROUP(72, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(73, btfm_slimbus, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, btfm_slimbus, ter_mi2s, qdss_stm22, qdss_tracedata_a, NA, + NA, NA, NA, NA), + PINGROUP(75, ter_mi2s, qdss_stm21, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA), + PINGROUP(76, ter_mi2s, qdss_stm20, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA), + PINGROUP(77, ter_mi2s, qdss_stm19, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA), + PINGROUP(78, ter_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(79, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(80, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(81, sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b, + NA, NA, NA, NA), + PINGROUP(82, sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp3_clk_b, + NA, NA, NA, NA), + PINGROUP(83, sec_mi2s, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, + NA, NA), + PINGROUP(84, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA), + PINGROUP(85, blsp_spi12, blsp_uart12, blsp_uim12, NA, qdss_stm25, + qdss_tracedata_a, NA, NA, NA), + PINGROUP(86, blsp_spi12, blsp_uart12, blsp_uim12, NA, NA, qdss_stm31, + qdss_tracedata_a, NA, NA), + PINGROUP(87, blsp_spi12, blsp_uart12, blsp_i2c12, NA, qdss_stm30, + qdss_tracedata_a, NA, NA, NA), + PINGROUP(88, blsp_spi12, blsp_uart12, blsp_i2c12, blsp10_spi, NA, + qdss_stm29, NA, NA, NA), + PINGROUP(89, tsif1_clk, qdss_stm28, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA), + PINGROUP(90, tsif1_en, blsp1_spi, qdss_tracedata_a, NA, NA, NA, NA, NA, + NA), + PINGROUP(91, tsif1_data, sdc4_cmd, qdss_stm27, qdss_traceclk_b, NA, NA, + NA, NA, NA), + PINGROUP(92, tsif2_error, sdc43, vfr_1, qdss_stm26, qdss_tracedata_b, + NA, NA, NA, NA), + PINGROUP(93, tsif2_clk, sdc4_clk, NA, qdss_stm24, qdss_tracedata_b, NA, + NA, NA, NA), + PINGROUP(94, tsif2_en, sdc42, NA, qdss_stm23, qdss_tracectl_b, NA, NA, + NA, NA), + PINGROUP(95, tsif2_data, sdc41, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(96, tsif2_sync, sdc40, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(97, NA, NA, mdp_vsync_p_b, ldo_en, NA, NA, NA, NA, NA), + PINGROUP(98, NA, NA, mdp_vsync_s_b, ldo_update, NA, NA, NA, NA, NA), + PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(100, 
NA, NA, blsp11_uart_tx_b, qdss_cti, NA, NA, NA, NA, NA), + PINGROUP(101, NA, blsp11_uart_rx_b, qdss_cti, NA, NA, NA, NA, NA, NA), + PINGROUP(102, NA, blsp11_i2c_sda_b, prng_rosc, NA, NA, NA, NA, NA, NA), + PINGROUP(103, NA, blsp11_i2c_scl_b, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(105, uim2, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(106, uim2, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(107, uim2, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(108, uim2, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(109, uim1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(110, uim1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(111, uim1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(112, uim1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(113, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(114, NA, pci_e2, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(115, NA, pci_e2, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(116, NA, pa_indicator, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(118, adsp_ext, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(121, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(122, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(123, ddr_bist, qdss_tracedata_11, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(124, ddr_bist, qdss_tracedata_12, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(128, NA, modem_tsync, nav_dr, nav_pps, NA, NA, NA, NA, NA), + PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(130, pci_e1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(131, pci_e1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(134, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(135, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(137, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(138, NA, qspi_cs, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(139, NA, ssbi2, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(140, NA, ssbi1, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(141, NA, qspi_cs, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(144, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(145, mss_lte, qspi_clk, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(146, NA, qspi0, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(147, NA, qspi1, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(148, NA, qspi2, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(149, NA, qspi3, NA, NA, NA, NA, NA, NA, NA), + SDC_QDSD_PINGROUP(sdc1_clk, 0x12c000, 13, 6), + SDC_QDSD_PINGROUP(sdc1_cmd, 0x12c000, 11, 3), + SDC_QDSD_PINGROUP(sdc1_data, 0x12c000, 9, 0), + SDC_QDSD_PINGROUP(sdc2_clk, 0x12d000, 14, 6), + SDC_QDSD_PINGROUP(sdc2_cmd, 0x12d000, 11, 3), + SDC_QDSD_PINGROUP(sdc2_data, 0x12d000, 9, 0), + SDC_QDSD_PINGROUP(sdc1_rclk, 0x12c000, 15, 0), +}; + +static const struct msm_pinctrl_soc_data msm8996_pinctrl = { + .pins = msm8996_pins, + .npins = ARRAY_SIZE(msm8996_pins), + .functions = msm8996_functions, + .nfunctions = ARRAY_SIZE(msm8996_functions), + .groups = msm8996_groups, + .ngroups = ARRAY_SIZE(msm8996_groups), + .ngpios = 150, +}; + +static int 
msm8996_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &msm8996_pinctrl); +} + +static const struct of_device_id msm8996_pinctrl_of_match[] = { + { .compatible = "qcom,msm8996-pinctrl", }, + { } +}; + +static struct platform_driver msm8996_pinctrl_driver = { + .driver = { + .name = "msm8996-pinctrl", + .of_match_table = msm8996_pinctrl_of_match, + }, + .probe = msm8996_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init msm8996_pinctrl_init(void) +{ + return platform_driver_register(&msm8996_pinctrl_driver); +} +arch_initcall(msm8996_pinctrl_init); + +static void __exit msm8996_pinctrl_exit(void) +{ + platform_driver_unregister(&msm8996_pinctrl_driver); +} +module_exit(msm8996_pinctrl_exit); + +MODULE_DESCRIPTION("Qualcomm msm8996 pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, msm8996_pinctrl_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c index e9ff3bc150bb..f448534edf46 100644 --- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c +++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c @@ -32,6 +32,9 @@ static struct msm_pinctrl_soc_data qdf2xxx_pinctrl; +/* A reasonable limit to the number of GPIOS */ +#define MAX_GPIOS 256 + static int qdf2xxx_pinctrl_probe(struct platform_device *pdev) { struct pinctrl_pin_desc *pins; @@ -42,11 +45,13 @@ static int qdf2xxx_pinctrl_probe(struct platform_device *pdev) /* Query the number of GPIOs from ACPI */ ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios); - if (ret < 0) + if (ret < 0) { + dev_warn(&pdev->dev, "missing num-gpios property\n"); return ret; + } - if (!num_gpios) { - dev_warn(&pdev->dev, "missing num-gpios property\n"); + if (!num_gpios || num_gpios > MAX_GPIOS) { + dev_warn(&pdev->dev, "invalid num-gpios property\n"); return -ENODEV; } @@ -55,6 +60,9 @@ static int qdf2xxx_pinctrl_probe(struct platform_device *pdev) groups = devm_kcalloc(&pdev->dev, num_gpios, sizeof(struct msm_pingroup), GFP_KERNEL); + if (!pins || !groups) + return -ENOMEM; + for (i = 0; i < num_gpios; i++) { pins[i].number = i; diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 6c42ca14d2fd..77f6a5cb1008 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -14,6 +14,7 @@ #include <linux/gpio.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinmux.h> @@ -693,18 +694,19 @@ static int pmic_gpio_probe(struct platform_device *pdev) struct pmic_gpio_pad *pad, *pads; struct pmic_gpio_state *state; int ret, npins, i; - u32 res[2]; + u32 reg; - ret = of_property_read_u32_array(dev->of_node, "reg", res, 2); + ret = of_property_read_u32(dev->of_node, "reg", &reg); if (ret < 0) { - dev_err(dev, "missing base address and/or range"); + dev_err(dev, "missing base address"); return ret; } - npins = res[1] / PMIC_GPIO_ADDRESS_RANGE; - + npins = platform_irq_count(pdev); if (!npins) return -EINVAL; + if (npins < 0) + return npins; BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups)); @@ -752,7 +754,7 @@ static int pmic_gpio_probe(struct platform_device *pdev) if (pad->irq < 0) return pad->irq; - pad->base = res[0] + i * PMIC_GPIO_ADDRESS_RANGE; + pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE; ret = pmic_gpio_populate(state, pad); if (ret < 0) @@ -804,6 +806,7 @@ static int pmic_gpio_remove(struct platform_device *pdev) static const struct 
of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */ { .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */ + { .compatible = "qcom,pm8994-gpio" }, /* 22 GPIO's */ { .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */ { }, }; diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 9ce0e30e33e8..2df4f29175ae 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -14,6 +14,7 @@ #include <linux/gpio.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinmux.h> @@ -795,17 +796,19 @@ static int pmic_mpp_probe(struct platform_device *pdev) struct pmic_mpp_pad *pad, *pads; struct pmic_mpp_state *state; int ret, npins, i; - u32 res[2]; + u32 reg; - ret = of_property_read_u32_array(dev->of_node, "reg", res, 2); + ret = of_property_read_u32(dev->of_node, "reg", &reg); if (ret < 0) { - dev_err(dev, "missing base address and/or range"); + dev_err(dev, "missing base address"); return ret; } - npins = res[1] / PMIC_MPP_ADDRESS_RANGE; + npins = platform_irq_count(pdev); if (!npins) return -EINVAL; + if (npins < 0) + return npins; BUG_ON(npins > ARRAY_SIZE(pmic_mpp_groups)); @@ -854,7 +857,7 @@ static int pmic_mpp_probe(struct platform_device *pdev) if (pad->irq < 0) return pad->irq; - pad->base = res[0] + i * PMIC_MPP_ADDRESS_RANGE; + pad->base = reg + i * PMIC_MPP_ADDRESS_RANGE; ret = pmic_mpp_populate(state, pad); if (ret < 0) @@ -907,6 +910,7 @@ static const struct of_device_id pmic_mpp_of_match[] = { { .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */ { .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */ { .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */ + { .compatible = "qcom,pm8994-mpp" }, /* 8 MPP's */ { .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */ { }, }; diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index 19a3c3bc2f1f..e51176ec83d2 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -23,6 +23,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <dt-bindings/pinctrl/qcom,pmic-gpio.h> @@ -650,11 +651,12 @@ static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl, } static const struct of_device_id pm8xxx_gpio_of_match[] = { - { .compatible = "qcom,pm8018-gpio", .data = (void *)6 }, - { .compatible = "qcom,pm8038-gpio", .data = (void *)12 }, - { .compatible = "qcom,pm8058-gpio", .data = (void *)40 }, - { .compatible = "qcom,pm8917-gpio", .data = (void *)38 }, - { .compatible = "qcom,pm8921-gpio", .data = (void *)44 }, + { .compatible = "qcom,pm8018-gpio" }, + { .compatible = "qcom,pm8038-gpio" }, + { .compatible = "qcom,pm8058-gpio" }, + { .compatible = "qcom,pm8917-gpio" }, + { .compatible = "qcom,pm8921-gpio" }, + { .compatible = "qcom,ssbi-gpio" }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match); @@ -665,14 +667,19 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) struct pinctrl_pin_desc *pins; struct pm8xxx_gpio *pctrl; int ret; - int i; + int i, npins; pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); + npins = platform_irq_count(pdev); + if (!npins) + return -EINVAL; + if (npins < 0) + return npins; + pctrl->npins = 
npins; pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index b868ef1766a0..e9f01de51e18 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -23,6 +23,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <dt-bindings/pinctrl/qcom,pmic-mpp.h> @@ -741,11 +742,12 @@ static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl, } static const struct of_device_id pm8xxx_mpp_of_match[] = { - { .compatible = "qcom,pm8018-mpp", .data = (void *)6 }, - { .compatible = "qcom,pm8038-mpp", .data = (void *)6 }, - { .compatible = "qcom,pm8917-mpp", .data = (void *)10 }, - { .compatible = "qcom,pm8821-mpp", .data = (void *)4 }, - { .compatible = "qcom,pm8921-mpp", .data = (void *)12 }, + { .compatible = "qcom,pm8018-mpp" }, + { .compatible = "qcom,pm8038-mpp" }, + { .compatible = "qcom,pm8917-mpp" }, + { .compatible = "qcom,pm8821-mpp" }, + { .compatible = "qcom,pm8921-mpp" }, + { .compatible = "qcom,ssbi-mpp" }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match); @@ -756,14 +758,19 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) struct pinctrl_pin_desc *pins; struct pm8xxx_mpp *pctrl; int ret; - int i; + int i, npins; pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); + npins = platform_irq_count(pdev); + if (!npins) + return -EINVAL; + if (npins < 0) + return npins; + pctrl->npins = npins; pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 71ccf6a90b22..16e2293cc2bc 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -1150,6 +1150,109 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { }, }; +/* pin banks of exynos5410 pin-controller 0 */ +static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), + EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), + EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08), + EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c), + EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10), + EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14), + EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpb3", 0x18), + EXYNOS_PIN_BANK_EINTG(7, 0x0E0, "gpc0", 0x1c), + EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20), + EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24), + EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28), + EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"), + EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c), + EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30), + EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34), + EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf0", 0x38), + EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpf1", 0x3c), + EXYNOS_PIN_BANK_EINTG(8, 0x220, "gpg0", 0x40), + EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpg1", 0x44), + EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48), + EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c), + EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50), + EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"), + EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"), + EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"), + EXYNOS_PIN_BANK_EINTN(6, 0x320, "gpy2"), + EXYNOS_PIN_BANK_EINTN(8, 0x340, "gpy3"), + EXYNOS_PIN_BANK_EINTN(8, 0x360, "gpy4"), + 
EXYNOS_PIN_BANK_EINTN(8, 0x380, "gpy5"), + EXYNOS_PIN_BANK_EINTN(8, 0x3A0, "gpy6"), + EXYNOS_PIN_BANK_EINTN(8, 0x3C0, "gpy7"), + EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00), + EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04), + EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08), + EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c), +}; + +/* pin banks of exynos5410 pin-controller 1 */ +static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst = { + EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpj0", 0x00), + EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpj1", 0x04), + EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpj2", 0x08), + EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpj3", 0x0c), + EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpj4", 0x10), + EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpk0", 0x14), + EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpk1", 0x18), + EXYNOS_PIN_BANK_EINTG(8, 0x0E0, "gpk2", 0x1c), + EXYNOS_PIN_BANK_EINTG(7, 0x100, "gpk3", 0x20), +}; + +/* pin banks of exynos5410 pin-controller 2 */ +static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00), + EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04), + EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08), + EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpv3", 0x0c), + EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpv4", 0x10), +}; + +/* pin banks of exynos5410 pin-controller 3 */ +static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = { + EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00), +}; + +/* + * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes + * four gpio/pin-mux/pinconfig controllers. + */ +const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos5410_pin_banks0, + .nr_banks = ARRAY_SIZE(exynos5410_pin_banks0), + .eint_gpio_init = exynos_eint_gpio_init, + .eint_wkup_init = exynos_eint_wkup_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 1 data */ + .pin_banks = exynos5410_pin_banks1, + .nr_banks = ARRAY_SIZE(exynos5410_pin_banks1), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 2 data */ + .pin_banks = exynos5410_pin_banks2, + .nr_banks = ARRAY_SIZE(exynos5410_pin_banks2), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 3 data */ + .pin_banks = exynos5410_pin_banks3, + .nr_banks = ARRAY_SIZE(exynos5410_pin_banks3), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, +}; + /* pin banks of exynos5420 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 3f622ccd8eab..48294e7449a4 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1222,6 +1222,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = { .data = (void *)exynos5250_pin_ctrl }, { .compatible = "samsung,exynos5260-pinctrl", .data = (void *)exynos5260_pin_ctrl }, + { .compatible = "samsung,exynos5410-pinctrl", + .data = (void *)exynos5410_pin_ctrl }, { .compatible = "samsung,exynos5420-pinctrl", .data = (void *)exynos5420_pin_ctrl }, { .compatible = 
"samsung,exynos5433-pinctrl", diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index c1239ff6157d..cd31bfaf62cb 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -270,6 +270,7 @@ extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; extern const struct samsung_pin_ctrl exynos4415_pin_ctrl[]; extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; +extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c index 02118ab336fc..1cbbe04d7df6 100644 --- a/drivers/pinctrl/sh-pfc/pfc-emev2.c +++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c @@ -258,18 +258,18 @@ static const u16 pinmux_data[] = { /* GPSR0 */ /* V9 */ - PINMUX_DATA(JT_SEL_MARK, FN_JT_SEL), + PINMUX_SINGLE(JT_SEL), /* U9 */ - PINMUX_DATA(ERR_RST_REQB_MARK, FN_ERR_RST_REQB), + PINMUX_SINGLE(ERR_RST_REQB), /* V8 */ - PINMUX_DATA(REF_CLKO_MARK, FN_REF_CLKO), + PINMUX_SINGLE(REF_CLKO), /* U8 */ - PINMUX_DATA(EXT_CLKI_MARK, FN_EXT_CLKI), + PINMUX_SINGLE(EXT_CLKI), /* B22*/ PINMUX_IPSR_NOFN(LCD3_1_0_PORT18, LCD3_PXCLK, SEL_LCD3_1_0_00), PINMUX_IPSR_NOFN(LCD3_1_0_PORT18, YUV3_CLK_O, SEL_LCD3_1_0_01), /* C21 */ - PINMUX_DATA(LCD3_PXCLKB_MARK, FN_LCD3_PXCLKB), + PINMUX_SINGLE(LCD3_PXCLKB), /* A21 */ PINMUX_IPSR_NOFN(LCD3_1_0_PORT20, LCD3_CLK_I, SEL_LCD3_1_0_00), PINMUX_IPSR_NOFN(LCD3_1_0_PORT20, YUV3_CLK_I, SEL_LCD3_1_0_01), @@ -285,17 +285,17 @@ static const u16 pinmux_data[] = { /* GPSR1 */ /* A20 */ - PINMUX_DATA(LCD3_R0_MARK, FN_LCD3_R0), + PINMUX_SINGLE(LCD3_R0), /* B20 */ - PINMUX_DATA(LCD3_R1_MARK, FN_LCD3_R1), + PINMUX_SINGLE(LCD3_R1), /* A19 */ - PINMUX_DATA(LCD3_R2_MARK, FN_LCD3_R2), + PINMUX_SINGLE(LCD3_R2), /* B19 */ - PINMUX_DATA(LCD3_R3_MARK, FN_LCD3_R3), + PINMUX_SINGLE(LCD3_R3), /* C19 */ - PINMUX_DATA(LCD3_R4_MARK, FN_LCD3_R4), + PINMUX_SINGLE(LCD3_R4), /* B18 */ - PINMUX_DATA(LCD3_R5_MARK, FN_LCD3_R5), + PINMUX_SINGLE(LCD3_R5), /* C18 */ PINMUX_IPSR_NOFN(LCD3_9_8_PORT38, LCD3_R6, SEL_LCD3_9_8_00), PINMUX_IPSR_NOFN(LCD3_9_8_PORT38, TP33_CLK, SEL_LCD3_9_8_10), @@ -367,9 +367,9 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_NOFN(LCD3_11_10_PORT43, YUV3_D15, SEL_LCD3_11_10_01), PINMUX_IPSR_NOFN(LCD3_11_10_PORT43, TP33_DATA15, SEL_LCD3_11_10_10), /* AA9 */ - PINMUX_DATA(IIC0_SCL_MARK, FN_IIC0_SCL), + PINMUX_SINGLE(IIC0_SCL), /* AA8 */ - PINMUX_DATA(IIC0_SDA_MARK, FN_IIC0_SDA), + PINMUX_SINGLE(IIC0_SDA), /* Y9 */ PINMUX_IPSR_NOFN(IIC_1_0_PORT46, IIC1_SCL, SEL_IIC_1_0_00), PINMUX_IPSR_NOFN(IIC_1_0_PORT46, UART3_RX, SEL_IIC_1_0_01), @@ -377,51 +377,51 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_NOFN(IIC_1_0_PORT47, IIC1_SDA, SEL_IIC_1_0_00), PINMUX_IPSR_NOFN(IIC_1_0_PORT47, UART3_TX, SEL_IIC_1_0_01), /* AC19 */ - PINMUX_DATA(SD_CKI_MARK, FN_SD_CKI), + PINMUX_SINGLE(SD_CKI), /* AB18 */ - PINMUX_DATA(SDI0_CKO_MARK, FN_SDI0_CKO), + PINMUX_SINGLE(SDI0_CKO), /* AC18 */ - PINMUX_DATA(SDI0_CKI_MARK, FN_SDI0_CKI), + PINMUX_SINGLE(SDI0_CKI), /* Y12 */ - PINMUX_DATA(SDI0_CMD_MARK, FN_SDI0_CMD), + PINMUX_SINGLE(SDI0_CMD), /* AA13 */ - PINMUX_DATA(SDI0_DATA0_MARK, FN_SDI0_DATA0), + PINMUX_SINGLE(SDI0_DATA0), /* Y13 */ - PINMUX_DATA(SDI0_DATA1_MARK, FN_SDI0_DATA1), + PINMUX_SINGLE(SDI0_DATA1), /* AA14 */ - 
PINMUX_DATA(SDI0_DATA2_MARK, FN_SDI0_DATA2), + PINMUX_SINGLE(SDI0_DATA2), /* Y14 */ - PINMUX_DATA(SDI0_DATA3_MARK, FN_SDI0_DATA3), + PINMUX_SINGLE(SDI0_DATA3), /* AA15 */ - PINMUX_DATA(SDI0_DATA4_MARK, FN_SDI0_DATA4), + PINMUX_SINGLE(SDI0_DATA4), /* Y15 */ - PINMUX_DATA(SDI0_DATA5_MARK, FN_SDI0_DATA5), + PINMUX_SINGLE(SDI0_DATA5), /* AA16 */ - PINMUX_DATA(SDI0_DATA6_MARK, FN_SDI0_DATA6), + PINMUX_SINGLE(SDI0_DATA6), /* Y16 */ - PINMUX_DATA(SDI0_DATA7_MARK, FN_SDI0_DATA7), + PINMUX_SINGLE(SDI0_DATA7), /* AB22 */ - PINMUX_DATA(SDI1_CKO_MARK, FN_SDI1_CKO), + PINMUX_SINGLE(SDI1_CKO), /* AA23 */ - PINMUX_DATA(SDI1_CKI_MARK, FN_SDI1_CKI), + PINMUX_SINGLE(SDI1_CKI), /* AC21 */ - PINMUX_DATA(SDI1_CMD_MARK, FN_SDI1_CMD), + PINMUX_SINGLE(SDI1_CMD), /* GPSR2 */ /* AB21 */ - PINMUX_DATA(SDI1_DATA0_MARK, FN_SDI1_DATA0), + PINMUX_SINGLE(SDI1_DATA0), /* AB20 */ - PINMUX_DATA(SDI1_DATA1_MARK, FN_SDI1_DATA1), + PINMUX_SINGLE(SDI1_DATA1), /* AB19 */ - PINMUX_DATA(SDI1_DATA2_MARK, FN_SDI1_DATA2), + PINMUX_SINGLE(SDI1_DATA2), /* AA19 */ - PINMUX_DATA(SDI1_DATA3_MARK, FN_SDI1_DATA3), + PINMUX_SINGLE(SDI1_DATA3), /* J23 */ - PINMUX_DATA(AB_CLK_MARK, FN_AB_CLK), + PINMUX_SINGLE(AB_CLK), /* D21 */ - PINMUX_DATA(AB_CSB0_MARK, FN_AB_CSB0), + PINMUX_SINGLE(AB_CSB0), /* E21 */ - PINMUX_DATA(AB_CSB1_MARK, FN_AB_CSB1), + PINMUX_SINGLE(AB_CSB1), /* F20 */ PINMUX_IPSR_NOFN(AB_1_0_PORT71, AB_CSB2, SEL_AB_1_0_00), PINMUX_IPSR_NOFN(AB_1_0_PORT71, CF_CSB0, SEL_AB_1_0_10), @@ -514,7 +514,7 @@ static const u16 pinmux_data[] = { /* GPSR3 */ /* M21 */ - PINMUX_DATA(AB_A20_MARK, FN_AB_A20), + PINMUX_SINGLE(AB_A20), /* N21 */ PINMUX_IPSR_NOFN(AB_9_8_PORT97, AB_A21, SEL_AB_9_8_00), PINMUX_IPSR_NOFN(AB_9_8_PORT97, SDI2_CKO, SEL_AB_9_8_01), @@ -541,13 +541,13 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_NOFN(AB_13_12_PORT104, AB_A28, SEL_AB_13_12_00), PINMUX_IPSR_NOFN(AB_13_12_PORT104, AB_BEN1, SEL_AB_13_12_10), /* B8 */ - PINMUX_DATA(USI0_CS1_MARK, FN_USI0_CS1), + PINMUX_SINGLE(USI0_CS1), /* B9 */ - PINMUX_DATA(USI0_CS2_MARK, FN_USI0_CS2), + PINMUX_SINGLE(USI0_CS2), /* C10 */ - PINMUX_DATA(USI1_DI_MARK, FN_USI1_DI), + PINMUX_SINGLE(USI1_DI), /* D10 */ - PINMUX_DATA(USI1_DO_MARK, FN_USI1_DO), + PINMUX_SINGLE(USI1_DO), /* AB5 */ PINMUX_IPSR_NOFN(USI_1_0_PORT109, USI2_CLK, SEL_USI_1_0_00), PINMUX_IPSR_NOFN(USI_1_0_PORT109, DTV_BCLK_B, SEL_USI_1_0_01), @@ -587,49 +587,49 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_NOFN(USI_9_8_PORT121, PWM1, SEL_USI_9_8_00), PINMUX_IPSR_NOFN(USI_9_8_PORT121, USI4_DO, SEL_USI_9_8_01), /* V20 */ - PINMUX_DATA(NTSC_CLK_MARK, FN_NTSC_CLK), + PINMUX_SINGLE(NTSC_CLK), /* P20 */ - PINMUX_DATA(NTSC_DATA0_MARK, FN_NTSC_DATA0), + PINMUX_SINGLE(NTSC_DATA0), /* P18 */ - PINMUX_DATA(NTSC_DATA1_MARK, FN_NTSC_DATA1), + PINMUX_SINGLE(NTSC_DATA1), /* R20 */ - PINMUX_DATA(NTSC_DATA2_MARK, FN_NTSC_DATA2), + PINMUX_SINGLE(NTSC_DATA2), /* R18 */ - PINMUX_DATA(NTSC_DATA3_MARK, FN_NTSC_DATA3), + PINMUX_SINGLE(NTSC_DATA3), /* T20 */ - PINMUX_DATA(NTSC_DATA4_MARK, FN_NTSC_DATA4), + PINMUX_SINGLE(NTSC_DATA4), /* GPRS3 */ /* T18 */ - PINMUX_DATA(NTSC_DATA5_MARK, FN_NTSC_DATA5), + PINMUX_SINGLE(NTSC_DATA5), /* U20 */ - PINMUX_DATA(NTSC_DATA6_MARK, FN_NTSC_DATA6), + PINMUX_SINGLE(NTSC_DATA6), /* U18 */ - PINMUX_DATA(NTSC_DATA7_MARK, FN_NTSC_DATA7), + PINMUX_SINGLE(NTSC_DATA7), /* W23 */ - PINMUX_DATA(CAM_CLKO_MARK, FN_CAM_CLKO), + PINMUX_SINGLE(CAM_CLKO), /* Y23 */ - PINMUX_DATA(CAM_CLKI_MARK, FN_CAM_CLKI), + PINMUX_SINGLE(CAM_CLKI), /* W22 */ - PINMUX_DATA(CAM_VS_MARK, FN_CAM_VS), + PINMUX_SINGLE(CAM_VS), /* V21 */ - 
PINMUX_DATA(CAM_HS_MARK, FN_CAM_HS), + PINMUX_SINGLE(CAM_HS), /* T21 */ - PINMUX_DATA(CAM_YUV0_MARK, FN_CAM_YUV0), + PINMUX_SINGLE(CAM_YUV0), /* T22 */ - PINMUX_DATA(CAM_YUV1_MARK, FN_CAM_YUV1), + PINMUX_SINGLE(CAM_YUV1), /* T23 */ - PINMUX_DATA(CAM_YUV2_MARK, FN_CAM_YUV2), + PINMUX_SINGLE(CAM_YUV2), /* U21 */ - PINMUX_DATA(CAM_YUV3_MARK, FN_CAM_YUV3), + PINMUX_SINGLE(CAM_YUV3), /* U22 */ - PINMUX_DATA(CAM_YUV4_MARK, FN_CAM_YUV4), + PINMUX_SINGLE(CAM_YUV4), /* U23 */ - PINMUX_DATA(CAM_YUV5_MARK, FN_CAM_YUV5), + PINMUX_SINGLE(CAM_YUV5), /* V22 */ - PINMUX_DATA(CAM_YUV6_MARK, FN_CAM_YUV6), + PINMUX_SINGLE(CAM_YUV6), /* V23 */ - PINMUX_DATA(CAM_YUV7_MARK, FN_CAM_YUV7), + PINMUX_SINGLE(CAM_YUV7), /* K22 */ PINMUX_IPSR_NOFN(HSI_1_0_PORT143, USI5_CLK_B, SEL_HSI_1_0_01), /* K23 */ @@ -647,17 +647,17 @@ static const u16 pinmux_data[] = { /* M22 */ PINMUX_IPSR_NOFN(HSI_1_0_PORT150, USI5_DI_B, SEL_HSI_1_0_01), /* D13 */ - PINMUX_DATA(JT_TDO_MARK, FN_JT_TDO), + PINMUX_SINGLE(JT_TDO), /* F13 */ - PINMUX_DATA(JT_TDOEN_MARK, FN_JT_TDOEN), + PINMUX_SINGLE(JT_TDOEN), /* AA12 */ - PINMUX_DATA(USB_VBUS_MARK, FN_USB_VBUS), + PINMUX_SINGLE(USB_VBUS), /* A12 */ - PINMUX_DATA(LOWPWR_MARK, FN_LOWPWR), + PINMUX_SINGLE(LOWPWR), /* Y11 */ - PINMUX_DATA(UART1_RX_MARK, FN_UART1_RX), + PINMUX_SINGLE(UART1_RX), /* Y10 */ - PINMUX_DATA(UART1_TX_MARK, FN_UART1_TX), + PINMUX_SINGLE(UART1_TX), /* AA10 */ PINMUX_IPSR_NOFN(UART_1_0_PORT157, UART1_CTSB, SEL_UART_1_0_00), PINMUX_IPSR_NOFN(UART_1_0_PORT157, UART2_RX, SEL_UART_1_0_01), @@ -749,7 +749,7 @@ static const unsigned int cf_ctrl_mux[] = { }; static const unsigned int cf_data8_pins[] = { - /* CF_D[0:8] */ + /* CF_D[0:7] */ 77, 78, 79, 80, 81, 82, 83, 84, }; diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c index 279e9dd442e4..7f7c8a6e76e8 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c @@ -2214,7 +2214,7 @@ static const unsigned int lcd1_data9_mux[] = { LCD1_D8_MARK, }; static const unsigned int lcd1_data12_pins[] = { - /* D[0:12] */ + /* D[0:11] */ 4, 3, 2, 1, 0, 91, 92, 23, 93, 94, 21, 201, }; diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c index bbd35dc1a0c4..ad09a670c2ff 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c @@ -548,17 +548,17 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */ - PINMUX_DATA(PENC0_MARK, FN_PENC0), - PINMUX_DATA(PENC1_MARK, FN_PENC1), - PINMUX_DATA(A1_MARK, FN_A1), - PINMUX_DATA(A2_MARK, FN_A2), - PINMUX_DATA(A3_MARK, FN_A3), - PINMUX_DATA(WE0_MARK, FN_WE0), - PINMUX_DATA(AUDIO_CLKA_MARK, FN_AUDIO_CLKA), - PINMUX_DATA(AUDIO_CLKB_MARK, FN_AUDIO_CLKB), - PINMUX_DATA(SSI_SCK34_MARK, FN_SSI_SCK34), - PINMUX_DATA(AVS1_MARK, FN_AVS1), - PINMUX_DATA(AVS2_MARK, FN_AVS2), + PINMUX_SINGLE(PENC0), + PINMUX_SINGLE(PENC1), + PINMUX_SINGLE(A1), + PINMUX_SINGLE(A2), + PINMUX_SINGLE(A3), + PINMUX_SINGLE(WE0), + PINMUX_SINGLE(AUDIO_CLKA), + PINMUX_SINGLE(AUDIO_CLKB), + PINMUX_SINGLE(SSI_SCK34), + PINMUX_SINGLE(AVS1), + PINMUX_SINGLE(AVS2), /* IPSR0 */ PINMUX_IPSR_DATA(IP0_1_0, PRESETOUT), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c index ed4e0788035c..bd17eccb6a89 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c @@ -23,13 +23,6 @@ #include "sh_pfc.h" -#define PORT_GP_9(bank, fn, sfx) \ - PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, 
sfx), \ - PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ - PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ - PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ - PORT_GP_1(bank, 8, fn, sfx) - #define CPU_ALL_PORT(fn, sfx) \ PORT_GP_32(0, fn, sfx), \ PORT_GP_32(1, fn, sfx), \ @@ -609,14 +602,14 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */ - PINMUX_DATA(AVS1_MARK, FN_AVS1), - PINMUX_DATA(AVS1_MARK, FN_AVS1), - PINMUX_DATA(A17_MARK, FN_A17), - PINMUX_DATA(A18_MARK, FN_A18), - PINMUX_DATA(A19_MARK, FN_A19), + PINMUX_SINGLE(AVS1), + PINMUX_SINGLE(AVS1), + PINMUX_SINGLE(A17), + PINMUX_SINGLE(A18), + PINMUX_SINGLE(A19), - PINMUX_DATA(USB_PENC0_MARK, FN_USB_PENC0), - PINMUX_DATA(USB_PENC1_MARK, FN_USB_PENC1), + PINMUX_SINGLE(USB_PENC0), + PINMUX_SINGLE(USB_PENC1), PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2), PINMUX_IPSR_MSEL(IP0_2_0, SCK0, SEL_SCIF0_0), @@ -2289,6 +2282,35 @@ static const unsigned int scif5_clk_d_pins[] = { static const unsigned int scif5_clk_d_mux[] = { SCK5_D_MARK, }; +/* - SCIF Clock ------------------------------------------------------------- */ +static const unsigned int scif_clk_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(4, 28), +}; +static const unsigned int scif_clk_mux[] = { + SCIF_CLK_MARK, +}; +static const unsigned int scif_clk_b_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(4, 5), +}; +static const unsigned int scif_clk_b_mux[] = { + SCIF_CLK_B_MARK, +}; +static const unsigned int scif_clk_c_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(4, 18), +}; +static const unsigned int scif_clk_c_mux[] = { + SCIF_CLK_C_MARK, +}; +static const unsigned int scif_clk_d_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(2, 29), +}; +static const unsigned int scif_clk_d_mux[] = { + SCIF_CLK_D_MARK, +}; /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ @@ -2700,6 +2722,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scif5_clk_c), SH_PFC_PIN_GROUP(scif5_data_d), SH_PFC_PIN_GROUP(scif5_clk_d), + SH_PFC_PIN_GROUP(scif_clk), + SH_PFC_PIN_GROUP(scif_clk_b), + SH_PFC_PIN_GROUP(scif_clk_c), + SH_PFC_PIN_GROUP(scif_clk_d), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), @@ -2909,6 +2935,13 @@ static const char * const scif5_groups[] = { "scif5_clk_d", }; +static const char * const scif_clk_groups[] = { + "scif_clk", + "scif_clk_b", + "scif_clk_c", + "scif_clk_d", +}; + static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", @@ -3004,6 +3037,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(scif3), SH_PFC_FUNCTION(scif4), SH_PFC_FUNCTION(scif5), + SH_PFC_FUNCTION(scif_clk), SH_PFC_FUNCTION(usb0), SH_PFC_FUNCTION(usb1), SH_PFC_FUNCTION(usb2), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c index d9924b0d53b7..a8b629bc7a55 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c @@ -26,23 +26,6 @@ #include "core.h" #include "sh_pfc.h" -#define PORT_GP_30(bank, fn, sfx) \ - PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ - PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ - PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ - PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ - PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ - PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, 
sfx), \ - PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \ - PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \ - PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \ - PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \ - PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \ - PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \ - PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \ - PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \ - PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx) - #define CPU_ALL_PORT(fn, sfx) \ PORT_GP_32(0, fn, sfx), \ PORT_GP_30(1, fn, sfx), \ @@ -806,15 +789,15 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */ - PINMUX_DATA(VI1_DATA7_VI1_B7_MARK, FN_VI1_DATA7_VI1_B7), - PINMUX_DATA(USB0_PWEN_MARK, FN_USB0_PWEN), - PINMUX_DATA(USB0_OVC_VBUS_MARK, FN_USB0_OVC_VBUS), - PINMUX_DATA(USB2_PWEN_MARK, FN_USB2_PWEN), - PINMUX_DATA(USB2_OVC_MARK, FN_USB2_OVC), - PINMUX_DATA(AVS1_MARK, FN_AVS1), - PINMUX_DATA(AVS2_MARK, FN_AVS2), - PINMUX_DATA(DU_DOTCLKIN0_MARK, FN_DU_DOTCLKIN0), - PINMUX_DATA(DU_DOTCLKIN2_MARK, FN_DU_DOTCLKIN2), + PINMUX_SINGLE(VI1_DATA7_VI1_B7), + PINMUX_SINGLE(USB0_PWEN), + PINMUX_SINGLE(USB0_OVC_VBUS), + PINMUX_SINGLE(USB2_PWEN), + PINMUX_SINGLE(USB2_OVC), + PINMUX_SINGLE(AVS1), + PINMUX_SINGLE(AVS2), + PINMUX_SINGLE(DU_DOTCLKIN0), + PINMUX_SINGLE(DU_DOTCLKIN2), PINMUX_IPSR_DATA(IP0_2_0, D0), PINMUX_IPSR_MSEL(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1), @@ -3236,6 +3219,21 @@ static const unsigned int scifb2_data_c_pins[] = { static const unsigned int scifb2_data_c_mux[] = { SCIFB2_RXD_C_MARK, SCIFB2_TXD_C_MARK, }; +/* - SCIF Clock ------------------------------------------------------------- */ +static const unsigned int scif_clk_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(4, 26), +}; +static const unsigned int scif_clk_mux[] = { + SCIF_CLK_MARK, +}; +static const unsigned int scif_clk_b_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(5, 4), +}; +static const unsigned int scif_clk_b_mux[] = { + SCIF_CLK_B_MARK, +}; /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ @@ -4139,6 +4137,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scifb2_clk_b), SH_PFC_PIN_GROUP(scifb2_ctrl_b), SH_PFC_PIN_GROUP(scifb2_data_c), + SH_PFC_PIN_GROUP(scif_clk), + SH_PFC_PIN_GROUP(scif_clk_b), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), @@ -4555,6 +4555,11 @@ static const char * const scifb2_groups[] = { "scifb2_data_c", }; +static const char * const scif_clk_groups[] = { + "scif_clk", + "scif_clk_b", +}; + static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", @@ -4729,6 +4734,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(scifb0), SH_PFC_FUNCTION(scifb1), SH_PFC_FUNCTION(scifb2), + SH_PFC_FUNCTION(scif_clk), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi2), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 87a4f44147c1..4cfbb94ad5d0 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -2,6 +2,7 @@ * r8a7791 processor support - PFC hardware block. * * Copyright (C) 2013 Renesas Electronics Corporation + * Copyright (C) 2014-2015 Cogent Embedded, Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 @@ -13,21 +14,6 @@ #include "core.h" #include "sh_pfc.h" -#define PORT_GP_26(bank, fn, sfx) \ - PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ - PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ - PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ - PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ - PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ - PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \ - PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \ - PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \ - PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \ - PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \ - PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \ - PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \ - PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx) - #define CPU_ALL_PORT(fn, sfx) \ PORT_GP_32(0, fn, sfx), \ PORT_GP_26(1, fn, sfx), \ @@ -787,23 +773,23 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */ - PINMUX_DATA(EX_CS0_N_MARK, FN_EX_CS0_N), - PINMUX_DATA(RD_N_MARK, FN_RD_N), - PINMUX_DATA(AUDIO_CLKA_MARK, FN_AUDIO_CLKA), - PINMUX_DATA(VI0_CLK_MARK, FN_VI0_CLK), - PINMUX_DATA(VI0_DATA0_VI0_B0_MARK, FN_VI0_DATA0_VI0_B0), - PINMUX_DATA(VI0_DATA1_VI0_B1_MARK, FN_VI0_DATA1_VI0_B1), - PINMUX_DATA(VI0_DATA2_VI0_B2_MARK, FN_VI0_DATA2_VI0_B2), - PINMUX_DATA(VI0_DATA4_VI0_B4_MARK, FN_VI0_DATA4_VI0_B4), - PINMUX_DATA(VI0_DATA5_VI0_B5_MARK, FN_VI0_DATA5_VI0_B5), - PINMUX_DATA(VI0_DATA6_VI0_B6_MARK, FN_VI0_DATA6_VI0_B6), - PINMUX_DATA(VI0_DATA7_VI0_B7_MARK, FN_VI0_DATA7_VI0_B7), - PINMUX_DATA(USB0_PWEN_MARK, FN_USB0_PWEN), - PINMUX_DATA(USB0_OVC_MARK, FN_USB0_OVC), - PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), - PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), - PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), - PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK), + PINMUX_SINGLE(EX_CS0_N), + PINMUX_SINGLE(RD_N), + PINMUX_SINGLE(AUDIO_CLKA), + PINMUX_SINGLE(VI0_CLK), + PINMUX_SINGLE(VI0_DATA0_VI0_B0), + PINMUX_SINGLE(VI0_DATA1_VI0_B1), + PINMUX_SINGLE(VI0_DATA2_VI0_B2), + PINMUX_SINGLE(VI0_DATA4_VI0_B4), + PINMUX_SINGLE(VI0_DATA5_VI0_B5), + PINMUX_SINGLE(VI0_DATA6_VI0_B6), + PINMUX_SINGLE(VI0_DATA7_VI0_B7), + PINMUX_SINGLE(USB0_PWEN), + PINMUX_SINGLE(USB0_OVC), + PINMUX_SINGLE(USB1_PWEN), + PINMUX_SINGLE(USB1_OVC), + PINMUX_SINGLE(DU0_DOTCLKIN), + PINMUX_SINGLE(SD1_CLK), /* IPSR0 */ PINMUX_IPSR_DATA(IP0_0, D0), @@ -1740,6 +1726,82 @@ static const unsigned int audio_clkout_mux[] = { AUDIO_CLKOUT_MARK, }; +/* - AVB -------------------------------------------------------------------- */ +static const unsigned int avb_link_pins[] = { + RCAR_GP_PIN(5, 14), +}; +static const unsigned int avb_link_mux[] = { + AVB_LINK_MARK, +}; +static const unsigned int avb_magic_pins[] = { + RCAR_GP_PIN(5, 11), +}; +static const unsigned int avb_magic_mux[] = { + AVB_MAGIC_MARK, +}; +static const unsigned int avb_phy_int_pins[] = { + RCAR_GP_PIN(5, 16), +}; +static const unsigned int avb_phy_int_mux[] = { + AVB_PHY_INT_MARK, +}; +static const unsigned int avb_mdio_pins[] = { + RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 9), +}; +static const unsigned int avb_mdio_mux[] = { + AVB_MDC_MARK, AVB_MDIO_MARK, +}; +static const unsigned int avb_mii_pins[] = { + RCAR_GP_PIN(5, 18), RCAR_GP_PIN(5, 19), 
RCAR_GP_PIN(5, 20), + RCAR_GP_PIN(5, 21), + + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2), + RCAR_GP_PIN(5, 3), + + RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 10), + RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 26), RCAR_GP_PIN(5, 27), + RCAR_GP_PIN(5, 28), RCAR_GP_PIN(5, 29), +}; +static const unsigned int avb_mii_mux[] = { + AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK, + AVB_TXD3_MARK, + + AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK, + AVB_RXD3_MARK, + + AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK, + AVB_CRS_MARK, AVB_TX_EN_MARK, AVB_TX_ER_MARK, + AVB_TX_CLK_MARK, AVB_COL_MARK, +}; +static const unsigned int avb_gmii_pins[] = { + RCAR_GP_PIN(5, 18), RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 20), + RCAR_GP_PIN(5, 21), RCAR_GP_PIN(5, 22), RCAR_GP_PIN(5, 23), + RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25), + + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2), + RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5), + RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), + + RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 10), + RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 30), RCAR_GP_PIN(5, 17), + RCAR_GP_PIN(5, 26), RCAR_GP_PIN(5, 27), RCAR_GP_PIN(5, 28), + RCAR_GP_PIN(5, 29), +}; +static const unsigned int avb_gmii_mux[] = { + AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK, + AVB_TXD3_MARK, AVB_TXD4_MARK, AVB_TXD5_MARK, + AVB_TXD6_MARK, AVB_TXD7_MARK, + + AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK, + AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK, + AVB_RXD6_MARK, AVB_RXD7_MARK, + + AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK, + AVB_CRS_MARK, AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK, + AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK, + AVB_COL_MARK, +}; + /* - CAN -------------------------------------------------------------------- */ static const unsigned int can0_data_pins[] = { @@ -3602,6 +3664,23 @@ static const unsigned int scifb2_data_d_pins[] = { static const unsigned int scifb2_data_d_mux[] = { SCIFB2_RXD_D_MARK, SCIFB2_TXD_D_MARK, }; + +/* - SCIF Clock ------------------------------------------------------------- */ +static const unsigned int scif_clk_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(2, 29), +}; +static const unsigned int scif_clk_mux[] = { + SCIF_CLK_MARK, +}; +static const unsigned int scif_clk_b_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(7, 19), +}; +static const unsigned int scif_clk_b_mux[] = { + SCIF_CLK_B_MARK, +}; + /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ @@ -4258,6 +4337,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(audio_clk_b_b), SH_PFC_PIN_GROUP(audio_clk_c), SH_PFC_PIN_GROUP(audio_clkout), + SH_PFC_PIN_GROUP(avb_link), + SH_PFC_PIN_GROUP(avb_magic), + SH_PFC_PIN_GROUP(avb_phy_int), + SH_PFC_PIN_GROUP(avb_mdio), + SH_PFC_PIN_GROUP(avb_mii), + SH_PFC_PIN_GROUP(avb_gmii), SH_PFC_PIN_GROUP(can0_data), SH_PFC_PIN_GROUP(can0_data_b), SH_PFC_PIN_GROUP(can0_data_c), @@ -4510,6 +4595,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scifb2_data_c), SH_PFC_PIN_GROUP(scifb2_clk_c), SH_PFC_PIN_GROUP(scifb2_data_d), + SH_PFC_PIN_GROUP(scif_clk), + SH_PFC_PIN_GROUP(scif_clk_b), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), @@ -4597,6 +4684,15 @@ static const char * const audio_clk_groups[] = { "audio_clkout", }; +static const char * const avb_groups[] = { + "avb_link", + "avb_magic", + "avb_phy_int", + "avb_mdio", + "avb_mii", + "avb_gmii", +}; + static const char * const 
can0_groups[] = { "can0_data", "can0_data_b", @@ -4976,6 +5072,11 @@ static const char * const scifb2_groups[] = { "scifb2_data_d", }; +static const char * const scif_clk_groups[] = { + "scif_clk", + "scif_clk_b", +}; + static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", @@ -5081,6 +5182,7 @@ static const char * const vin2_groups[] = { static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(audio_clk), + SH_PFC_FUNCTION(avb), SH_PFC_FUNCTION(can0), SH_PFC_FUNCTION(can1), SH_PFC_FUNCTION(du), @@ -5126,6 +5228,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(scifb0), SH_PFC_FUNCTION(scifb1), SH_PFC_FUNCTION(scifb2), + SH_PFC_FUNCTION(scif_clk), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi2), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c index 086f6798b129..3718c7846bfd 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c @@ -15,25 +15,6 @@ #include "core.h" #include "sh_pfc.h" -#define PORT_GP_26(bank, fn, sfx) \ - PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ - PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ - PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ - PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ - PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ - PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \ - PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \ - PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \ - PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \ - PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \ - PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \ - PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \ - PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx) - -#define PORT_GP_28(bank, fn, sfx) \ - PORT_GP_26(bank, fn, sfx), \ - PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx) - #define CPU_ALL_PORT(fn, sfx) \ PORT_GP_32(0, fn, sfx), \ PORT_GP_26(1, fn, sfx), \ @@ -618,28 +599,28 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */ - PINMUX_DATA(A2_MARK, FN_A2), - PINMUX_DATA(WE0_N_MARK, FN_WE0_N), - PINMUX_DATA(WE1_N_MARK, FN_WE1_N), - PINMUX_DATA(DACK0_MARK, FN_DACK0), - PINMUX_DATA(USB0_PWEN_MARK, FN_USB0_PWEN), - PINMUX_DATA(USB0_OVC_MARK, FN_USB0_OVC), - PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), - PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), - PINMUX_DATA(SD0_CLK_MARK, FN_SD0_CLK), - PINMUX_DATA(SD0_CMD_MARK, FN_SD0_CMD), - PINMUX_DATA(SD0_DATA0_MARK, FN_SD0_DATA0), - PINMUX_DATA(SD0_DATA1_MARK, FN_SD0_DATA1), - PINMUX_DATA(SD0_DATA2_MARK, FN_SD0_DATA2), - PINMUX_DATA(SD0_DATA3_MARK, FN_SD0_DATA3), - PINMUX_DATA(SD0_CD_MARK, FN_SD0_CD), - PINMUX_DATA(SD0_WP_MARK, FN_SD0_WP), - PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK), - PINMUX_DATA(SD1_CMD_MARK, FN_SD1_CMD), - PINMUX_DATA(SD1_DATA0_MARK, FN_SD1_DATA0), - PINMUX_DATA(SD1_DATA1_MARK, FN_SD1_DATA1), - PINMUX_DATA(SD1_DATA2_MARK, FN_SD1_DATA2), - PINMUX_DATA(SD1_DATA3_MARK, FN_SD1_DATA3), + PINMUX_SINGLE(A2), + PINMUX_SINGLE(WE0_N), + PINMUX_SINGLE(WE1_N), + PINMUX_SINGLE(DACK0), + PINMUX_SINGLE(USB0_PWEN), + PINMUX_SINGLE(USB0_OVC), + PINMUX_SINGLE(USB1_PWEN), + PINMUX_SINGLE(USB1_OVC), + PINMUX_SINGLE(SD0_CLK), + PINMUX_SINGLE(SD0_CMD), + PINMUX_SINGLE(SD0_DATA0), + PINMUX_SINGLE(SD0_DATA1), + PINMUX_SINGLE(SD0_DATA2), + 
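The PINMUX_DATA(x_MARK, FN_x) to PINMUX_SINGLE(x) conversions running through the emev2, r8a7778/r8a7779, r8a7790/r8a7791 and r8a7794 tables in this patch are mechanical: PINMUX_SINGLE() is a small helper, introduced in sh_pfc.h alongside these conversions, that pastes both identifiers from a single token so pins with only one function no longer spell the _MARK and FN_ names by hand. A sketch of the idea; the authoritative definition lives in sh_pfc.h:

/* Helper sketch: derive both halves of the entry from one name. */
#define PINMUX_SINGLE(fn)	PINMUX_DATA(fn##_MARK, FN_##fn)

/*
 * So these two forms are equivalent:
 *	PINMUX_DATA(SD0_DATA2_MARK, FN_SD0_DATA2),
 *	PINMUX_SINGLE(SD0_DATA2),
 */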
PINMUX_SINGLE(SD0_DATA3), + PINMUX_SINGLE(SD0_CD), + PINMUX_SINGLE(SD0_WP), + PINMUX_SINGLE(SD1_CLK), + PINMUX_SINGLE(SD1_CMD), + PINMUX_SINGLE(SD1_DATA0), + PINMUX_SINGLE(SD1_DATA1), + PINMUX_SINGLE(SD1_DATA2), + PINMUX_SINGLE(SD1_DATA3), /* IPSR0 */ PINMUX_IPSR_DATA(IP0_0, SD1_CD), @@ -2644,6 +2625,21 @@ static const unsigned int scifb2_ctrl_pins[] = { static const unsigned int scifb2_ctrl_mux[] = { SCIFB2_RTS_N_MARK, SCIFB2_CTS_N_MARK, }; +/* - SCIF Clock ------------------------------------------------------------- */ +static const unsigned int scif_clk_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(1, 23), +}; +static const unsigned int scif_clk_mux[] = { + SCIF_CLK_MARK, +}; +static const unsigned int scif_clk_b_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(3, 29), +}; +static const unsigned int scif_clk_b_mux[] = { + SCIF_CLK_B_MARK, +}; /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ @@ -3071,6 +3067,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scifb2_data), SH_PFC_PIN_GROUP(scifb2_clk), SH_PFC_PIN_GROUP(scifb2_ctrl), + SH_PFC_PIN_GROUP(scif_clk), + SH_PFC_PIN_GROUP(scif_clk_b), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), @@ -3354,6 +3352,11 @@ static const char * const scifb2_groups[] = { "scifb2_ctrl", }; +static const char * const scif_clk_groups[] = { + "scif_clk", + "scif_clk_b", +}; + static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", @@ -3441,6 +3444,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(scifb0), SH_PFC_FUNCTION(scifb1), SH_PFC_FUNCTION(scifb2), + SH_PFC_FUNCTION(scif_clk), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi2), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c index 7ddb2adfc5a5..ce4f5cdb0579 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c @@ -13,46 +13,15 @@ #include "core.h" #include "sh_pfc.h" -#define PORT_GP_3(bank, fn, sfx) \ - PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ - PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx) - -#define PORT_GP_14(bank, fn, sfx) \ - PORT_GP_3(bank, fn, sfx), \ - PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ - PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ - PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ - PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \ - PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \ - PORT_GP_1(bank, 14, fn, sfx) - -#define PORT_GP_15(bank, fn, sfx) \ - PORT_GP_14(bank, fn, sfx), PORT_GP_1(bank, 15, fn, sfx) - -#define PORT_GP_17(bank, fn, sfx) \ - PORT_GP_15(bank, fn, sfx), \ - PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx) - -#define PORT_GP_25(bank, fn, sfx) \ - PORT_GP_17(bank, fn, sfx), \ - PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \ - PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \ - PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \ - PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx) - -#define PORT_GP_27(bank, fn, sfx) \ - PORT_GP_25(bank, fn, sfx), \ - PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx) - #define CPU_ALL_PORT(fn, sfx) \ - PORT_GP_15(0, fn, sfx), \ - PORT_GP_27(1, fn, sfx), \ - PORT_GP_14(2, fn, sfx), \ - PORT_GP_15(3, fn, sfx), \ - PORT_GP_17(4, fn, sfx), \ - PORT_GP_25(5, fn, sfx), \ + 
PORT_GP_16(0, fn, sfx), \ + PORT_GP_28(1, fn, sfx), \ + PORT_GP_15(2, fn, sfx), \ + PORT_GP_16(3, fn, sfx), \ + PORT_GP_18(4, fn, sfx), \ + PORT_GP_26(5, fn, sfx), \ PORT_GP_32(6, fn, sfx), \ - PORT_GP_3(7, fn, sfx) + PORT_GP_4(7, fn, sfx) /* * F_() : just information * FM() : macro for FN_xxx / xxx_MARK @@ -495,7 +464,7 @@ FM(IP16_31_28) IP16_31_28 #define MOD_SEL1_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) #define MOD_SEL1_12 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1) #define MOD_SEL1_11 FM(SEL_SCIF1_0) FM(SEL_SCIF1_1) -#define MOD_SEL1_10 FM(SEL_SCIF_0) FM(SEL_SCIF_1) +#define MOD_SEL1_10 FM(SEL_SATA_0) FM(SEL_SATA_1) #define MOD_SEL1_9 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) #define MOD_SEL1_6 FM(SEL_RCAN0_0) FM(SEL_RCAN0_1) #define MOD_SEL1_5 FM(SEL_PWM6_0) FM(SEL_PWM6_1) @@ -580,6 +549,25 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), + PINMUX_SINGLE(AVS1), + PINMUX_SINGLE(AVS2), + PINMUX_SINGLE(HDMI0_CEC), + PINMUX_SINGLE(HDMI1_CEC), + PINMUX_SINGLE(MSIOF0_RXD), + PINMUX_SINGLE(MSIOF0_SCK), + PINMUX_SINGLE(MSIOF0_TXD), + PINMUX_SINGLE(SD2_CMD), + PINMUX_SINGLE(SD3_CLK), + PINMUX_SINGLE(SD3_CMD), + PINMUX_SINGLE(SD3_DAT0), + PINMUX_SINGLE(SD3_DAT1), + PINMUX_SINGLE(SD3_DAT2), + PINMUX_SINGLE(SD3_DAT3), + PINMUX_SINGLE(SD3_DS), + PINMUX_SINGLE(SSI_SCK5), + PINMUX_SINGLE(SSI_SDATA5), + PINMUX_SINGLE(SSI_WS5), + /* IPSR0 */ PINMUX_IPSR_DATA(IP0_3_0, AVB_MDC), PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SS2_C, SEL_MSIOF2_2), @@ -1033,7 +1021,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_DATA(IP9_19_16, SD2_DAT3), PINMUX_IPSR_DATA(IP9_23_20, SD2_DS), - PINMUX_IPSR_MSEL(IP9_23_20, SATA_DEVSLP_B, SEL_SCIF_1), + PINMUX_IPSR_MSEL(IP9_23_20, SATA_DEVSLP_B, SEL_SATA_1), PINMUX_IPSR_DATA(IP9_27_24, SD3_DAT4), PINMUX_IPSR_MSEL(IP9_27_24, SD2_CD_A, SEL_SDHI2_0), @@ -1293,7 +1281,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_DATA(IP15_11_8, SSI_SDATA6), PINMUX_IPSR_MSEL(IP15_11_8, SIM0_CLK_D, SEL_SIMCARD_3), - PINMUX_IPSR_MSEL(IP15_11_8, SATA_DEVSLP_A, SEL_SCIF_0), + PINMUX_IPSR_MSEL(IP15_11_8, SATA_DEVSLP_A, SEL_SATA_0), PINMUX_IPSR_DATA(IP15_15_12, SSI_SCK78), PINMUX_IPSR_MSEL(IP15_15_12, HRX2_B, SEL_HSCIF2_1), @@ -1612,6 +1600,191 @@ static const unsigned int avb_avtp_capture_b_mux[] = { AVB_AVTP_CAPTURE_B_MARK, }; +/* - HSCIF0 ----------------------------------------------------------------- */ +static const unsigned int hscif0_data_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14), +}; +static const unsigned int hscif0_data_mux[] = { + HRX0_MARK, HTX0_MARK, +}; +static const unsigned int hscif0_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 12), +}; +static const unsigned int hscif0_clk_mux[] = { + HSCK0_MARK, +}; +static const unsigned int hscif0_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15), +}; +static const unsigned int hscif0_ctrl_mux[] = { + HRTS0_N_MARK, HCTS0_N_MARK, +}; +/* - HSCIF1 ----------------------------------------------------------------- */ +static const unsigned int hscif1_data_a_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6), +}; +static const unsigned int hscif1_data_a_mux[] = { + HRX1_A_MARK, HTX1_A_MARK, +}; +static const unsigned int hscif1_clk_a_pins[] = { + /* SCK */ + RCAR_GP_PIN(6, 21), +}; +static const unsigned int hscif1_clk_a_mux[] = { + HSCK1_A_MARK, +}; +static const unsigned int hscif1_ctrl_a_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7), +}; +static const unsigned int hscif1_ctrl_a_mux[] = { + HRTS1_N_A_MARK, HCTS1_N_A_MARK, +}; + +static const unsigned int hscif1_data_b_pins[] = 
{ + /* RX, TX */ + RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2), +}; +static const unsigned int hscif1_data_b_mux[] = { + HRX1_B_MARK, HTX1_B_MARK, +}; +static const unsigned int hscif1_clk_b_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 0), +}; +static const unsigned int hscif1_clk_b_mux[] = { + HSCK1_B_MARK, +}; +static const unsigned int hscif1_ctrl_b_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3), +}; +static const unsigned int hscif1_ctrl_b_mux[] = { + HRTS1_N_B_MARK, HCTS1_N_B_MARK, +}; +/* - HSCIF2 ----------------------------------------------------------------- */ +static const unsigned int hscif2_data_a_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; +static const unsigned int hscif2_data_a_mux[] = { + HRX2_A_MARK, HTX2_A_MARK, +}; +static const unsigned int hscif2_clk_a_pins[] = { + /* SCK */ + RCAR_GP_PIN(6, 10), +}; +static const unsigned int hscif2_clk_a_mux[] = { + HSCK2_A_MARK, +}; +static const unsigned int hscif2_ctrl_a_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6), +}; +static const unsigned int hscif2_ctrl_a_mux[] = { + HRTS2_N_A_MARK, HCTS2_N_A_MARK, +}; + +static const unsigned int hscif2_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; +static const unsigned int hscif2_data_b_mux[] = { + HRX2_B_MARK, HTX2_B_MARK, +}; +static const unsigned int hscif2_clk_b_pins[] = { + /* SCK */ + RCAR_GP_PIN(6, 21), +}; +static const unsigned int hscif2_clk_b_mux[] = { + HSCK1_B_MARK, +}; +static const unsigned int hscif2_ctrl_b_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19), +}; +static const unsigned int hscif2_ctrl_b_mux[] = { + HRTS2_N_B_MARK, HCTS2_N_B_MARK, +}; +/* - HSCIF3 ----------------------------------------------------------------- */ +static const unsigned int hscif3_data_a_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24), +}; +static const unsigned int hscif3_data_a_mux[] = { + HRX3_A_MARK, HTX3_A_MARK, +}; +static const unsigned int hscif3_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 22), +}; +static const unsigned int hscif3_clk_mux[] = { + HSCK3_MARK, +}; +static const unsigned int hscif3_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25), +}; +static const unsigned int hscif3_ctrl_mux[] = { + HRTS3_N_MARK, HCTS3_N_MARK, +}; + +static const unsigned int hscif3_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11), +}; +static const unsigned int hscif3_data_b_mux[] = { + HRX3_B_MARK, HTX3_B_MARK, +}; +static const unsigned int hscif3_data_c_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15), +}; +static const unsigned int hscif3_data_c_mux[] = { + HRX3_C_MARK, HTX3_C_MARK, +}; +static const unsigned int hscif3_data_d_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8), +}; +static const unsigned int hscif3_data_d_mux[] = { + HRX3_D_MARK, HTX3_D_MARK, +}; +/* - HSCIF4 ----------------------------------------------------------------- */ +static const unsigned int hscif4_data_a_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), +}; +static const unsigned int hscif4_data_a_mux[] = { + HRX4_A_MARK, HTX4_A_MARK, +}; +static const unsigned int hscif4_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 11), +}; +static const unsigned int hscif4_clk_mux[] = { + HSCK4_MARK, +}; +static const unsigned int hscif4_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), +}; +static const unsigned int hscif4_ctrl_mux[] = { + HRTS4_N_MARK, HCTS3_N_MARK, +}; + +static 
const unsigned int hscif4_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11), +}; +static const unsigned int hscif4_data_b_mux[] = { + HRX4_B_MARK, HTX4_B_MARK, +}; + /* - I2C -------------------------------------------------------------------- */ static const unsigned int i2c1_a_pins[] = { /* SDA, SCL */ @@ -1663,6 +1836,678 @@ static const unsigned int i2c6_c_mux[] = { SDA6_C_MARK, SCL6_C_MARK, }; +/* - MSIOF0 ----------------------------------------------------------------- */ +static const unsigned int msiof0_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 17), +}; +static const unsigned int msiof0_clk_mux[] = { + MSIOF0_SCK_MARK, +}; +static const unsigned int msiof0_sync_pins[] = { + /* SYNC */ + RCAR_GP_PIN(5, 18), +}; +static const unsigned int msiof0_sync_mux[] = { + MSIOF0_SYNC_MARK, +}; +static const unsigned int msiof0_ss1_pins[] = { + /* SS1 */ + RCAR_GP_PIN(5, 19), +}; +static const unsigned int msiof0_ss1_mux[] = { + MSIOF0_SS1_MARK, +}; +static const unsigned int msiof0_ss2_pins[] = { + /* SS2 */ + RCAR_GP_PIN(5, 21), +}; +static const unsigned int msiof0_ss2_mux[] = { + MSIOF0_SS2_MARK, +}; +static const unsigned int msiof0_txd_pins[] = { + /* TXD */ + RCAR_GP_PIN(5, 20), +}; +static const unsigned int msiof0_txd_mux[] = { + MSIOF0_TXD_MARK, +}; +static const unsigned int msiof0_rxd_pins[] = { + /* RXD */ + RCAR_GP_PIN(5, 22), +}; +static const unsigned int msiof0_rxd_mux[] = { + MSIOF0_RXD_MARK, +}; +/* - MSIOF1 ----------------------------------------------------------------- */ +static const unsigned int msiof1_clk_a_pins[] = { + /* SCK */ + RCAR_GP_PIN(6, 8), +}; +static const unsigned int msiof1_clk_a_mux[] = { + MSIOF1_SCK_A_MARK, +}; +static const unsigned int msiof1_sync_a_pins[] = { + /* SYNC */ + RCAR_GP_PIN(6, 9), +}; +static const unsigned int msiof1_sync_a_mux[] = { + MSIOF1_SYNC_A_MARK, +}; +static const unsigned int msiof1_ss1_a_pins[] = { + /* SS1 */ + RCAR_GP_PIN(6, 5), +}; +static const unsigned int msiof1_ss1_a_mux[] = { + MSIOF1_SS1_A_MARK, +}; +static const unsigned int msiof1_ss2_a_pins[] = { + /* SS2 */ + RCAR_GP_PIN(6, 6), +}; +static const unsigned int msiof1_ss2_a_mux[] = { + MSIOF1_SS2_A_MARK, +}; +static const unsigned int msiof1_txd_a_pins[] = { + /* TXD */ + RCAR_GP_PIN(6, 7), +}; +static const unsigned int msiof1_txd_a_mux[] = { + MSIOF1_TXD_A_MARK, +}; +static const unsigned int msiof1_rxd_a_pins[] = { + /* RXD */ + RCAR_GP_PIN(6, 10), +}; +static const unsigned int msiof1_rxd_a_mux[] = { + MSIOF1_RXD_A_MARK, +}; +static const unsigned int msiof1_clk_b_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 9), +}; +static const unsigned int msiof1_clk_b_mux[] = { + MSIOF1_SCK_B_MARK, +}; +static const unsigned int msiof1_sync_b_pins[] = { + /* SYNC */ + RCAR_GP_PIN(5, 3), +}; +static const unsigned int msiof1_sync_b_mux[] = { + MSIOF1_SYNC_B_MARK, +}; +static const unsigned int msiof1_ss1_b_pins[] = { + /* SS1 */ + RCAR_GP_PIN(5, 4), +}; +static const unsigned int msiof1_ss1_b_mux[] = { + MSIOF1_SS1_B_MARK, +}; +static const unsigned int msiof1_ss2_b_pins[] = { + /* SS2 */ + RCAR_GP_PIN(5, 0), +}; +static const unsigned int msiof1_ss2_b_mux[] = { + MSIOF1_SS2_B_MARK, +}; +static const unsigned int msiof1_txd_b_pins[] = { + /* TXD */ + RCAR_GP_PIN(5, 8), +}; +static const unsigned int msiof1_txd_b_mux[] = { + MSIOF1_TXD_B_MARK, +}; +static const unsigned int msiof1_rxd_b_pins[] = { + /* RXD */ + RCAR_GP_PIN(5, 7), +}; +static const unsigned int msiof1_rxd_b_mux[] = { + MSIOF1_RXD_B_MARK, +}; +static const unsigned int msiof1_clk_c_pins[] = 
{ + /* SCK */ + RCAR_GP_PIN(6, 17), +}; +static const unsigned int msiof1_clk_c_mux[] = { + MSIOF1_SCK_C_MARK, +}; +static const unsigned int msiof1_sync_c_pins[] = { + /* SYNC */ + RCAR_GP_PIN(6, 18), +}; +static const unsigned int msiof1_sync_c_mux[] = { + MSIOF1_SYNC_C_MARK, +}; +static const unsigned int msiof1_ss1_c_pins[] = { + /* SS1 */ + RCAR_GP_PIN(6, 21), +}; +static const unsigned int msiof1_ss1_c_mux[] = { + MSIOF1_SS1_C_MARK, +}; +static const unsigned int msiof1_ss2_c_pins[] = { + /* SS2 */ + RCAR_GP_PIN(6, 27), +}; +static const unsigned int msiof1_ss2_c_mux[] = { + MSIOF1_SS2_C_MARK, +}; +static const unsigned int msiof1_txd_c_pins[] = { + /* TXD */ + RCAR_GP_PIN(6, 20), +}; +static const unsigned int msiof1_txd_c_mux[] = { + MSIOF1_TXD_C_MARK, +}; +static const unsigned int msiof1_rxd_c_pins[] = { + /* RXD */ + RCAR_GP_PIN(6, 19), +}; +static const unsigned int msiof1_rxd_c_mux[] = { + MSIOF1_RXD_C_MARK, +}; +static const unsigned int msiof1_clk_d_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 12), +}; +static const unsigned int msiof1_clk_d_mux[] = { + MSIOF1_SCK_D_MARK, +}; +static const unsigned int msiof1_sync_d_pins[] = { + /* SYNC */ + RCAR_GP_PIN(5, 15), +}; +static const unsigned int msiof1_sync_d_mux[] = { + MSIOF1_SYNC_D_MARK, +}; +static const unsigned int msiof1_ss1_d_pins[] = { + /* SS1 */ + RCAR_GP_PIN(5, 16), +}; +static const unsigned int msiof1_ss1_d_mux[] = { + MSIOF1_SS1_D_MARK, +}; +static const unsigned int msiof1_ss2_d_pins[] = { + /* SS2 */ + RCAR_GP_PIN(5, 21), +}; +static const unsigned int msiof1_ss2_d_mux[] = { + MSIOF1_SS2_D_MARK, +}; +static const unsigned int msiof1_txd_d_pins[] = { + /* TXD */ + RCAR_GP_PIN(5, 14), +}; +static const unsigned int msiof1_txd_d_mux[] = { + MSIOF1_TXD_D_MARK, +}; +static const unsigned int msiof1_rxd_d_pins[] = { + /* RXD */ + RCAR_GP_PIN(5, 13), +}; +static const unsigned int msiof1_rxd_d_mux[] = { + MSIOF1_RXD_D_MARK, +}; +static const unsigned int msiof1_clk_e_pins[] = { + /* SCK */ + RCAR_GP_PIN(3, 0), +}; +static const unsigned int msiof1_clk_e_mux[] = { + MSIOF1_SCK_E_MARK, +}; +static const unsigned int msiof1_sync_e_pins[] = { + /* SYNC */ + RCAR_GP_PIN(3, 1), +}; +static const unsigned int msiof1_sync_e_mux[] = { + MSIOF1_SYNC_E_MARK, +}; +static const unsigned int msiof1_ss1_e_pins[] = { + /* SS1 */ + RCAR_GP_PIN(3, 4), +}; +static const unsigned int msiof1_ss1_e_mux[] = { + MSIOF1_SS1_E_MARK, +}; +static const unsigned int msiof1_ss2_e_pins[] = { + /* SS2 */ + RCAR_GP_PIN(3, 5), +}; +static const unsigned int msiof1_ss2_e_mux[] = { + MSIOF1_SS2_E_MARK, +}; +static const unsigned int msiof1_txd_e_pins[] = { + /* TXD */ + RCAR_GP_PIN(3, 3), +}; +static const unsigned int msiof1_txd_e_mux[] = { + MSIOF1_TXD_E_MARK, +}; +static const unsigned int msiof1_rxd_e_pins[] = { + /* RXD */ + RCAR_GP_PIN(3, 2), +}; +static const unsigned int msiof1_rxd_e_mux[] = { + MSIOF1_RXD_E_MARK, +}; +static const unsigned int msiof1_clk_f_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 23), +}; +static const unsigned int msiof1_clk_f_mux[] = { + MSIOF1_SCK_F_MARK, +}; +static const unsigned int msiof1_sync_f_pins[] = { + /* SYNC */ + RCAR_GP_PIN(5, 24), +}; +static const unsigned int msiof1_sync_f_mux[] = { + MSIOF1_SYNC_F_MARK, +}; +static const unsigned int msiof1_ss1_f_pins[] = { + /* SS1 */ + RCAR_GP_PIN(6, 1), +}; +static const unsigned int msiof1_ss1_f_mux[] = { + MSIOF1_SS1_F_MARK, +}; +static const unsigned int msiof1_ss2_f_pins[] = { + /* SS2 */ + RCAR_GP_PIN(6, 2), +}; +static const unsigned int msiof1_ss2_f_mux[] = { + 
MSIOF1_SS2_F_MARK, +}; +static const unsigned int msiof1_txd_f_pins[] = { + /* TXD */ + RCAR_GP_PIN(6, 0), +}; +static const unsigned int msiof1_txd_f_mux[] = { + MSIOF1_TXD_F_MARK, +}; +static const unsigned int msiof1_rxd_f_pins[] = { + /* RXD */ + RCAR_GP_PIN(5, 25), +}; +static const unsigned int msiof1_rxd_f_mux[] = { + MSIOF1_RXD_F_MARK, +}; +static const unsigned int msiof1_clk_g_pins[] = { + /* SCK */ + RCAR_GP_PIN(3, 6), +}; +static const unsigned int msiof1_clk_g_mux[] = { + MSIOF1_SCK_G_MARK, +}; +static const unsigned int msiof1_sync_g_pins[] = { + /* SYNC */ + RCAR_GP_PIN(3, 7), +}; +static const unsigned int msiof1_sync_g_mux[] = { + MSIOF1_SYNC_G_MARK, +}; +static const unsigned int msiof1_ss1_g_pins[] = { + /* SS1 */ + RCAR_GP_PIN(3, 10), +}; +static const unsigned int msiof1_ss1_g_mux[] = { + MSIOF1_SS1_G_MARK, +}; +static const unsigned int msiof1_ss2_g_pins[] = { + /* SS2 */ + RCAR_GP_PIN(3, 11), +}; +static const unsigned int msiof1_ss2_g_mux[] = { + MSIOF1_SS2_G_MARK, +}; +static const unsigned int msiof1_txd_g_pins[] = { + /* TXD */ + RCAR_GP_PIN(3, 9), +}; +static const unsigned int msiof1_txd_g_mux[] = { + MSIOF1_TXD_G_MARK, +}; +static const unsigned int msiof1_rxd_g_pins[] = { + /* RXD */ + RCAR_GP_PIN(3, 8), +}; +static const unsigned int msiof1_rxd_g_mux[] = { + MSIOF1_RXD_G_MARK, +}; +/* - MSIOF2 ----------------------------------------------------------------- */ +static const unsigned int msiof2_clk_a_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 9), +}; +static const unsigned int msiof2_clk_a_mux[] = { + MSIOF2_SCK_A_MARK, +}; +static const unsigned int msiof2_sync_a_pins[] = { + /* SYNC */ + RCAR_GP_PIN(1, 8), +}; +static const unsigned int msiof2_sync_a_mux[] = { + MSIOF2_SYNC_A_MARK, +}; +static const unsigned int msiof2_ss1_a_pins[] = { + /* SS1 */ + RCAR_GP_PIN(1, 6), +}; +static const unsigned int msiof2_ss1_a_mux[] = { + MSIOF2_SS1_A_MARK, +}; +static const unsigned int msiof2_ss2_a_pins[] = { + /* SS2 */ + RCAR_GP_PIN(1, 7), +}; +static const unsigned int msiof2_ss2_a_mux[] = { + MSIOF2_SS2_A_MARK, +}; +static const unsigned int msiof2_txd_a_pins[] = { + /* TXD */ + RCAR_GP_PIN(1, 11), +}; +static const unsigned int msiof2_txd_a_mux[] = { + MSIOF2_TXD_A_MARK, +}; +static const unsigned int msiof2_rxd_a_pins[] = { + /* RXD */ + RCAR_GP_PIN(1, 10), +}; +static const unsigned int msiof2_rxd_a_mux[] = { + MSIOF2_RXD_A_MARK, +}; +static const unsigned int msiof2_clk_b_pins[] = { + /* SCK */ + RCAR_GP_PIN(0, 4), +}; +static const unsigned int msiof2_clk_b_mux[] = { + MSIOF2_SCK_B_MARK, +}; +static const unsigned int msiof2_sync_b_pins[] = { + /* SYNC */ + RCAR_GP_PIN(0, 5), +}; +static const unsigned int msiof2_sync_b_mux[] = { + MSIOF2_SYNC_B_MARK, +}; +static const unsigned int msiof2_ss1_b_pins[] = { + /* SS1 */ + RCAR_GP_PIN(0, 0), +}; +static const unsigned int msiof2_ss1_b_mux[] = { + MSIOF2_SS1_B_MARK, +}; +static const unsigned int msiof2_ss2_b_pins[] = { + /* SS2 */ + RCAR_GP_PIN(0, 1), +}; +static const unsigned int msiof2_ss2_b_mux[] = { + MSIOF2_SS2_B_MARK, +}; +static const unsigned int msiof2_txd_b_pins[] = { + /* TXD */ + RCAR_GP_PIN(0, 7), +}; +static const unsigned int msiof2_txd_b_mux[] = { + MSIOF2_TXD_B_MARK, +}; +static const unsigned int msiof2_rxd_b_pins[] = { + /* RXD */ + RCAR_GP_PIN(0, 6), +}; +static const unsigned int msiof2_rxd_b_mux[] = { + MSIOF2_RXD_B_MARK, +}; +static const unsigned int msiof2_clk_c_pins[] = { + /* SCK */ + RCAR_GP_PIN(2, 12), +}; +static const unsigned int msiof2_clk_c_mux[] = { + MSIOF2_SCK_C_MARK, +}; +static 
const unsigned int msiof2_sync_c_pins[] = { + /* SYNC */ + RCAR_GP_PIN(2, 11), +}; +static const unsigned int msiof2_sync_c_mux[] = { + MSIOF2_SYNC_C_MARK, +}; +static const unsigned int msiof2_ss1_c_pins[] = { + /* SS1 */ + RCAR_GP_PIN(2, 10), +}; +static const unsigned int msiof2_ss1_c_mux[] = { + MSIOF2_SS1_C_MARK, +}; +static const unsigned int msiof2_ss2_c_pins[] = { + /* SS2 */ + RCAR_GP_PIN(2, 9), +}; +static const unsigned int msiof2_ss2_c_mux[] = { + MSIOF2_SS2_C_MARK, +}; +static const unsigned int msiof2_txd_c_pins[] = { + /* TXD */ + RCAR_GP_PIN(2, 14), +}; +static const unsigned int msiof2_txd_c_mux[] = { + MSIOF2_TXD_C_MARK, +}; +static const unsigned int msiof2_rxd_c_pins[] = { + /* RXD */ + RCAR_GP_PIN(2, 13), +}; +static const unsigned int msiof2_rxd_c_mux[] = { + MSIOF2_RXD_C_MARK, +}; +static const unsigned int msiof2_clk_d_pins[] = { + /* SCK */ + RCAR_GP_PIN(0, 8), +}; +static const unsigned int msiof2_clk_d_mux[] = { + MSIOF2_SCK_D_MARK, +}; +static const unsigned int msiof2_sync_d_pins[] = { + /* SYNC */ + RCAR_GP_PIN(0, 9), +}; +static const unsigned int msiof2_sync_d_mux[] = { + MSIOF2_SYNC_D_MARK, +}; +static const unsigned int msiof2_ss1_d_pins[] = { + /* SS1 */ + RCAR_GP_PIN(0, 12), +}; +static const unsigned int msiof2_ss1_d_mux[] = { + MSIOF2_SS1_D_MARK, +}; +static const unsigned int msiof2_ss2_d_pins[] = { + /* SS2 */ + RCAR_GP_PIN(0, 13), +}; +static const unsigned int msiof2_ss2_d_mux[] = { + MSIOF2_SS2_D_MARK, +}; +static const unsigned int msiof2_txd_d_pins[] = { + /* TXD */ + RCAR_GP_PIN(0, 11), +}; +static const unsigned int msiof2_txd_d_mux[] = { + MSIOF2_TXD_D_MARK, +}; +static const unsigned int msiof2_rxd_d_pins[] = { + /* RXD */ + RCAR_GP_PIN(0, 10), +}; +static const unsigned int msiof2_rxd_d_mux[] = { + MSIOF2_RXD_D_MARK, +}; +/* - MSIOF3 ----------------------------------------------------------------- */ +static const unsigned int msiof3_clk_a_pins[] = { + /* SCK */ + RCAR_GP_PIN(0, 0), +}; +static const unsigned int msiof3_clk_a_mux[] = { + MSIOF3_SCK_A_MARK, +}; +static const unsigned int msiof3_sync_a_pins[] = { + /* SYNC */ + RCAR_GP_PIN(0, 1), +}; +static const unsigned int msiof3_sync_a_mux[] = { + MSIOF3_SYNC_A_MARK, +}; +static const unsigned int msiof3_ss1_a_pins[] = { + /* SS1 */ + RCAR_GP_PIN(0, 14), +}; +static const unsigned int msiof3_ss1_a_mux[] = { + MSIOF3_SS1_A_MARK, +}; +static const unsigned int msiof3_ss2_a_pins[] = { + /* SS2 */ + RCAR_GP_PIN(0, 15), +}; +static const unsigned int msiof3_ss2_a_mux[] = { + MSIOF3_SS2_A_MARK, +}; +static const unsigned int msiof3_txd_a_pins[] = { + /* TXD */ + RCAR_GP_PIN(0, 3), +}; +static const unsigned int msiof3_txd_a_mux[] = { + MSIOF3_TXD_A_MARK, +}; +static const unsigned int msiof3_rxd_a_pins[] = { + /* RXD */ + RCAR_GP_PIN(0, 2), +}; +static const unsigned int msiof3_rxd_a_mux[] = { + MSIOF3_RXD_A_MARK, +}; +static const unsigned int msiof3_clk_b_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 2), +}; +static const unsigned int msiof3_clk_b_mux[] = { + MSIOF3_SCK_B_MARK, +}; +static const unsigned int msiof3_sync_b_pins[] = { + /* SYNC */ + RCAR_GP_PIN(1, 0), +}; +static const unsigned int msiof3_sync_b_mux[] = { + MSIOF3_SYNC_B_MARK, +}; +static const unsigned int msiof3_ss1_b_pins[] = { + /* SS1 */ + RCAR_GP_PIN(1, 4), +}; +static const unsigned int msiof3_ss1_b_mux[] = { + MSIOF3_SS1_B_MARK, +}; +static const unsigned int msiof3_ss2_b_pins[] = { + /* SS2 */ + RCAR_GP_PIN(1, 5), +}; +static const unsigned int msiof3_ss2_b_mux[] = { + MSIOF3_SS2_B_MARK, +}; +static const unsigned int 
msiof3_txd_b_pins[] = { + /* TXD */ + RCAR_GP_PIN(1, 1), +}; +static const unsigned int msiof3_txd_b_mux[] = { + MSIOF3_TXD_B_MARK, +}; +static const unsigned int msiof3_rxd_b_pins[] = { + /* RXD */ + RCAR_GP_PIN(1, 3), +}; +static const unsigned int msiof3_rxd_b_mux[] = { + MSIOF3_RXD_B_MARK, +}; +static const unsigned int msiof3_clk_c_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 12), +}; +static const unsigned int msiof3_clk_c_mux[] = { + MSIOF3_SCK_C_MARK, +}; +static const unsigned int msiof3_sync_c_pins[] = { + /* SYNC */ + RCAR_GP_PIN(1, 13), +}; +static const unsigned int msiof3_sync_c_mux[] = { + MSIOF3_SYNC_C_MARK, +}; +static const unsigned int msiof3_txd_c_pins[] = { + /* TXD */ + RCAR_GP_PIN(1, 15), +}; +static const unsigned int msiof3_txd_c_mux[] = { + MSIOF3_TXD_C_MARK, +}; +static const unsigned int msiof3_rxd_c_pins[] = { + /* RXD */ + RCAR_GP_PIN(1, 14), +}; +static const unsigned int msiof3_rxd_c_mux[] = { + MSIOF3_RXD_C_MARK, +}; +static const unsigned int msiof3_clk_d_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 22), +}; +static const unsigned int msiof3_clk_d_mux[] = { + MSIOF3_SCK_D_MARK, +}; +static const unsigned int msiof3_sync_d_pins[] = { + /* SYNC */ + RCAR_GP_PIN(1, 23), +}; +static const unsigned int msiof3_sync_d_mux[] = { + MSIOF3_SYNC_D_MARK, +}; +static const unsigned int msiof3_ss1_d_pins[] = { + /* SS1 */ + RCAR_GP_PIN(1, 26), +}; +static const unsigned int msiof3_ss1_d_mux[] = { + MSIOF3_SS1_D_MARK, +}; +static const unsigned int msiof3_txd_d_pins[] = { + /* TXD */ + RCAR_GP_PIN(1, 25), +}; +static const unsigned int msiof3_txd_d_mux[] = { + MSIOF3_TXD_D_MARK, +}; +static const unsigned int msiof3_rxd_d_pins[] = { + /* RXD */ + RCAR_GP_PIN(1, 24), +}; +static const unsigned int msiof3_rxd_d_mux[] = { + MSIOF3_RXD_D_MARK, +}; + +/* - SATA --------------------------------------------------------------------*/ +static const unsigned int sata0_devslp_a_pins[] = { + /* DEVSLP */ + RCAR_GP_PIN(6, 16), +}; +static const unsigned int sata0_devslp_a_mux[] = { + SATA_DEVSLP_A_MARK, +}; +static const unsigned int sata0_devslp_b_pins[] = { + /* DEVSLP */ + RCAR_GP_PIN(4, 6), +}; +static const unsigned int sata0_devslp_b_mux[] = { + SATA_DEVSLP_B_MARK, +}; + /* - SCIF0 ------------------------------------------------------------------ */ static const unsigned int scif0_data_pins[] = { /* RX, TX */ @@ -1845,6 +2690,228 @@ static const unsigned int scif5_clk_pins[] = { static const unsigned int scif5_clk_mux[] = { SCK5_MARK, }; +/* - SDHI0 ------------------------------------------------------------------ */ +static const unsigned int sdhi0_data1_pins[] = { + /* D0 */ + RCAR_GP_PIN(3, 2), +}; +static const unsigned int sdhi0_data1_mux[] = { + SD0_DAT0_MARK, +}; +static const unsigned int sdhi0_data4_pins[] = { + /* D[0:3] */ + RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), + RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), +}; +static const unsigned int sdhi0_data4_mux[] = { + SD0_DAT0_MARK, SD0_DAT1_MARK, + SD0_DAT2_MARK, SD0_DAT3_MARK, +}; +static const unsigned int sdhi0_ctrl_pins[] = { + /* CLK, CMD */ + RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1), +}; +static const unsigned int sdhi0_ctrl_mux[] = { + SD0_CLK_MARK, SD0_CMD_MARK, +}; +static const unsigned int sdhi0_cd_pins[] = { + /* CD */ + RCAR_GP_PIN(3, 12), +}; +static const unsigned int sdhi0_cd_mux[] = { + SD0_CD_MARK, +}; +static const unsigned int sdhi0_wp_pins[] = { + /* WP */ + RCAR_GP_PIN(3, 13), +}; +static const unsigned int sdhi0_wp_mux[] = { + SD0_WP_MARK, +}; +/* - SDHI1 
------------------------------------------------------------------ */ +static const unsigned int sdhi1_data1_pins[] = { + /* D0 */ + RCAR_GP_PIN(3, 8), +}; +static const unsigned int sdhi1_data1_mux[] = { + SD1_DAT0_MARK, +}; +static const unsigned int sdhi1_data4_pins[] = { + /* D[0:3] */ + RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), + RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), +}; +static const unsigned int sdhi1_data4_mux[] = { + SD1_DAT0_MARK, SD1_DAT1_MARK, + SD1_DAT2_MARK, SD1_DAT3_MARK, +}; +static const unsigned int sdhi1_ctrl_pins[] = { + /* CLK, CMD */ + RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), +}; +static const unsigned int sdhi1_ctrl_mux[] = { + SD1_CLK_MARK, SD1_CMD_MARK, +}; +static const unsigned int sdhi1_cd_pins[] = { + /* CD */ + RCAR_GP_PIN(3, 14), +}; +static const unsigned int sdhi1_cd_mux[] = { + SD1_CD_MARK, +}; +static const unsigned int sdhi1_wp_pins[] = { + /* WP */ + RCAR_GP_PIN(3, 15), +}; +static const unsigned int sdhi1_wp_mux[] = { + SD1_WP_MARK, +}; +/* - SDHI2 ------------------------------------------------------------------ */ +static const unsigned int sdhi2_data1_pins[] = { + /* D0 */ + RCAR_GP_PIN(4, 2), +}; +static const unsigned int sdhi2_data1_mux[] = { + SD2_DAT0_MARK, +}; +static const unsigned int sdhi2_data4_pins[] = { + /* D[0:3] */ + RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3), + RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5), +}; +static const unsigned int sdhi2_data4_mux[] = { + SD2_DAT0_MARK, SD2_DAT1_MARK, + SD2_DAT2_MARK, SD2_DAT3_MARK, +}; +static const unsigned int sdhi2_data8_pins[] = { + /* D[0:7] */ + RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3), + RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5), + RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), + RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), +}; +static const unsigned int sdhi2_data8_mux[] = { + SD2_DAT0_MARK, SD2_DAT1_MARK, + SD2_DAT2_MARK, SD2_DAT3_MARK, + SD2_DAT4_MARK, SD2_DAT5_MARK, + SD2_DAT6_MARK, SD2_DAT7_MARK, +}; +static const unsigned int sdhi2_ctrl_pins[] = { + /* CLK, CMD */ + RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1), +}; +static const unsigned int sdhi2_ctrl_mux[] = { + SD2_CLK_MARK, SD2_CMD_MARK, +}; +static const unsigned int sdhi2_cd_a_pins[] = { + /* CD */ + RCAR_GP_PIN(4, 13), +}; +static const unsigned int sdhi2_cd_a_mux[] = { + SD2_CD_A_MARK, +}; +static const unsigned int sdhi2_cd_b_pins[] = { + /* CD */ + RCAR_GP_PIN(5, 10), +}; +static const unsigned int sdhi2_cd_b_mux[] = { + SD2_CD_B_MARK, +}; +static const unsigned int sdhi2_wp_a_pins[] = { + /* WP */ + RCAR_GP_PIN(4, 14), +}; +static const unsigned int sdhi2_wp_a_mux[] = { + SD2_WP_A_MARK, +}; +static const unsigned int sdhi2_wp_b_pins[] = { + /* WP */ + RCAR_GP_PIN(5, 11), +}; +static const unsigned int sdhi2_wp_b_mux[] = { + SD2_WP_B_MARK, +}; +static const unsigned int sdhi2_ds_pins[] = { + /* DS */ + RCAR_GP_PIN(4, 6), +}; +static const unsigned int sdhi2_ds_mux[] = { + SD2_DS_MARK, +}; +/* - SDHI3 ------------------------------------------------------------------ */ +static const unsigned int sdhi3_data1_pins[] = { + /* D0 */ + RCAR_GP_PIN(4, 9), +}; +static const unsigned int sdhi3_data1_mux[] = { + SD3_DAT0_MARK, +}; +static const unsigned int sdhi3_data4_pins[] = { + /* D[0:3] */ + RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10), + RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12), +}; +static const unsigned int sdhi3_data4_mux[] = { + SD3_DAT0_MARK, SD3_DAT1_MARK, + SD3_DAT2_MARK, SD3_DAT3_MARK, +}; +static const unsigned int sdhi3_data8_pins[] = { + /* D[0:7] */ + RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10), + RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12), + RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 
14), + RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16), +}; +static const unsigned int sdhi3_data8_mux[] = { + SD3_DAT0_MARK, SD3_DAT1_MARK, + SD3_DAT2_MARK, SD3_DAT3_MARK, + SD3_DAT4_MARK, SD3_DAT5_MARK, + SD3_DAT6_MARK, SD3_DAT7_MARK, +}; +static const unsigned int sdhi3_ctrl_pins[] = { + /* CLK, CMD */ + RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8), +}; +static const unsigned int sdhi3_ctrl_mux[] = { + SD3_CLK_MARK, SD3_CMD_MARK, +}; +static const unsigned int sdhi3_cd_pins[] = { + /* CD */ + RCAR_GP_PIN(4, 15), +}; +static const unsigned int sdhi3_cd_mux[] = { + SD3_CD_MARK, +}; +static const unsigned int sdhi3_wp_pins[] = { + /* WP */ + RCAR_GP_PIN(4, 16), +}; +static const unsigned int sdhi3_wp_mux[] = { + SD3_WP_MARK, +}; +static const unsigned int sdhi3_ds_pins[] = { + /* DS */ + RCAR_GP_PIN(4, 17), +}; +static const unsigned int sdhi3_ds_mux[] = { + SD3_DS_MARK, +}; + +/* - SCIF Clock ------------------------------------------------------------- */ +static const unsigned int scif_clk_a_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(6, 23), +}; +static const unsigned int scif_clk_a_mux[] = { + SCIF_CLK_A_MARK, +}; +static const unsigned int scif_clk_b_pins[] = { + /* SCIF_CLK */ + RCAR_GP_PIN(5, 9), +}; +static const unsigned int scif_clk_b_mux[] = { + SCIF_CLK_B_MARK, +}; /* - SSI -------------------------------------------------------------------- */ static const unsigned int ssi0_data_pins[] = { @@ -2050,6 +3117,31 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(avb_avtp_capture_a), SH_PFC_PIN_GROUP(avb_avtp_match_b), SH_PFC_PIN_GROUP(avb_avtp_capture_b), + SH_PFC_PIN_GROUP(hscif0_data), + SH_PFC_PIN_GROUP(hscif0_clk), + SH_PFC_PIN_GROUP(hscif0_ctrl), + SH_PFC_PIN_GROUP(hscif1_data_a), + SH_PFC_PIN_GROUP(hscif1_clk_a), + SH_PFC_PIN_GROUP(hscif1_ctrl_a), + SH_PFC_PIN_GROUP(hscif1_data_b), + SH_PFC_PIN_GROUP(hscif1_clk_b), + SH_PFC_PIN_GROUP(hscif1_ctrl_b), + SH_PFC_PIN_GROUP(hscif2_data_a), + SH_PFC_PIN_GROUP(hscif2_clk_a), + SH_PFC_PIN_GROUP(hscif2_ctrl_a), + SH_PFC_PIN_GROUP(hscif2_data_b), + SH_PFC_PIN_GROUP(hscif2_clk_b), + SH_PFC_PIN_GROUP(hscif2_ctrl_b), + SH_PFC_PIN_GROUP(hscif3_data_a), + SH_PFC_PIN_GROUP(hscif3_clk), + SH_PFC_PIN_GROUP(hscif3_ctrl), + SH_PFC_PIN_GROUP(hscif3_data_b), + SH_PFC_PIN_GROUP(hscif3_data_c), + SH_PFC_PIN_GROUP(hscif3_data_d), + SH_PFC_PIN_GROUP(hscif4_data_a), + SH_PFC_PIN_GROUP(hscif4_clk), + SH_PFC_PIN_GROUP(hscif4_ctrl), + SH_PFC_PIN_GROUP(hscif4_data_b), SH_PFC_PIN_GROUP(i2c1_a), SH_PFC_PIN_GROUP(i2c1_b), SH_PFC_PIN_GROUP(i2c2_a), @@ -2057,6 +3149,101 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(i2c6_a), SH_PFC_PIN_GROUP(i2c6_b), SH_PFC_PIN_GROUP(i2c6_c), + SH_PFC_PIN_GROUP(msiof0_clk), + SH_PFC_PIN_GROUP(msiof0_sync), + SH_PFC_PIN_GROUP(msiof0_ss1), + SH_PFC_PIN_GROUP(msiof0_ss2), + SH_PFC_PIN_GROUP(msiof0_txd), + SH_PFC_PIN_GROUP(msiof0_rxd), + SH_PFC_PIN_GROUP(msiof1_clk_a), + SH_PFC_PIN_GROUP(msiof1_sync_a), + SH_PFC_PIN_GROUP(msiof1_ss1_a), + SH_PFC_PIN_GROUP(msiof1_ss2_a), + SH_PFC_PIN_GROUP(msiof1_txd_a), + SH_PFC_PIN_GROUP(msiof1_rxd_a), + SH_PFC_PIN_GROUP(msiof1_clk_b), + SH_PFC_PIN_GROUP(msiof1_sync_b), + SH_PFC_PIN_GROUP(msiof1_ss1_b), + SH_PFC_PIN_GROUP(msiof1_ss2_b), + SH_PFC_PIN_GROUP(msiof1_txd_b), + SH_PFC_PIN_GROUP(msiof1_rxd_b), + SH_PFC_PIN_GROUP(msiof1_clk_c), + SH_PFC_PIN_GROUP(msiof1_sync_c), + SH_PFC_PIN_GROUP(msiof1_ss1_c), + SH_PFC_PIN_GROUP(msiof1_ss2_c), + SH_PFC_PIN_GROUP(msiof1_txd_c), + SH_PFC_PIN_GROUP(msiof1_rxd_c), + SH_PFC_PIN_GROUP(msiof1_clk_d), + 
SH_PFC_PIN_GROUP(msiof1_sync_d), + SH_PFC_PIN_GROUP(msiof1_ss1_d), + SH_PFC_PIN_GROUP(msiof1_ss2_d), + SH_PFC_PIN_GROUP(msiof1_txd_d), + SH_PFC_PIN_GROUP(msiof1_rxd_d), + SH_PFC_PIN_GROUP(msiof1_clk_e), + SH_PFC_PIN_GROUP(msiof1_sync_e), + SH_PFC_PIN_GROUP(msiof1_ss1_e), + SH_PFC_PIN_GROUP(msiof1_ss2_e), + SH_PFC_PIN_GROUP(msiof1_txd_e), + SH_PFC_PIN_GROUP(msiof1_rxd_e), + SH_PFC_PIN_GROUP(msiof1_clk_f), + SH_PFC_PIN_GROUP(msiof1_sync_f), + SH_PFC_PIN_GROUP(msiof1_ss1_f), + SH_PFC_PIN_GROUP(msiof1_ss2_f), + SH_PFC_PIN_GROUP(msiof1_txd_f), + SH_PFC_PIN_GROUP(msiof1_rxd_f), + SH_PFC_PIN_GROUP(msiof1_clk_g), + SH_PFC_PIN_GROUP(msiof1_sync_g), + SH_PFC_PIN_GROUP(msiof1_ss1_g), + SH_PFC_PIN_GROUP(msiof1_ss2_g), + SH_PFC_PIN_GROUP(msiof1_txd_g), + SH_PFC_PIN_GROUP(msiof1_rxd_g), + SH_PFC_PIN_GROUP(msiof2_clk_a), + SH_PFC_PIN_GROUP(msiof2_sync_a), + SH_PFC_PIN_GROUP(msiof2_ss1_a), + SH_PFC_PIN_GROUP(msiof2_ss2_a), + SH_PFC_PIN_GROUP(msiof2_txd_a), + SH_PFC_PIN_GROUP(msiof2_rxd_a), + SH_PFC_PIN_GROUP(msiof2_clk_b), + SH_PFC_PIN_GROUP(msiof2_sync_b), + SH_PFC_PIN_GROUP(msiof2_ss1_b), + SH_PFC_PIN_GROUP(msiof2_ss2_b), + SH_PFC_PIN_GROUP(msiof2_txd_b), + SH_PFC_PIN_GROUP(msiof2_rxd_b), + SH_PFC_PIN_GROUP(msiof2_clk_c), + SH_PFC_PIN_GROUP(msiof2_sync_c), + SH_PFC_PIN_GROUP(msiof2_ss1_c), + SH_PFC_PIN_GROUP(msiof2_ss2_c), + SH_PFC_PIN_GROUP(msiof2_txd_c), + SH_PFC_PIN_GROUP(msiof2_rxd_c), + SH_PFC_PIN_GROUP(msiof2_clk_d), + SH_PFC_PIN_GROUP(msiof2_sync_d), + SH_PFC_PIN_GROUP(msiof2_ss1_d), + SH_PFC_PIN_GROUP(msiof2_ss2_d), + SH_PFC_PIN_GROUP(msiof2_txd_d), + SH_PFC_PIN_GROUP(msiof2_rxd_d), + SH_PFC_PIN_GROUP(msiof3_clk_a), + SH_PFC_PIN_GROUP(msiof3_sync_a), + SH_PFC_PIN_GROUP(msiof3_ss1_a), + SH_PFC_PIN_GROUP(msiof3_ss2_a), + SH_PFC_PIN_GROUP(msiof3_txd_a), + SH_PFC_PIN_GROUP(msiof3_rxd_a), + SH_PFC_PIN_GROUP(msiof3_clk_b), + SH_PFC_PIN_GROUP(msiof3_sync_b), + SH_PFC_PIN_GROUP(msiof3_ss1_b), + SH_PFC_PIN_GROUP(msiof3_ss2_b), + SH_PFC_PIN_GROUP(msiof3_txd_b), + SH_PFC_PIN_GROUP(msiof3_rxd_b), + SH_PFC_PIN_GROUP(msiof3_clk_c), + SH_PFC_PIN_GROUP(msiof3_sync_c), + SH_PFC_PIN_GROUP(msiof3_txd_c), + SH_PFC_PIN_GROUP(msiof3_rxd_c), + SH_PFC_PIN_GROUP(msiof3_clk_d), + SH_PFC_PIN_GROUP(msiof3_sync_d), + SH_PFC_PIN_GROUP(msiof3_ss1_d), + SH_PFC_PIN_GROUP(msiof3_txd_d), + SH_PFC_PIN_GROUP(msiof3_rxd_d), + SH_PFC_PIN_GROUP(sata0_devslp_a), + SH_PFC_PIN_GROUP(sata0_devslp_b), SH_PFC_PIN_GROUP(scif0_data), SH_PFC_PIN_GROUP(scif0_clk), SH_PFC_PIN_GROUP(scif0_ctrl), @@ -2082,6 +3269,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scif4_ctrl_c), SH_PFC_PIN_GROUP(scif5_data), SH_PFC_PIN_GROUP(scif5_clk), + SH_PFC_PIN_GROUP(scif_clk_a), + SH_PFC_PIN_GROUP(scif_clk_b), + SH_PFC_PIN_GROUP(sdhi0_data1), + SH_PFC_PIN_GROUP(sdhi0_data4), + SH_PFC_PIN_GROUP(sdhi0_ctrl), + SH_PFC_PIN_GROUP(sdhi0_cd), + SH_PFC_PIN_GROUP(sdhi0_wp), + SH_PFC_PIN_GROUP(sdhi1_data1), + SH_PFC_PIN_GROUP(sdhi1_data4), + SH_PFC_PIN_GROUP(sdhi1_ctrl), + SH_PFC_PIN_GROUP(sdhi1_cd), + SH_PFC_PIN_GROUP(sdhi1_wp), + SH_PFC_PIN_GROUP(sdhi2_data1), + SH_PFC_PIN_GROUP(sdhi2_data4), + SH_PFC_PIN_GROUP(sdhi2_data8), + SH_PFC_PIN_GROUP(sdhi2_ctrl), + SH_PFC_PIN_GROUP(sdhi2_cd_a), + SH_PFC_PIN_GROUP(sdhi2_wp_a), + SH_PFC_PIN_GROUP(sdhi2_cd_b), + SH_PFC_PIN_GROUP(sdhi2_wp_b), + SH_PFC_PIN_GROUP(sdhi2_ds), + SH_PFC_PIN_GROUP(sdhi3_data1), + SH_PFC_PIN_GROUP(sdhi3_data4), + SH_PFC_PIN_GROUP(sdhi3_data8), + SH_PFC_PIN_GROUP(sdhi3_ctrl), + SH_PFC_PIN_GROUP(sdhi3_cd), + SH_PFC_PIN_GROUP(sdhi3_wp), + SH_PFC_PIN_GROUP(sdhi3_ds), 
SH_PFC_PIN_GROUP(ssi0_data), SH_PFC_PIN_GROUP(ssi01239_ctrl), SH_PFC_PIN_GROUP(ssi1_data_a), @@ -2141,6 +3356,46 @@ static const char * const avb_groups[] = { "avb_avtp_capture_b", }; +static const char * const hscif0_groups[] = { + "hscif0_data", + "hscif0_clk", + "hscif0_ctrl", +}; + +static const char * const hscif1_groups[] = { + "hscif1_data_a", + "hscif1_clk_a", + "hscif1_ctrl_a", + "hscif1_data_b", + "hscif1_clk_b", + "hscif1_ctrl_b", +}; + +static const char * const hscif2_groups[] = { + "hscif2_data_a", + "hscif2_clk_a", + "hscif2_ctrl_a", + "hscif2_data_b", + "hscif2_clk_b", + "hscif2_ctrl_b", +}; + +static const char * const hscif3_groups[] = { + "hscif3_data_a", + "hscif3_clk", + "hscif3_ctrl", + "hscif3_data_b", + "hscif3_data_c", + "hscif3_data_d", +}; + +static const char * const hscif4_groups[] = { + "hscif4_data_a", + "hscif4_clk", + "hscif4_ctrl", + "hscif4_data_b", +}; + static const char * const i2c1_groups[] = { "i2c1_a", "i2c1_b", @@ -2157,6 +3412,116 @@ static const char * const i2c6_groups[] = { "i2c6_c", }; +static const char * const msiof0_groups[] = { + "msiof0_clk", + "msiof0_sync", + "msiof0_ss1", + "msiof0_ss2", + "msiof0_txd", + "msiof0_rxd", +}; + +static const char * const msiof1_groups[] = { + "msiof1_clk_a", + "msiof1_sync_a", + "msiof1_ss1_a", + "msiof1_ss2_a", + "msiof1_txd_a", + "msiof1_rxd_a", + "msiof1_clk_b", + "msiof1_sync_b", + "msiof1_ss1_b", + "msiof1_ss2_b", + "msiof1_txd_b", + "msiof1_rxd_b", + "msiof1_clk_c", + "msiof1_sync_c", + "msiof1_ss1_c", + "msiof1_ss2_c", + "msiof1_txd_c", + "msiof1_rxd_c", + "msiof1_clk_d", + "msiof1_sync_d", + "msiof1_ss1_d", + "msiof1_ss2_d", + "msiof1_txd_d", + "msiof1_rxd_d", + "msiof1_clk_e", + "msiof1_sync_e", + "msiof1_ss1_e", + "msiof1_ss2_e", + "msiof1_txd_e", + "msiof1_rxd_e", + "msiof1_clk_f", + "msiof1_sync_f", + "msiof1_ss1_f", + "msiof1_ss2_f", + "msiof1_txd_f", + "msiof1_rxd_f", + "msiof1_clk_g", + "msiof1_sync_g", + "msiof1_ss1_g", + "msiof1_ss2_g", + "msiof1_txd_g", + "msiof1_rxd_g", +}; + +static const char * const msiof2_groups[] = { + "msiof2_clk_a", + "msiof2_sync_a", + "msiof2_ss1_a", + "msiof2_ss2_a", + "msiof2_txd_a", + "msiof2_rxd_a", + "msiof2_clk_b", + "msiof2_sync_b", + "msiof2_ss1_b", + "msiof2_ss2_b", + "msiof2_txd_b", + "msiof2_rxd_b", + "msiof2_clk_c", + "msiof2_sync_c", + "msiof2_ss1_c", + "msiof2_ss2_c", + "msiof2_txd_c", + "msiof2_rxd_c", + "msiof2_clk_d", + "msiof2_sync_d", + "msiof2_ss1_d", + "msiof2_ss2_d", + "msiof2_txd_d", + "msiof2_rxd_d", +}; + +static const char * const msiof3_groups[] = { + "msiof3_clk_a", + "msiof3_sync_a", + "msiof3_ss1_a", + "msiof3_ss2_a", + "msiof3_txd_a", + "msiof3_rxd_a", + "msiof3_clk_b", + "msiof3_sync_b", + "msiof3_ss1_b", + "msiof3_ss2_b", + "msiof3_txd_b", + "msiof3_rxd_b", + "msiof3_clk_c", + "msiof3_sync_c", + "msiof3_txd_c", + "msiof3_rxd_c", + "msiof3_clk_d", + "msiof3_sync_d", + "msiof3_ss1_d", + "msiof3_txd_d", + "msiof3_rxd_d", +}; + +static const char * const sata0_groups[] = { + "sata0_devslp_a", + "sata0_devslp_b", +}; + static const char * const scif0_groups[] = { "scif0_data", "scif0_clk", @@ -2200,6 +3565,49 @@ static const char * const scif5_groups[] = { "scif5_clk", }; +static const char * const scif_clk_groups[] = { + "scif_clk_a", + "scif_clk_b", +}; + +static const char * const sdhi0_groups[] = { + "sdhi0_data1", + "sdhi0_data4", + "sdhi0_ctrl", + "sdhi0_cd", + "sdhi0_wp", +}; + +static const char * const sdhi1_groups[] = { + "sdhi1_data1", + "sdhi1_data4", + "sdhi1_ctrl", + "sdhi1_cd", + "sdhi1_wp", +}; + +static const char * 
const sdhi2_groups[] = { + "sdhi2_data1", + "sdhi2_data4", + "sdhi2_data8", + "sdhi2_ctrl", + "sdhi2_cd_a", + "sdhi2_wp_a", + "sdhi2_cd_b", + "sdhi2_wp_b", + "sdhi2_ds", +}; + +static const char * const sdhi3_groups[] = { + "sdhi3_data1", + "sdhi3_data4", + "sdhi3_data8", + "sdhi3_ctrl", + "sdhi3_cd", + "sdhi3_wp", + "sdhi3_ds", +}; + static const char * const ssi_groups[] = { "ssi0_data", "ssi01239_ctrl", @@ -2231,15 +3639,30 @@ static const char * const ssi_groups[] = { static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(audio_clk), SH_PFC_FUNCTION(avb), + SH_PFC_FUNCTION(hscif0), + SH_PFC_FUNCTION(hscif1), + SH_PFC_FUNCTION(hscif2), + SH_PFC_FUNCTION(hscif3), + SH_PFC_FUNCTION(hscif4), SH_PFC_FUNCTION(i2c1), SH_PFC_FUNCTION(i2c2), SH_PFC_FUNCTION(i2c6), + SH_PFC_FUNCTION(msiof0), + SH_PFC_FUNCTION(msiof1), + SH_PFC_FUNCTION(msiof2), + SH_PFC_FUNCTION(msiof3), + SH_PFC_FUNCTION(sata0), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scif2), SH_PFC_FUNCTION(scif3), SH_PFC_FUNCTION(scif4), SH_PFC_FUNCTION(scif5), + SH_PFC_FUNCTION(scif_clk), + SH_PFC_FUNCTION(sdhi0), + SH_PFC_FUNCTION(sdhi1), + SH_PFC_FUNCTION(sdhi2), + SH_PFC_FUNCTION(sdhi3), SH_PFC_FUNCTION(ssi), }; diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c index 6a69c8c5d943..d25e6f674d0a 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c @@ -2059,7 +2059,7 @@ static const unsigned int lcd2_data9_mux[] = { LCD2D8_MARK, }; static const unsigned int lcd2_data12_pins[] = { - /* D[0:12] */ + /* D[0:11] */ 128, 129, 142, 143, 144, 145, 138, 139, 140, 141, 130, 131, }; @@ -2198,6 +2198,420 @@ static const unsigned int mmc0_ctrl_1_pins[] = { static const unsigned int mmc0_ctrl_1_mux[] = { MMCCMD1_MARK, MMCCLK1_MARK, }; +/* - MSIOF0 ----------------------------------------------------------------- */ +static const unsigned int msiof0_rsck_pins[] = { + /* RSCK */ + 66, +}; +static const unsigned int msiof0_rsck_mux[] = { + MSIOF0_RSCK_MARK, +}; +static const unsigned int msiof0_tsck_pins[] = { + /* TSCK */ + 64, +}; +static const unsigned int msiof0_tsck_mux[] = { + MSIOF0_TSCK_MARK, +}; +static const unsigned int msiof0_rsync_pins[] = { + /* RSYNC */ + 67, +}; +static const unsigned int msiof0_rsync_mux[] = { + MSIOF0_RSYNC_MARK, +}; +static const unsigned int msiof0_tsync_pins[] = { + /* TSYNC */ + 63, +}; +static const unsigned int msiof0_tsync_mux[] = { + MSIOF0_TSYNC_MARK, +}; +static const unsigned int msiof0_ss1_pins[] = { + /* SS1 */ + 62, +}; +static const unsigned int msiof0_ss1_mux[] = { + MSIOF0_SS1_MARK, +}; +static const unsigned int msiof0_ss2_pins[] = { + /* SS2 */ + 71, +}; +static const unsigned int msiof0_ss2_mux[] = { + MSIOF0_SS2_MARK, +}; +static const unsigned int msiof0_rxd_pins[] = { + /* RXD */ + 70, +}; +static const unsigned int msiof0_rxd_mux[] = { + MSIOF0_RXD_MARK, +}; +static const unsigned int msiof0_txd_pins[] = { + /* TXD */ + 65, +}; +static const unsigned int msiof0_txd_mux[] = { + MSIOF0_TXD_MARK, +}; +static const unsigned int msiof0_mck0_pins[] = { + /* MSCK0 */ + 68, +}; +static const unsigned int msiof0_mck0_mux[] = { + MSIOF0_MCK0_MARK, +}; + +static const unsigned int msiof0_mck1_pins[] = { + /* MSCK1 */ + 69, +}; +static const unsigned int msiof0_mck1_mux[] = { + MSIOF0_MCK1_MARK, +}; + +static const unsigned int msiof0l_rsck_pins[] = { + /* RSCK */ + 214, +}; +static const unsigned int msiof0l_rsck_mux[] = { + MSIOF0L_RSCK_MARK, +}; +static const unsigned int msiof0l_tsck_pins[] 
= { + /* TSCK */ + 219, +}; +static const unsigned int msiof0l_tsck_mux[] = { + MSIOF0L_TSCK_MARK, +}; +static const unsigned int msiof0l_rsync_pins[] = { + /* RSYNC */ + 215, +}; +static const unsigned int msiof0l_rsync_mux[] = { + MSIOF0L_RSYNC_MARK, +}; +static const unsigned int msiof0l_tsync_pins[] = { + /* TSYNC */ + 217, +}; +static const unsigned int msiof0l_tsync_mux[] = { + MSIOF0L_TSYNC_MARK, +}; +static const unsigned int msiof0l_ss1_a_pins[] = { + /* SS1 */ + 207, +}; +static const unsigned int msiof0l_ss1_a_mux[] = { + PORT207_MSIOF0L_SS1_MARK, +}; +static const unsigned int msiof0l_ss1_b_pins[] = { + /* SS1 */ + 210, +}; +static const unsigned int msiof0l_ss1_b_mux[] = { + PORT210_MSIOF0L_SS1_MARK, +}; +static const unsigned int msiof0l_ss2_a_pins[] = { + /* SS2 */ + 208, +}; +static const unsigned int msiof0l_ss2_a_mux[] = { + PORT208_MSIOF0L_SS2_MARK, +}; +static const unsigned int msiof0l_ss2_b_pins[] = { + /* SS2 */ + 211, +}; +static const unsigned int msiof0l_ss2_b_mux[] = { + PORT211_MSIOF0L_SS2_MARK, +}; +static const unsigned int msiof0l_rxd_pins[] = { + /* RXD */ + 221, +}; +static const unsigned int msiof0l_rxd_mux[] = { + MSIOF0L_RXD_MARK, +}; +static const unsigned int msiof0l_txd_pins[] = { + /* TXD */ + 222, +}; +static const unsigned int msiof0l_txd_mux[] = { + MSIOF0L_TXD_MARK, +}; +static const unsigned int msiof0l_mck0_pins[] = { + /* MSCK0 */ + 212, +}; +static const unsigned int msiof0l_mck0_mux[] = { + MSIOF0L_MCK0_MARK, +}; +static const unsigned int msiof0l_mck1_pins[] = { + /* MSCK1 */ + 213, +}; +static const unsigned int msiof0l_mck1_mux[] = { + MSIOF0L_MCK1_MARK, +}; +/* - MSIOF1 ----------------------------------------------------------------- */ +static const unsigned int msiof1_rsck_pins[] = { + /* RSCK */ + 234, +}; +static const unsigned int msiof1_rsck_mux[] = { + MSIOF1_RSCK_MARK, +}; +static const unsigned int msiof1_tsck_pins[] = { + /* TSCK */ + 232, +}; +static const unsigned int msiof1_tsck_mux[] = { + MSIOF1_TSCK_MARK, +}; +static const unsigned int msiof1_rsync_pins[] = { + /* RSYNC */ + 235, +}; +static const unsigned int msiof1_rsync_mux[] = { + MSIOF1_RSYNC_MARK, +}; +static const unsigned int msiof1_tsync_pins[] = { + /* TSYNC */ + 231, +}; +static const unsigned int msiof1_tsync_mux[] = { + MSIOF1_TSYNC_MARK, +}; +static const unsigned int msiof1_ss1_pins[] = { + /* SS1 */ + 238, +}; +static const unsigned int msiof1_ss1_mux[] = { + MSIOF1_SS1_MARK, +}; +static const unsigned int msiof1_ss2_pins[] = { + /* SS2 */ + 239, +}; +static const unsigned int msiof1_ss2_mux[] = { + MSIOF1_SS2_MARK, +}; +static const unsigned int msiof1_rxd_pins[] = { + /* RXD */ + 233, +}; +static const unsigned int msiof1_rxd_mux[] = { + MSIOF1_RXD_MARK, +}; +static const unsigned int msiof1_txd_pins[] = { + /* TXD */ + 230, +}; +static const unsigned int msiof1_txd_mux[] = { + MSIOF1_TXD_MARK, +}; +static const unsigned int msiof1_mck0_pins[] = { + /* MSCK0 */ + 236, +}; +static const unsigned int msiof1_mck0_mux[] = { + MSIOF1_MCK0_MARK, +}; +static const unsigned int msiof1_mck1_pins[] = { + /* MSCK1 */ + 237, +}; +static const unsigned int msiof1_mck1_mux[] = { + MSIOF1_MCK1_MARK, +}; +/* - MSIOF2 ----------------------------------------------------------------- */ +static const unsigned int msiof2_rsck_pins[] = { + /* RSCK */ + 151, +}; +static const unsigned int msiof2_rsck_mux[] = { + MSIOF2_RSCK_MARK, +}; +static const unsigned int msiof2_tsck_pins[] = { + /* TSCK */ + 135, +}; +static const unsigned int msiof2_tsck_mux[] = { + MSIOF2_TSCK_MARK, 
+}; +static const unsigned int msiof2_rsync_pins[] = { + /* RSYNC */ + 152, +}; +static const unsigned int msiof2_rsync_mux[] = { + MSIOF2_RSYNC_MARK, +}; +static const unsigned int msiof2_tsync_pins[] = { + /* TSYNC */ + 133, +}; +static const unsigned int msiof2_tsync_mux[] = { + MSIOF2_TSYNC_MARK, +}; +static const unsigned int msiof2_ss1_a_pins[] = { + /* SS1 */ + 131, +}; +static const unsigned int msiof2_ss1_a_mux[] = { + PORT131_MSIOF2_SS1_MARK, +}; +static const unsigned int msiof2_ss1_b_pins[] = { + /* SS1 */ + 153, +}; +static const unsigned int msiof2_ss1_b_mux[] = { + PORT153_MSIOF2_SS1_MARK, +}; +static const unsigned int msiof2_ss2_a_pins[] = { + /* SS2 */ + 132, +}; +static const unsigned int msiof2_ss2_a_mux[] = { + PORT132_MSIOF2_SS2_MARK, +}; +static const unsigned int msiof2_ss2_b_pins[] = { + /* SS2 */ + 156, +}; +static const unsigned int msiof2_ss2_b_mux[] = { + PORT156_MSIOF2_SS2_MARK, +}; +static const unsigned int msiof2_rxd_a_pins[] = { + /* RXD */ + 130, +}; +static const unsigned int msiof2_rxd_a_mux[] = { + PORT130_MSIOF2_RXD_MARK, +}; +static const unsigned int msiof2_rxd_b_pins[] = { + /* RXD */ + 157, +}; +static const unsigned int msiof2_rxd_b_mux[] = { + PORT157_MSIOF2_RXD_MARK, +}; +static const unsigned int msiof2_txd_pins[] = { + /* TXD */ + 134, +}; +static const unsigned int msiof2_txd_mux[] = { + MSIOF2_TXD_MARK, +}; +static const unsigned int msiof2_mck0_pins[] = { + /* MSCK0 */ + 154, +}; +static const unsigned int msiof2_mck0_mux[] = { + MSIOF2_MCK0_MARK, +}; +static const unsigned int msiof2_mck1_pins[] = { + /* MSCK1 */ + 155, +}; +static const unsigned int msiof2_mck1_mux[] = { + MSIOF2_MCK1_MARK, +}; + +static const unsigned int msiof2r_tsck_pins[] = { + /* TSCK */ + 248, +}; +static const unsigned int msiof2r_tsck_mux[] = { + MSIOF2R_TSCK_MARK, +}; +static const unsigned int msiof2r_tsync_pins[] = { + /* TSYNC */ + 249, +}; +static const unsigned int msiof2r_tsync_mux[] = { + MSIOF2R_TSYNC_MARK, +}; +static const unsigned int msiof2r_rxd_pins[] = { + /* RXD */ + 244, +}; +static const unsigned int msiof2r_rxd_mux[] = { + MSIOF2R_RXD_MARK, +}; +static const unsigned int msiof2r_txd_pins[] = { + /* TXD */ + 245, +}; +static const unsigned int msiof2r_txd_mux[] = { + MSIOF2R_TXD_MARK, +}; +/* - MSIOF3 (Pin function name of MSIOF3 is named BBIF1) -------------------- */ +static const unsigned int msiof3_rsck_pins[] = { + /* RSCK */ + 115, +}; +static const unsigned int msiof3_rsck_mux[] = { + BBIF1_RSCK_MARK, +}; +static const unsigned int msiof3_tsck_pins[] = { + /* TSCK */ + 112, +}; +static const unsigned int msiof3_tsck_mux[] = { + BBIF1_TSCK_MARK, +}; +static const unsigned int msiof3_rsync_pins[] = { + /* RSYNC */ + 116, +}; +static const unsigned int msiof3_rsync_mux[] = { + BBIF1_RSYNC_MARK, +}; +static const unsigned int msiof3_tsync_pins[] = { + /* TSYNC */ + 113, +}; +static const unsigned int msiof3_tsync_mux[] = { + BBIF1_TSYNC_MARK, +}; +static const unsigned int msiof3_ss1_pins[] = { + /* SS1 */ + 117, +}; +static const unsigned int msiof3_ss1_mux[] = { + BBIF1_SS1_MARK, +}; +static const unsigned int msiof3_ss2_pins[] = { + /* SS2 */ + 109, +}; +static const unsigned int msiof3_ss2_mux[] = { + BBIF1_SS2_MARK, +}; +static const unsigned int msiof3_rxd_pins[] = { + /* RXD */ + 111, +}; +static const unsigned int msiof3_rxd_mux[] = { + BBIF1_RXD_MARK, +}; +static const unsigned int msiof3_txd_pins[] = { + /* TXD */ + 114, +}; +static const unsigned int msiof3_txd_mux[] = { + BBIF1_TXD_MARK, +}; +static const unsigned int 
msiof3_flow_pins[] = { + /* FLOW */ + 117, +}; +static const unsigned int msiof3_flow_mux[] = { + BBIF1_FLOW_MARK, +}; + /* - SCIFA0 ----------------------------------------------------------------- */ static const unsigned int scifa0_data_pins[] = { /* RXD, TXD */ @@ -2782,6 +3196,64 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(mmc0_data4_1), SH_PFC_PIN_GROUP(mmc0_data8_1), SH_PFC_PIN_GROUP(mmc0_ctrl_1), + SH_PFC_PIN_GROUP(msiof0_rsck), + SH_PFC_PIN_GROUP(msiof0_tsck), + SH_PFC_PIN_GROUP(msiof0_rsync), + SH_PFC_PIN_GROUP(msiof0_tsync), + SH_PFC_PIN_GROUP(msiof0_ss1), + SH_PFC_PIN_GROUP(msiof0_ss2), + SH_PFC_PIN_GROUP(msiof0_rxd), + SH_PFC_PIN_GROUP(msiof0_txd), + SH_PFC_PIN_GROUP(msiof0_mck0), + SH_PFC_PIN_GROUP(msiof0_mck1), + SH_PFC_PIN_GROUP(msiof0l_rsck), + SH_PFC_PIN_GROUP(msiof0l_tsck), + SH_PFC_PIN_GROUP(msiof0l_rsync), + SH_PFC_PIN_GROUP(msiof0l_tsync), + SH_PFC_PIN_GROUP(msiof0l_ss1_a), + SH_PFC_PIN_GROUP(msiof0l_ss1_b), + SH_PFC_PIN_GROUP(msiof0l_ss2_a), + SH_PFC_PIN_GROUP(msiof0l_ss2_b), + SH_PFC_PIN_GROUP(msiof0l_rxd), + SH_PFC_PIN_GROUP(msiof0l_txd), + SH_PFC_PIN_GROUP(msiof0l_mck0), + SH_PFC_PIN_GROUP(msiof0l_mck1), + SH_PFC_PIN_GROUP(msiof1_rsck), + SH_PFC_PIN_GROUP(msiof1_tsck), + SH_PFC_PIN_GROUP(msiof1_rsync), + SH_PFC_PIN_GROUP(msiof1_tsync), + SH_PFC_PIN_GROUP(msiof1_ss1), + SH_PFC_PIN_GROUP(msiof1_ss2), + SH_PFC_PIN_GROUP(msiof1_rxd), + SH_PFC_PIN_GROUP(msiof1_txd), + SH_PFC_PIN_GROUP(msiof1_mck0), + SH_PFC_PIN_GROUP(msiof1_mck1), + SH_PFC_PIN_GROUP(msiof2_rsck), + SH_PFC_PIN_GROUP(msiof2_tsck), + SH_PFC_PIN_GROUP(msiof2_rsync), + SH_PFC_PIN_GROUP(msiof2_tsync), + SH_PFC_PIN_GROUP(msiof2_ss1_a), + SH_PFC_PIN_GROUP(msiof2_ss1_b), + SH_PFC_PIN_GROUP(msiof2_ss2_a), + SH_PFC_PIN_GROUP(msiof2_ss2_b), + SH_PFC_PIN_GROUP(msiof2_rxd_a), + SH_PFC_PIN_GROUP(msiof2_rxd_b), + SH_PFC_PIN_GROUP(msiof2_txd), + SH_PFC_PIN_GROUP(msiof2_mck0), + SH_PFC_PIN_GROUP(msiof2_mck1), + SH_PFC_PIN_GROUP(msiof2r_tsck), + SH_PFC_PIN_GROUP(msiof2r_tsync), + SH_PFC_PIN_GROUP(msiof2r_rxd), + SH_PFC_PIN_GROUP(msiof2r_txd), + SH_PFC_PIN_GROUP(msiof3_rsck), + SH_PFC_PIN_GROUP(msiof3_tsck), + SH_PFC_PIN_GROUP(msiof3_rsync), + SH_PFC_PIN_GROUP(msiof3_tsync), + SH_PFC_PIN_GROUP(msiof3_ss1), + SH_PFC_PIN_GROUP(msiof3_ss2), + SH_PFC_PIN_GROUP(msiof3_rxd), + SH_PFC_PIN_GROUP(msiof3_txd), + SH_PFC_PIN_GROUP(msiof3_flow), SH_PFC_PIN_GROUP(scifa0_data), SH_PFC_PIN_GROUP(scifa0_clk), SH_PFC_PIN_GROUP(scifa0_ctrl), @@ -2982,6 +3454,76 @@ static const char * const mmc0_groups[] = { "mmc0_ctrl_1", }; +static const char * const msiof0_groups[] = { + "msiof0_rsck", + "msiof0_tsck", + "msiof0_rsync", + "msiof0_tsync", + "msiof0_ss1", + "msiof0_ss2", + "msiof0_rxd", + "msiof0_txd", + "msiof0_mck0", + "msiof0_mck1", + "msiof0l_rsck", + "msiof0l_tsck", + "msiof0l_rsync", + "msiof0l_tsync", + "msiof0l_ss1_a", + "msiof0l_ss1_b", + "msiof0l_ss2_a", + "msiof0l_ss2_b", + "msiof0l_rxd", + "msiof0l_txd", + "msiof0l_mck0", + "msiof0l_mck1", +}; + +static const char * const msiof1_groups[] = { + "msiof1_rsck", + "msiof1_tsck", + "msiof1_rsync", + "msiof1_tsync", + "msiof1_ss1", + "msiof1_ss2", + "msiof1_rxd", + "msiof1_txd", + "msiof1_mck0", + "msiof1_mck1", +}; + +static const char * const msiof2_groups[] = { + "msiof2_rsck", + "msiof2_tsck", + "msiof2_rsync", + "msiof2_tsync", + "msiof2_ss1_a", + "msiof2_ss1_b", + "msiof2_ss2_a", + "msiof2_ss2_b", + "msiof2_rxd_a", + "msiof2_rxd_b", + "msiof2_txd", + "msiof2_mck0", + "msiof2_mck1", + "msiof2r_tsck", + "msiof2r_tsync", + "msiof2r_rxd", + 
"msiof2r_txd", +}; + +static const char * const msiof3_groups[] = { + "msiof3_rsck", + "msiof3_tsck", + "msiof3_rsync", + "msiof3_tsync", + "msiof3_ss1", + "msiof3_ss2", + "msiof3_rxd", + "msiof3_txd", + "msiof3_flow", +}; + static const char * const scifa0_groups[] = { "scifa0_data", "scifa0_clk", @@ -3116,6 +3658,10 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(lcd), SH_PFC_FUNCTION(lcd2), SH_PFC_FUNCTION(mmc0), + SH_PFC_FUNCTION(msiof0), + SH_PFC_FUNCTION(msiof1), + SH_PFC_FUNCTION(msiof2), + SH_PFC_FUNCTION(msiof3), SH_PFC_FUNCTION(scifa0), SH_PFC_FUNCTION(scifa1), SH_PFC_FUNCTION(scifa2), diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c index 9842bb106796..b0b328b3130b 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c @@ -14,14 +14,6 @@ #include "sh_pfc.h" -#define PORT_GP_12(bank, fn, sfx) \ - PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ - PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ - PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ - PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ - PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ - PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx) - #define CPU_ALL_PORT(fn, sfx) \ PORT_GP_32(0, fn, sfx), \ PORT_GP_32(1, fn, sfx), \ @@ -585,15 +577,18 @@ enum { static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */ - PINMUX_DATA(CLKOUT_MARK, FN_CLKOUT), - PINMUX_DATA(BS_MARK, FN_BS), PINMUX_DATA(CS0_MARK, FN_CS0), - PINMUX_DATA(EX_CS0_MARK, FN_EX_CS0), - PINMUX_DATA(RD_MARK, FN_RD), PINMUX_DATA(WE0_MARK, FN_WE0), - PINMUX_DATA(WE1_MARK, FN_WE1), - PINMUX_DATA(SCL0_MARK, FN_SCL0), PINMUX_DATA(PENC0_MARK, FN_PENC0), - PINMUX_DATA(USB_OVC0_MARK, FN_USB_OVC0), - PINMUX_DATA(IRQ2_B_MARK, FN_IRQ2_B), - PINMUX_DATA(IRQ3_B_MARK, FN_IRQ3_B), + PINMUX_SINGLE(CLKOUT), + PINMUX_SINGLE(BS), + PINMUX_SINGLE(CS0), + PINMUX_SINGLE(EX_CS0), + PINMUX_SINGLE(RD), + PINMUX_SINGLE(WE0), + PINMUX_SINGLE(WE1), + PINMUX_SINGLE(SCL0), + PINMUX_SINGLE(PENC0), + PINMUX_SINGLE(USB_OVC0), + PINMUX_SINGLE(IRQ2_B), + PINMUX_SINGLE(IRQ3_B), /* IPSR0 */ PINMUX_IPSR_DATA(IP0_1_0, A0), diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index 863c3e30ce05..87b0a599afaf 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -273,8 +273,10 @@ static int sh_pfc_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_child_of_node(np, child) { ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps, &index); - if (ret < 0) + if (ret < 0) { + of_node_put(child); goto done; + } } /* If no mapping has been found in child nodes try the config node. */ diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 7b373d43d981..2123ab49d6a5 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -199,28 +199,82 @@ struct sh_pfc_soc_info { PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn) /* + * Describe a pinmux configuration for a single-function pin with GPIO + * capability. 
+ * - fn: Function name + */ +#define PINMUX_SINGLE(fn) \ + PINMUX_DATA(fn##_MARK, FN_##fn) + +/* * GP port style (32 ports banks) */ #define PORT_GP_CFG_1(bank, pin, fn, sfx, cfg) fn(bank, pin, GP_##bank##_##pin, sfx, cfg) #define PORT_GP_1(bank, pin, fn, sfx) PORT_GP_CFG_1(bank, pin, fn, sfx, 0) -#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \ +#define PORT_GP_CFG_4(bank, fn, sfx, cfg) \ PORT_GP_CFG_1(bank, 0, fn, sfx, cfg), PORT_GP_CFG_1(bank, 1, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 2, fn, sfx, cfg), PORT_GP_CFG_1(bank, 3, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 2, fn, sfx, cfg), PORT_GP_CFG_1(bank, 3, fn, sfx, cfg) +#define PORT_GP_4(bank, fn, sfx) PORT_GP_CFG_4(bank, fn, sfx, 0) + +#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \ + PORT_GP_CFG_4(bank, fn, sfx, cfg), \ PORT_GP_CFG_1(bank, 4, fn, sfx, cfg), PORT_GP_CFG_1(bank, 5, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), PORT_GP_CFG_1(bank, 7, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), PORT_GP_CFG_1(bank, 7, fn, sfx, cfg) +#define PORT_GP_8(bank, fn, sfx) PORT_GP_CFG_8(bank, fn, sfx, 0) + +#define PORT_GP_CFG_9(bank, fn, sfx, cfg) \ + PORT_GP_CFG_8(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 8, fn, sfx, cfg) +#define PORT_GP_9(bank, fn, sfx) PORT_GP_CFG_9(bank, fn, sfx, 0) + +#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \ + PORT_GP_CFG_8(bank, fn, sfx, cfg), \ PORT_GP_CFG_1(bank, 8, fn, sfx, cfg), PORT_GP_CFG_1(bank, 9, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), PORT_GP_CFG_1(bank, 11, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), PORT_GP_CFG_1(bank, 13, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 14, fn, sfx, cfg), PORT_GP_CFG_1(bank, 15, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 16, fn, sfx, cfg), PORT_GP_CFG_1(bank, 17, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), PORT_GP_CFG_1(bank, 11, fn, sfx, cfg) +#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0) + +#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \ + PORT_GP_CFG_12(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), PORT_GP_CFG_1(bank, 13, fn, sfx, cfg) +#define PORT_GP_14(bank, fn, sfx) PORT_GP_CFG_14(bank, fn, sfx, 0) + +#define PORT_GP_CFG_15(bank, fn, sfx, cfg) \ + PORT_GP_CFG_14(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 14, fn, sfx, cfg) +#define PORT_GP_15(bank, fn, sfx) PORT_GP_CFG_15(bank, fn, sfx, 0) + +#define PORT_GP_CFG_16(bank, fn, sfx, cfg) \ + PORT_GP_CFG_14(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 14, fn, sfx, cfg), PORT_GP_CFG_1(bank, 15, fn, sfx, cfg) +#define PORT_GP_16(bank, fn, sfx) PORT_GP_CFG_16(bank, fn, sfx, 0) + +#define PORT_GP_CFG_18(bank, fn, sfx, cfg) \ + PORT_GP_CFG_16(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 16, fn, sfx, cfg), PORT_GP_CFG_1(bank, 17, fn, sfx, cfg) +#define PORT_GP_18(bank, fn, sfx) PORT_GP_CFG_18(bank, fn, sfx, 0) + +#define PORT_GP_CFG_26(bank, fn, sfx, cfg) \ + PORT_GP_CFG_18(bank, fn, sfx, cfg), \ PORT_GP_CFG_1(bank, 18, fn, sfx, cfg), PORT_GP_CFG_1(bank, 19, fn, sfx, cfg), \ PORT_GP_CFG_1(bank, 20, fn, sfx, cfg), PORT_GP_CFG_1(bank, 21, fn, sfx, cfg), \ PORT_GP_CFG_1(bank, 22, fn, sfx, cfg), PORT_GP_CFG_1(bank, 23, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 24, fn, sfx, cfg), PORT_GP_CFG_1(bank, 25, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 26, fn, sfx, cfg), PORT_GP_CFG_1(bank, 27, fn, sfx, cfg), \ - PORT_GP_CFG_1(bank, 28, fn, sfx, cfg), PORT_GP_CFG_1(bank, 29, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 24, fn, sfx, cfg), PORT_GP_CFG_1(bank, 25, fn, sfx, cfg) +#define PORT_GP_26(bank, fn, sfx) PORT_GP_CFG_26(bank, fn, sfx, 0) + +#define 
PORT_GP_CFG_28(bank, fn, sfx, cfg) \ + PORT_GP_CFG_26(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 26, fn, sfx, cfg), PORT_GP_CFG_1(bank, 27, fn, sfx, cfg) +#define PORT_GP_28(bank, fn, sfx) PORT_GP_CFG_28(bank, fn, sfx, 0) + +#define PORT_GP_CFG_30(bank, fn, sfx, cfg) \ + PORT_GP_CFG_28(bank, fn, sfx, cfg), \ + PORT_GP_CFG_1(bank, 28, fn, sfx, cfg), PORT_GP_CFG_1(bank, 29, fn, sfx, cfg) +#define PORT_GP_30(bank, fn, sfx) PORT_GP_CFG_30(bank, fn, sfx, 0) + +#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \ + PORT_GP_CFG_30(bank, fn, sfx, cfg), \ PORT_GP_CFG_1(bank, 30, fn, sfx, cfg), PORT_GP_CFG_1(bank, 31, fn, sfx, cfg) #define PORT_GP_32(bank, fn, sfx) PORT_GP_CFG_32(bank, fn, sfx, 0) diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 829018c812bd..053d98e33944 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c @@ -161,6 +161,9 @@ enum altas7_pad_type { #define IN_DISABLE_VAL_1_REG_SET 0x0A88 #define IN_DISABLE_VAL_1_REG_CLR 0x0A8C +/* Offset of the SDIO9SEL*/ +#define SYS2PCI_SDIO9SEL 0x14 + struct dt_params { const char *property; int value; @@ -370,6 +373,7 @@ struct atlas7_pmx { struct pinctrl_desc pctl_desc; struct atlas7_pinctrl_data *pctl_data; void __iomem *regs[ATLAS7_PINCTRL_REG_BANKS]; + void __iomem *sys2pci_base; u32 status_ds[NUM_OF_IN_DISABLE_REG]; u32 status_dsv[NUM_OF_IN_DISABLE_REG]; struct atlas7_pad_status sleep_data[ATLAS7_PINCTRL_TOTAL_PINS]; @@ -885,11 +889,12 @@ static const unsigned int lr_lcdrom_pins[] = { 73, 54, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 56, 53, 55, }; static const unsigned int lvds_analog_pins[] = { 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, }; -static const unsigned int nd_df_pins[] = { 44, 43, 42, 41, 40, 39, 38, 37, - 47, 46, 52, 51, 45, 49, 50, 48, 124, }; -static const unsigned int nd_df_nowp_pins[] = { 44, 43, 42, 41, 40, 39, 38, - 37, 47, 46, 52, 51, 45, 49, 50, 48, }; +static const unsigned int nd_df_basic_pins[] = { 44, 43, 42, 41, 40, 39, 38, + 37, 47, 46, 52, 45, 49, 50, 48, }; +static const unsigned int nd_df_wp_pins[] = { 124, }; +static const unsigned int nd_df_cs_pins[] = { 51, }; static const unsigned int ps_pins[] = { 120, 119, 121, }; +static const unsigned int ps_no_dir_pins[] = { 119, }; static const unsigned int pwc_core_on_pins[] = { 8, }; static const unsigned int pwc_ext_on_pins[] = { 6, }; static const unsigned int pwc_gpio3_clk_pins[] = { 3, }; @@ -944,7 +949,7 @@ static const unsigned int sd2_cdb_pins0[] = { 124, }; static const unsigned int sd2_cdb_pins1[] = { 161, }; static const unsigned int sd2_wpb_pins0[] = { 123, }; static const unsigned int sd2_wpb_pins1[] = { 163, }; -static const unsigned int sd3_pins[] = { 85, 86, 87, 88, 89, 90, }; +static const unsigned int sd3_9_pins[] = { 85, 86, 87, 88, 89, 90, }; static const unsigned int sd5_pins[] = { 91, 92, 93, 94, 95, 96, }; static const unsigned int sd6_pins0[] = { 79, 78, 74, 75, 76, 77, }; static const unsigned int sd6_pins1[] = { 101, 99, 100, 110, 109, 111, }; @@ -998,9 +1003,9 @@ static const unsigned int vi_vip1_ext_pins[] = { 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 108, 103, 104, 105, 106, 107, 102, 97, 98, 99, 100, }; static const unsigned int vi_vip1_low8bit_pins[] = { 74, 75, 76, 77, 78, 79, - 80, 81, }; -static const unsigned int vi_vip1_high8bit_pins[] = { 82, 83, 84, 108, 103, - 104, 105, 106, }; + 80, 81, 82, 83, 84, }; +static const unsigned int vi_vip1_high8bit_pins[] = { 82, 83, 84, 103, 104, + 105, 106, 107, 102, 97, 98, }; /* 
definition of pin group table */ struct atlas7_pin_group altas7_pin_groups[] = { @@ -1142,9 +1147,11 @@ struct atlas7_pin_group altas7_pin_groups[] = { GROUP("ld_ldd_lck_grp", ld_ldd_lck_pins), GROUP("lr_lcdrom_grp", lr_lcdrom_pins), GROUP("lvds_analog_grp", lvds_analog_pins), - GROUP("nd_df_grp", nd_df_pins), - GROUP("nd_df_nowp_grp", nd_df_nowp_pins), + GROUP("nd_df_basic_grp", nd_df_basic_pins), + GROUP("nd_df_wp_grp", nd_df_wp_pins), + GROUP("nd_df_cs_grp", nd_df_cs_pins), GROUP("ps_grp", ps_pins), + GROUP("ps_no_dir_grp", ps_no_dir_pins), GROUP("pwc_core_on_grp", pwc_core_on_pins), GROUP("pwc_ext_on_grp", pwc_ext_on_pins), GROUP("pwc_gpio3_clk_grp", pwc_gpio3_clk_pins), @@ -1196,7 +1203,7 @@ struct atlas7_pin_group altas7_pin_groups[] = { GROUP("sd2_cdb_grp1", sd2_cdb_pins1), GROUP("sd2_wpb_grp0", sd2_wpb_pins0), GROUP("sd2_wpb_grp1", sd2_wpb_pins1), - GROUP("sd3_grp", sd3_pins), + GROUP("sd3_9_grp", sd3_9_pins), GROUP("sd5_grp", sd5_pins), GROUP("sd6_grp0", sd6_pins0), GROUP("sd6_grp1", sd6_pins1), @@ -1421,9 +1428,11 @@ static const char * const ld_ldd_fck_grp[] = { "ld_ldd_fck_grp", }; static const char * const ld_ldd_lck_grp[] = { "ld_ldd_lck_grp", }; static const char * const lr_lcdrom_grp[] = { "lr_lcdrom_grp", }; static const char * const lvds_analog_grp[] = { "lvds_analog_grp", }; -static const char * const nd_df_grp[] = { "nd_df_grp", }; -static const char * const nd_df_nowp_grp[] = { "nd_df_nowp_grp", }; +static const char * const nd_df_basic_grp[] = { "nd_df_basic_grp", }; +static const char * const nd_df_wp_grp[] = { "nd_df_wp_grp", }; +static const char * const nd_df_cs_grp[] = { "nd_df_cs_grp", }; static const char * const ps_grp[] = { "ps_grp", }; +static const char * const ps_no_dir_grp[] = { "ps_no_dir_grp", }; static const char * const pwc_core_on_grp[] = { "pwc_core_on_grp", }; static const char * const pwc_ext_on_grp[] = { "pwc_ext_on_grp", }; static const char * const pwc_gpio3_clk_grp[] = { "pwc_gpio3_clk_grp", }; @@ -1478,7 +1487,7 @@ static const char * const sd2_cdb_grp0[] = { "sd2_cdb_grp0", }; static const char * const sd2_cdb_grp1[] = { "sd2_cdb_grp1", }; static const char * const sd2_wpb_grp0[] = { "sd2_wpb_grp0", }; static const char * const sd2_wpb_grp1[] = { "sd2_wpb_grp1", }; -static const char * const sd3_grp[] = { "sd3_grp", }; +static const char * const sd3_9_grp[] = { "sd3_9_grp", }; static const char * const sd5_grp[] = { "sd5_grp", }; static const char * const sd6_grp0[] = { "sd6_grp0", }; static const char * const sd6_grp1[] = { "sd6_grp1", }; @@ -3174,7 +3183,7 @@ static struct atlas7_grp_mux lvds_analog_grp_mux = { .pad_mux_list = lvds_analog_grp_pad_mux, }; -static struct atlas7_pad_mux nd_df_grp_pad_mux[] = { +static struct atlas7_pad_mux nd_df_basic_grp_pad_mux[] = { MUX(1, 44, 1, N, N, N, N), MUX(1, 43, 1, N, N, N, N), MUX(1, 42, 1, N, N, N, N), @@ -3186,41 +3195,33 @@ static struct atlas7_pad_mux nd_df_grp_pad_mux[] = { MUX(1, 47, 1, N, N, N, N), MUX(1, 46, 1, N, N, N, N), MUX(1, 52, 1, N, N, N, N), - MUX(1, 51, 1, N, N, N, N), MUX(1, 45, 1, N, N, N, N), MUX(1, 49, 1, N, N, N, N), MUX(1, 50, 1, N, N, N, N), MUX(1, 48, 1, N, N, N, N), +}; + +static struct atlas7_grp_mux nd_df_basic_grp_mux = { + .pad_mux_count = ARRAY_SIZE(nd_df_basic_grp_pad_mux), + .pad_mux_list = nd_df_basic_grp_pad_mux, +}; + +static struct atlas7_pad_mux nd_df_wp_grp_pad_mux[] = { MUX(1, 124, 4, N, N, N, N), }; -static struct atlas7_grp_mux nd_df_grp_mux = { - .pad_mux_count = ARRAY_SIZE(nd_df_grp_pad_mux), - .pad_mux_list = nd_df_grp_pad_mux, +static struct 
atlas7_grp_mux nd_df_wp_grp_mux = { + .pad_mux_count = ARRAY_SIZE(nd_df_wp_grp_pad_mux), + .pad_mux_list = nd_df_wp_grp_pad_mux, }; -static struct atlas7_pad_mux nd_df_nowp_grp_pad_mux[] = { - MUX(1, 44, 1, N, N, N, N), - MUX(1, 43, 1, N, N, N, N), - MUX(1, 42, 1, N, N, N, N), - MUX(1, 41, 1, N, N, N, N), - MUX(1, 40, 1, N, N, N, N), - MUX(1, 39, 1, N, N, N, N), - MUX(1, 38, 1, N, N, N, N), - MUX(1, 37, 1, N, N, N, N), - MUX(1, 47, 1, N, N, N, N), - MUX(1, 46, 1, N, N, N, N), - MUX(1, 52, 1, N, N, N, N), +static struct atlas7_pad_mux nd_df_cs_grp_pad_mux[] = { MUX(1, 51, 1, N, N, N, N), - MUX(1, 45, 1, N, N, N, N), - MUX(1, 49, 1, N, N, N, N), - MUX(1, 50, 1, N, N, N, N), - MUX(1, 48, 1, N, N, N, N), }; -static struct atlas7_grp_mux nd_df_nowp_grp_mux = { - .pad_mux_count = ARRAY_SIZE(nd_df_nowp_grp_pad_mux), - .pad_mux_list = nd_df_nowp_grp_pad_mux, +static struct atlas7_grp_mux nd_df_cs_grp_mux = { + .pad_mux_count = ARRAY_SIZE(nd_df_cs_grp_pad_mux), + .pad_mux_list = nd_df_cs_grp_pad_mux, }; static struct atlas7_pad_mux ps_grp_pad_mux[] = { @@ -3234,6 +3235,15 @@ static struct atlas7_grp_mux ps_grp_mux = { .pad_mux_list = ps_grp_pad_mux, }; +static struct atlas7_pad_mux ps_no_dir_grp_pad_mux[] = { + MUX(1, 119, 2, N, N, N, N), +}; + +static struct atlas7_grp_mux ps_no_dir_grp_mux = { + .pad_mux_count = ARRAY_SIZE(ps_no_dir_grp_pad_mux), + .pad_mux_list = ps_no_dir_grp_pad_mux, +}; + static struct atlas7_pad_mux pwc_core_on_grp_pad_mux[] = { MUX(0, 8, 1, N, N, N, N), }; @@ -3743,7 +3753,7 @@ static struct atlas7_grp_mux sd2_wpb_grp1_mux = { .pad_mux_list = sd2_wpb_grp1_pad_mux, }; -static struct atlas7_pad_mux sd3_grp_pad_mux[] = { +static struct atlas7_pad_mux sd3_9_grp_pad_mux[] = { MUX(1, 85, 1, N, N, N, N), MUX(1, 86, 1, N, N, N, N), MUX(1, 87, 1, N, N, N, N), @@ -3752,9 +3762,9 @@ static struct atlas7_pad_mux sd3_grp_pad_mux[] = { MUX(1, 90, 1, N, N, N, N), }; -static struct atlas7_grp_mux sd3_grp_mux = { - .pad_mux_count = ARRAY_SIZE(sd3_grp_pad_mux), - .pad_mux_list = sd3_grp_pad_mux, +static struct atlas7_grp_mux sd3_9_grp_mux = { + .pad_mux_count = ARRAY_SIZE(sd3_9_grp_pad_mux), + .pad_mux_list = sd3_9_grp_pad_mux, }; static struct atlas7_pad_mux sd5_grp_pad_mux[] = { @@ -4296,6 +4306,9 @@ static struct atlas7_pad_mux vi_vip1_low8bit_grp_pad_mux[] = { MUX(1, 79, 1, N, N, N, N), MUX(1, 80, 1, N, N, N, N), MUX(1, 81, 1, N, N, N, N), + MUX(1, 82, 1, N, N, N, N), + MUX(1, 83, 1, N, N, N, N), + MUX(1, 84, 1, N, N, N, N), }; static struct atlas7_grp_mux vi_vip1_low8bit_grp_mux = { @@ -4307,11 +4320,14 @@ static struct atlas7_pad_mux vi_vip1_high8bit_grp_pad_mux[] = { MUX(1, 82, 1, N, N, N, N), MUX(1, 83, 1, N, N, N, N), MUX(1, 84, 1, N, N, N, N), - MUX(1, 108, 2, N, N, N, N), MUX(1, 103, 2, N, N, N, N), MUX(1, 104, 2, N, N, N, N), MUX(1, 105, 2, N, N, N, N), MUX(1, 106, 2, N, N, N, N), + MUX(1, 107, 2, N, N, N, N), + MUX(1, 102, 2, N, N, N, N), + MUX(1, 97, 2, N, N, N, N), + MUX(1, 98, 2, N, N, N, N), }; static struct atlas7_grp_mux vi_vip1_high8bit_grp_mux = { @@ -4598,9 +4614,11 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = { FUNCTION("ld_ldd_lck", ld_ldd_lck_grp, &ld_ldd_lck_grp_mux), FUNCTION("lr_lcdrom", lr_lcdrom_grp, &lr_lcdrom_grp_mux), FUNCTION("lvds_analog", lvds_analog_grp, &lvds_analog_grp_mux), - FUNCTION("nd_df", nd_df_grp, &nd_df_grp_mux), - FUNCTION("nd_df_nowp", nd_df_nowp_grp, &nd_df_nowp_grp_mux), + FUNCTION("nd_df_basic", nd_df_basic_grp, &nd_df_basic_grp_mux), + FUNCTION("nd_df_wp", nd_df_wp_grp, &nd_df_wp_grp_mux), + FUNCTION("nd_df_cs", nd_df_cs_grp, 
&nd_df_cs_grp_mux), FUNCTION("ps", ps_grp, &ps_grp_mux), + FUNCTION("ps_no_dir", ps_no_dir_grp, &ps_no_dir_grp_mux), FUNCTION("pwc_core_on", pwc_core_on_grp, &pwc_core_on_grp_mux), FUNCTION("pwc_ext_on", pwc_ext_on_grp, &pwc_ext_on_grp_mux), FUNCTION("pwc_gpio3_clk", pwc_gpio3_clk_grp, &pwc_gpio3_clk_grp_mux), @@ -4686,10 +4704,11 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = { FUNCTION("sd2_cdb_m1", sd2_cdb_grp1, &sd2_cdb_grp1_mux), FUNCTION("sd2_wpb_m0", sd2_wpb_grp0, &sd2_wpb_grp0_mux), FUNCTION("sd2_wpb_m1", sd2_wpb_grp1, &sd2_wpb_grp1_mux), - FUNCTION("sd3", sd3_grp, &sd3_grp_mux), + FUNCTION("sd3", sd3_9_grp, &sd3_9_grp_mux), FUNCTION("sd5", sd5_grp, &sd5_grp_mux), FUNCTION("sd6_m0", sd6_grp0, &sd6_grp0_mux), FUNCTION("sd6_m1", sd6_grp1, &sd6_grp1_mux), + FUNCTION("sd9", sd3_9_grp, &sd3_9_grp_mux), FUNCTION("sp0_ext_ldo_on", sp0_ext_ldo_on_grp, &sp0_ext_ldo_on_grp_mux), @@ -5097,6 +5116,14 @@ static int atlas7_pmx_set_mux(struct pinctrl_dev *pctldev, pr_debug("PMX DUMP ### Function:[%s] Group:[%s] #### START >>>\n", pmx_func->name, pin_grp->name); + /* the sd3 and sd9 pin select by SYS2PCI_SDIO9SEL register */ + if (pin_grp->pins == (unsigned int *)&sd3_9_pins) { + if (!strcmp(pmx_func->name, "sd9")) + writel(1, pmx->sys2pci_base + SYS2PCI_SDIO9SEL); + else + writel(0, pmx->sys2pci_base + SYS2PCI_SDIO9SEL); + } + grp_mux = pmx_func->grpmux; for (idx = 0; idx < grp_mux->pad_mux_count; idx++) { @@ -5385,12 +5412,27 @@ static int atlas7_pinmux_probe(struct platform_device *pdev) struct atlas7_pmx *pmx; struct device_node *np = pdev->dev.of_node; u32 banks = ATLAS7_PINCTRL_REG_BANKS; + struct device_node *sys2pci_np; + struct resource res; /* Create state holders etc for this driver */ pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL); if (!pmx) return -ENOMEM; + /* The sd3 and sd9 shared all pins, and the function select by + * SYS2PCI_SDIO9SEL register + */ + sys2pci_np = of_find_node_by_name(NULL, "sys2pci"); + if (!sys2pci_np) + return -EINVAL; + ret = of_address_to_resource(sys2pci_np, 0, &res); + if (ret) + return ret; + pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(pmx->sys2pci_base)) + return -ENOMEM; + pmx->dev = &pdev->dev; pmx->pctl_data = &atlas7_ioc_data; diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index 2a8d69725de8..edf40df05ec0 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -85,12 +85,16 @@ static int sirfsoc_dt_node_to_map(struct pinctrl_dev *pctldev, /* calculate number of maps required */ for_each_child_of_node(np_config, np) { ret = of_property_read_string(np, "sirf,function", &function); - if (ret < 0) + if (ret < 0) { + of_node_put(np); return ret; + } ret = of_property_count_strings(np, "sirf,pins"); - if (ret < 0) + if (ret < 0) { + of_node_put(np); return ret; + } count += ret; } diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile index 0e400ebeb8ff..37b8412ac8a3 100644 --- a/drivers/pinctrl/spear/Makefile +++ b/drivers/pinctrl/spear/Makefile @@ -1,7 +1,7 @@ # SPEAr pinmux support obj-$(CONFIG_PINCTRL_SPEAR_PLGPIO) += pinctrl-plgpio.o -obj-$(CONFIG_PINCTRL_SPEAR) += pinctrl-spear.o +obj-y += pinctrl-spear.o obj-$(CONFIG_PINCTRL_SPEAR3XX) += pinctrl-spear3xx.o obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig index e68fd951129a..f8dbc8bec0e1 100644 --- 
a/drivers/pinctrl/sunxi/Kconfig +++ b/drivers/pinctrl/sunxi/Kconfig @@ -51,8 +51,17 @@ config PINCTRL_SUN8I_A23_R depends on RESET_CONTROLLER select PINCTRL_SUNXI_COMMON +config PINCTRL_SUN8I_H3 + def_bool MACH_SUN8I + select PINCTRL_SUNXI_COMMON + config PINCTRL_SUN9I_A80 def_bool MACH_SUN9I select PINCTRL_SUNXI_COMMON +config PINCTRL_SUN9I_A80_R + def_bool MACH_SUN9I + depends on RESET_CONTROLLER + select PINCTRL_SUNXI_COMMON + endif diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile index e08029034510..ef82f22bb9ef 100644 --- a/drivers/pinctrl/sunxi/Makefile +++ b/drivers/pinctrl/sunxi/Makefile @@ -13,4 +13,6 @@ obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o obj-$(CONFIG_PINCTRL_SUN8I_A33) += pinctrl-sun8i-a33.o obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o +obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o +obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c new file mode 100644 index 000000000000..77d4cf047cee --- /dev/null +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c @@ -0,0 +1,515 @@ +/* + * Allwinner H3 SoCs pinctrl driver. + * + * Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com> + * + * Based on pinctrl-sun8i-a23.c, which is: + * Copyright (C) 2014 Chen-Yu Tsai <wens@csie.org> + * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-sunxi.h" + +static const struct sunxi_desc_pin sun8i_h3_pins[] = { + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* TX */ + SUNXI_FUNCTION(0x3, "jtag"), /* MS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RX */ + SUNXI_FUNCTION(0x3, "jtag"), /* CK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ + SUNXI_FUNCTION(0x3, "jtag"), /* DO */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ + SUNXI_FUNCTION(0x3, "jtag"), /* DI */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart0"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart0"), /* RX */ + SUNXI_FUNCTION(0x3, "pwm0"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "sim"), /* PWREN */ + 
SUNXI_FUNCTION(0x3, "pwm1"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "sim"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "sim"), /* DATA */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "sim"), /* RST */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "sim"), /* DET */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */ + SUNXI_FUNCTION(0x3, "di"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */ + SUNXI_FUNCTION(0x3, "di"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* CS */ + SUNXI_FUNCTION(0x3, "uart3"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* CLK */ + SUNXI_FUNCTION(0x3, "uart3"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */ + SUNXI_FUNCTION(0x3, "uart3"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* MISO */ + SUNXI_FUNCTION(0x3, "uart3"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spdif"), /* OUT */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* SYNC */ + SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* CLK */ + SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* DOUT */ + SUNXI_FUNCTION(0x3, "sim"), /* VPPEN */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* DIN */ + SUNXI_FUNCTION(0x3, "sim"), /* VPPPP */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */ + /* Hole 
*/ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* WE */ + SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* ALE */ + SUNXI_FUNCTION(0x3, "spi0")), /* MISO */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* CLE */ + SUNXI_FUNCTION(0x3, "spi0")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */ + SUNXI_FUNCTION(0x3, "spi0")), /* CS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* RE */ + SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0")), /* RB1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand"), /* DQS */ + SUNXI_FUNCTION(0x3, "mmc2")), /* RST */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXD3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXD2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, 
"gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXD1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXD0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXCTL/RXDV */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* RXERR */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXD3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXD2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXD1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXD0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* CRS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXCTL/TXEN */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* TXERR */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* CLKIN/COL */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* MDC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "emac")), /* MDIO */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* PCLK */ + SUNXI_FUNCTION(0x3, "ts")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* MCLK */ + SUNXI_FUNCTION(0x3, "ts")), /* ERR */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */ + SUNXI_FUNCTION(0x3, "ts")), /* SYNC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */ + SUNXI_FUNCTION(0x3, "ts")), /* DVLD */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D0 */ + SUNXI_FUNCTION(0x3, "ts")), /* D0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D1 */ + SUNXI_FUNCTION(0x3, "ts")), /* D1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), 
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */ + SUNXI_FUNCTION(0x3, "ts")), /* D2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D3 */ + SUNXI_FUNCTION(0x3, "ts")), /* D3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D4 */ + SUNXI_FUNCTION(0x3, "ts")), /* D4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D5 */ + SUNXI_FUNCTION(0x3, "ts")), /* D5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D6 */ + SUNXI_FUNCTION(0x3, "ts")), /* D6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* D7 */ + SUNXI_FUNCTION(0x3, "ts")), /* D7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* SCK */ + SUNXI_FUNCTION(0x3, "i2c2")), /* SCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* SDA */ + SUNXI_FUNCTION(0x3, "i2c2")), /* SDA */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */ + SUNXI_FUNCTION(0x3, "jtag")), /* MS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */ + SUNXI_FUNCTION(0x3, "jtag")), /* DI */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */ + SUNXI_FUNCTION(0x3, "uart0")), /* TX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */ + SUNXI_FUNCTION(0x3, "jtag")), /* DO */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */ + SUNXI_FUNCTION(0x3, "uart0")), /* RX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */ + SUNXI_FUNCTION(0x3, "jtag")), /* CK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PG_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PG_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PG_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, 
"gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PG_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PG_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PG_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PG_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PG_EINT7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)), /* PG_EINT8 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)), /* PG_EINT9 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* SYNC */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)), /* PG_EINT10 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)), /* PG_EINT11 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* DOUT */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)), /* PG_EINT12 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* DIN */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)), /* PG_EINT13 */ +}; + +static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = { + .pins = sun8i_h3_pins, + .npins = ARRAY_SIZE(sun8i_h3_pins), + .irq_banks = 2, +}; + +static int sun8i_h3_pinctrl_probe(struct platform_device *pdev) +{ + return sunxi_pinctrl_init(pdev, + &sun8i_h3_pinctrl_data); +} + +static const struct of_device_id sun8i_h3_pinctrl_match[] = { + { .compatible = "allwinner,sun8i-h3-pinctrl", }, + {} +}; + +static struct platform_driver sun8i_h3_pinctrl_driver = { + .probe = sun8i_h3_pinctrl_probe, + .driver = { + .name = "sun8i-h3-pinctrl", + .of_match_table = sun8i_h3_pinctrl_match, + }, +}; +builtin_platform_driver(sun8i_h3_pinctrl_driver); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c new file mode 100644 index 000000000000..42547ffa20a8 --- /dev/null +++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c @@ -0,0 +1,181 @@ +/* + * Allwinner A80 SoCs special pins pinctrl driver. + * + * Copyright (C) 2014 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/reset.h> + +#include "pinctrl-sunxi.h" + +static const struct sunxi_desc_pin sun9i_a80_r_pins[] = { + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_uart"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PL_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_uart"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PL_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* TMS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PL_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* TCK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PL_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* TDO */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PL_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* TDI */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PL_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_cir_rx"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PL_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "1wire"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PL_EINT7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_ps2"), /* SCK1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PL_EINT8 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_ps2"), /* SDA1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PL_EINT9 */ + + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PM_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PM_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PM_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PM_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_i2s1"), /* LRCKR */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PM_EINT4 */ + + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_i2c1"), /* SCK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)), /* PM_EINT8 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_i2c1"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)), /* 
PM_EINT9 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2s0"), /* MCLK */ + SUNXI_FUNCTION(0x3, "s_i2s1")), /* MCLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2s0"), /* BCLK */ + SUNXI_FUNCTION(0x3, "s_i2s1")), /* BCLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2s0"), /* LRCK */ + SUNXI_FUNCTION(0x3, "s_i2s1")), /* LRCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2s0"), /* DIN */ + SUNXI_FUNCTION(0x3, "s_i2s1")), /* DIN */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2s0"), /* DOUT */ + SUNXI_FUNCTION(0x3, "s_i2s1")), /* DOUT */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PM_EINT15 */ + + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(N, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2c0"), /* SCK */ + SUNXI_FUNCTION(0x3, "s_rsb")), /* SCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(N, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_i2c0"), /* SDA */ + SUNXI_FUNCTION(0x3, "s_rsb")), /* SDA */ +}; + +static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = { + .pins = sun9i_a80_r_pins, + .npins = ARRAY_SIZE(sun9i_a80_r_pins), + .pin_base = PL_BASE, + .irq_banks = 2, +}; + +static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev) +{ + return sunxi_pinctrl_init(pdev, + &sun9i_a80_r_pinctrl_data); +} + +static const struct of_device_id sun9i_a80_r_pinctrl_match[] = { + { .compatible = "allwinner,sun9i-a80-r-pinctrl", }, + {} +}; +MODULE_DEVICE_TABLE(of, sun9i_a80_r_pinctrl_match); + +static struct platform_driver sun9i_a80_r_pinctrl_driver = { + .probe = sun9i_a80_r_pinctrl_probe, + .driver = { + .name = "sun9i-a80-r-pinctrl", + .owner = THIS_MODULE, + .of_match_table = sun9i_a80_r_pinctrl_match, + }, +}; +module_platform_driver(sun9i_a80_r_pinctrl_driver); + +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); +MODULE_DESCRIPTION("Allwinner A80 R_PIO pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig index ad907072e09f..7abd614dc383 100644 --- a/drivers/pinctrl/uniphier/Kconfig +++ b/drivers/pinctrl/uniphier/Kconfig @@ -1,32 +1,35 @@ -if ARCH_UNIPHIER - -config PINCTRL_UNIPHIER - bool +menuconfig PINCTRL_UNIPHIER + bool "UniPhier SoC pinctrl drivers" + depends on ARCH_UNIPHIER + depends on OF && MFD_SYSCON + default y select PINMUX select GENERIC_PINCONF +if PINCTRL_UNIPHIER + config PINCTRL_UNIPHIER_PH1_LD4 tristate "UniPhier PH1-LD4 SoC pinctrl driver" - select PINCTRL_UNIPHIER + default y config PINCTRL_UNIPHIER_PH1_PRO4 tristate "UniPhier PH1-Pro4 SoC pinctrl driver" - select PINCTRL_UNIPHIER + default y config PINCTRL_UNIPHIER_PH1_SLD8 tristate "UniPhier PH1-sLD8 SoC pinctrl driver" - select PINCTRL_UNIPHIER + default y config PINCTRL_UNIPHIER_PH1_PRO5 tristate "UniPhier PH1-Pro5 SoC pinctrl driver" - select PINCTRL_UNIPHIER + default y config PINCTRL_UNIPHIER_PROXSTREAM2 tristate "UniPhier ProXstream2 SoC pinctrl driver" - select PINCTRL_UNIPHIER + 
default y config PINCTRL_UNIPHIER_PH1_LD6B tristate "UniPhier PH1-LD6b SoC pinctrl driver" - select PINCTRL_UNIPHIER + default y endif diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index f2d77fe696ac..cb8a9c2a3a1f 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -43,8 +43,6 @@ MODULE_LICENSE("GPL"); #define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492" -static int acpi_video; - MODULE_ALIAS("wmi:"DELL_EVENT_GUID); /* @@ -159,7 +157,8 @@ static void dell_wmi_process_key(int reported_key) /* Don't report brightness notifications that will also come via ACPI */ if ((key->keycode == KEY_BRIGHTNESSUP || - key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) + key->keycode == KEY_BRIGHTNESSDOWN) && + acpi_video_handles_brightness_key_presses()) return; sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true); @@ -398,7 +397,6 @@ static int __init dell_wmi_init(void) } dmi_walk(find_hk_type, NULL); - acpi_video = acpi_video_get_backlight_type() != acpi_backlight_vendor; err = dell_wmi_input_setup(); if (err) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 0bed4733c4f0..f453d5dc085e 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3488,7 +3488,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* Do not issue duplicate brightness change events to * userspace. tpacpi_detect_brightness_capabilities() must have * been called before this point */ - if (acpi_video_get_backlight_type() != acpi_backlight_vendor) { + if (acpi_video_handles_brightness_key_presses()) { pr_info("This ThinkPad has standard ACPI backlight " "brightness control, supported by the ACPI " "video driver\n"); diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index 153a493b5413..63452f20e3e9 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -74,7 +74,6 @@ void pnp_device_detach(struct pnp_dev *pnp_dev) if (pnp_dev->status == PNP_ATTACHED) pnp_dev->status = PNP_READY; mutex_unlock(&pnp_lock); - pnp_disable_dev(pnp_dev); } static int pnp_device_probe(struct device *dev) @@ -131,6 +130,11 @@ static int pnp_device_remove(struct device *dev) drv->remove(pnp_dev); pnp_dev->driver = NULL; } + + if (pnp_dev->active && + (!drv || !(drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE))) + pnp_disable_dev(pnp_dev); + pnp_device_detach(pnp_dev); return 0; } diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 943c1cb9566c..f700723ca5d6 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -343,6 +343,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev) static const unsigned int mch_quirk_devices[] = { 0x0154, /* Ivy Bridge */ 0x0c00, /* Haswell */ + 0x1604, /* Broadwell */ }; static struct pci_dev *get_intel_host(void) diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index cc97f0869791..6c592dc71aee 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -388,7 +388,7 @@ static int get_domain_enable(struct powercap_zone *power_zone, bool *mode) } /* per RAPL domain ops, in the order of rapl_domain_type */ -static struct powercap_zone_ops zone_ops[] = { +static const struct powercap_zone_ops zone_ops[] = { /* RAPL_DOMAIN_PACKAGE */ { .get_energy_uj = get_energy_counter, @@ -584,7 +584,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, return ret; } -static struct powercap_zone_constraint_ops constraint_ops = { +static const struct 
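
The dell-wmi and thinkpad_acpi hunks above drop a flag cached at probe time and instead call acpi_video_handles_brightness_key_presses() for every event, presumably so the filtering tracks whether the ACPI video driver is actually delivering those key presses at that moment rather than whatever was true at module load. A toy stand-alone illustration of querying the predicate per event instead of caching it once (the helper below is invented for the example and only simulates the driver showing up late):

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for acpi_video_handles_brightness_key_presses(): the answer may
 * change after this module loads, so callers ask every time. */
static bool video_handles_brightness_keys(void)
{
	static int calls;
	return ++calls > 1;         /* pretend the video driver registers late */
}

static void report_key(int keycode)
{
	if (video_handles_brightness_keys()) {
		printf("key %d suppressed (video driver will report it)\n", keycode);
		return;
	}
	printf("key %d reported\n", keycode);
}

int main(void)
{
	report_key(224);    /* brightness-down key, early in boot */
	report_key(225);    /* later: now handled elsewhere, so suppressed */
	return 0;
}
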
powercap_zone_constraint_ops constraint_ops = { .set_power_limit_uw = set_power_limit, .get_power_limit_uw = get_current_power_limit, .set_time_window_us = set_time_window, @@ -988,16 +988,16 @@ static void set_floor_freq_atom(struct rapl_domain *rd, bool enable) } if (!power_ctrl_orig_val) - iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ, - rapl_defaults->floor_freq_reg_addr, - &power_ctrl_orig_val); + iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_CR_READ, + rapl_defaults->floor_freq_reg_addr, + &power_ctrl_orig_val); mdata = power_ctrl_orig_val; if (enable) { mdata &= ~(0x7f << 8); mdata |= 1 << 8; } - iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE, - rapl_defaults->floor_freq_reg_addr, mdata); + iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_CR_WRITE, + rapl_defaults->floor_freq_reg_addr, mdata); } static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, @@ -1341,10 +1341,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { /* check if the domain is locked by BIOS */ - if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { + ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked); + if (ret) + return ret; + if (locked) { pr_info("RAPL package %d domain %s locked by BIOS\n", rp->id, rd->name); - rd->state |= DOMAIN_STATE_BIOS_LOCKED; + rd->state |= DOMAIN_STATE_BIOS_LOCKED; } } diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 84419af16f77..14bde0db8c24 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -293,8 +293,8 @@ err_alloc: } static int create_constraints(struct powercap_zone *power_zone, - int nr_constraints, - struct powercap_zone_constraint_ops *const_ops) + int nr_constraints, + const struct powercap_zone_constraint_ops *const_ops) { int i; int ret = 0; @@ -492,13 +492,13 @@ static struct class powercap_class = { }; struct powercap_zone *powercap_register_zone( - struct powercap_zone *power_zone, - struct powercap_control_type *control_type, - const char *name, - struct powercap_zone *parent, - const struct powercap_zone_ops *ops, - int nr_constraints, - struct powercap_zone_constraint_ops *const_ops) + struct powercap_zone *power_zone, + struct powercap_control_type *control_type, + const char *name, + struct powercap_zone *parent, + const struct powercap_zone_ops *ops, + int nr_constraints, + const struct powercap_zone_constraint_ops *const_ops) { int result; int nr_attrs; diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index 284b587da65c..d6c853bbfa9f 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -483,24 +483,23 @@ static int da9063_rtc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rtc); + rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC, + &da9063_rtc_ops, THIS_MODULE); + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); + + da9063_data_to_tm(data, &rtc->alarm_time, rtc); + rtc->rtc_sync = false; + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, da9063_alarm_event, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "ALARM", rtc); - if (ret) { + if (ret) dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", irq_alarm, ret); - return ret; - } - - rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC, - &da9063_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc->rtc_dev)) - return PTR_ERR(rtc->rtc_dev); - da9063_data_to_tm(data, &rtc->alarm_time, rtc); - rtc->rtc_sync = false; 
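
The powercap hunks above constify the zone_ops[] and constraint_ops tables and propagate const through powercap_register_zone() and create_constraints(): function-pointer tables that are never modified can then live in read-only data, and an accidental store becomes a compile-time error. (The neighbouring intel_rapl hunk is a separate fix: the "locked by BIOS" state is now set only when the FW_LOCK read succeeds and the returned value reports a lock, instead of whenever the read fails.) A minimal sketch of the const-ops-table idiom in plain C, with made-up ops names:

#include <stdio.h>

struct zone_ops {
	int (*get_energy_uj)(void);
	int (*set_enable)(int on);
};

static int dummy_energy(void) { return 42; }
static int dummy_enable(int on) { (void)on; return 0; }

/* const: the table lands in .rodata and cannot be written at run time. */
static const struct zone_ops ops = {
	.get_energy_uj = dummy_energy,
	.set_enable    = dummy_enable,
};

/* Every consumer takes a pointer-to-const, mirroring the powercap change. */
static void register_zone(const struct zone_ops *o)
{
	printf("energy=%d\n", o->get_energy_uj());
}

int main(void)
{
	register_zone(&ops);
	/* ops.set_enable = NULL;   would now fail to compile */
	return 0;
}
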
return ret; } diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c index 91ca0bc1b484..35c9aada07c8 100644 --- a/drivers/rtc/rtc-rk808.c +++ b/drivers/rtc/rtc-rk808.c @@ -56,6 +56,42 @@ struct rk808_rtc { int irq; }; +/* + * The Rockchip calendar used by the RK808 counts November with 31 days. We use + * these translation functions to convert its dates to/from the Gregorian + * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016 + * as the day when both calendars were in sync, and treat all other dates + * relative to that. + * NOTE: Other system software (e.g. firmware) that reads the same hardware must + * implement this exact same conversion algorithm, with the same anchor date. + */ +static time64_t nov2dec_transitions(struct rtc_time *tm) +{ + return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0); +} + +static void rockchip_to_gregorian(struct rtc_time *tm) +{ + /* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */ + time64_t time = rtc_tm_to_time64(tm); + rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm); +} + +static void gregorian_to_rockchip(struct rtc_time *tm) +{ + time64_t extra_days = nov2dec_transitions(tm); + time64_t time = rtc_tm_to_time64(tm); + rtc_time64_to_tm(time - extra_days * 86400, tm); + + /* Compensate if we went back over Nov 31st (will work up to 2381) */ + if (nov2dec_transitions(tm) < extra_days) { + if (tm->tm_mon + 1 == 11) + tm->tm_mday++; /* This may result in 31! */ + else + rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm); + } +} + /* Read current time and date in RTC */ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) { @@ -101,9 +137,10 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); + rockchip_to_gregorian(tm); dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, - tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); + tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); return ret; } @@ -116,6 +153,10 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) u8 rtc_data[NUM_TIME_REGS]; int ret; + dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", + 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, + tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); + gregorian_to_rockchip(tm); rtc_data[0] = bin2bcd(tm->tm_sec); rtc_data[1] = bin2bcd(tm->tm_min); rtc_data[2] = bin2bcd(tm->tm_hour); @@ -123,9 +164,6 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_data[4] = bin2bcd(tm->tm_mon + 1); rtc_data[5] = bin2bcd(tm->tm_year - 100); rtc_data[6] = bin2bcd(tm->tm_wday); - dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", - 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, - tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); /* Stop RTC while updating the RTC registers */ ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, @@ -170,6 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; + rockchip_to_gregorian(&alrm->time); ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); if (ret) { @@ -227,6 
+266,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, alrm->time.tm_min, alrm->time.tm_sec); + gregorian_to_rockchip(&alrm->time); alrm_data[0] = bin2bcd(alrm->time.tm_sec); alrm_data[1] = bin2bcd(alrm->time.tm_min); alrm_data[2] = bin2bcd(alrm->time.tm_hour); diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 7bc6df3100ef..6804354c42bd 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -40,10 +40,14 @@ struct read_info_sccb { u8 fac85; /* 85 */ u8 _pad_86[91 - 86]; /* 86-90 */ u8 flags; /* 91 */ - u8 _pad_92[100 - 92]; /* 92-99 */ + u8 _pad_92[99 - 92]; /* 92-98 */ + u8 hamaxpow; /* 99 */ u32 rnsize2; /* 100-103 */ u64 rnmax2; /* 104-111 */ - u8 _pad_112[120 - 112]; /* 112-119 */ + u8 _pad_112[116 - 112]; /* 112-115 */ + u8 fac116; /* 116 */ + u8 _pad_117[119 - 117]; /* 117-118 */ + u8 fac119; /* 119 */ u16 hcpua; /* 120-121 */ u8 _pad_122[4096 - 122]; /* 122-4095 */ } __packed __aligned(PAGE_SIZE); @@ -108,6 +112,8 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp.facilities = sccb->facilities; sclp.has_sprp = !!(sccb->fac84 & 0x02); sclp.has_core_type = !!(sccb->fac84 & 0x01); + sclp.has_esca = !!(sccb->fac116 & 0x08); + sclp.has_hvs = !!(sccb->fac119 & 0x80); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; @@ -115,6 +121,11 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp.rzm <<= 20; sclp.ibc = sccb->ibc; + if (sccb->hamaxpow && sccb->hamaxpow < 64) + sclp.hamax = (1UL << sccb->hamaxpow) - 1; + else + sclp.hamax = U64_MAX; + if (!sccb->hcpua) { if (MACHINE_IS_VM) sclp.max_cores = 64; @@ -131,6 +142,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) continue; sclp.has_siif = cpue->siif; sclp.has_sigpif = cpue->sigpif; + sclp.has_sief2 = cpue->sief2; break; } diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 0fdedadff7bc..2a67b496a9e2 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -88,14 +88,9 @@ vmcp_write(struct file *file, const char __user *buff, size_t count, if (count > 240) return -EINVAL; - cmd = kmalloc(count + 1, GFP_KERNEL); - if (!cmd) - return -ENOMEM; - if (copy_from_user(cmd, buff, count)) { - kfree(cmd); - return -EFAULT; - } - cmd[count] = '\0'; + cmd = memdup_user_nul(buff, count); + if (IS_ERR(cmd)) + return PTR_ERR(cmd); session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) { kfree(cmd); diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 0efb27f6f199..6c30e93ab8fa 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -782,24 +782,11 @@ static int ur_release(struct inode *inode, struct file *file) static loff_t ur_llseek(struct file *file, loff_t offset, int whence) { - loff_t newpos; - if ((file->f_flags & O_ACCMODE) != O_RDONLY) return -ESPIPE; /* seek allowed only for reader */ if (offset % PAGE_SIZE) return -ESPIPE; /* only multiples of 4K allowed */ - switch (whence) { - case 0: /* SEEK_SET */ - newpos = offset; - break; - case 1: /* SEEK_CUR */ - newpos = file->f_pos + offset; - break; - default: - return -EINVAL; - } - file->f_pos = newpos; - return newpos; + return no_seek_end_llseek(file, offset, whence); } static const struct file_operations ur_fops = { diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 
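
The rk808 hunks above convert between the hardware calendar, which gives November 31 days, and the Gregorian one, using Jan 1st 2016 as the anchor where both agree: each November-to-December transition since the anchor puts the raw hardware date one day behind the real date, so reads add that many days and writes subtract them (re-adding a day when the subtraction lands back in November, which can legitimately produce Nov 31). A self-contained sketch of the same arithmetic in plain C, using days-since-epoch math in place of the kernel's rtc_tm_to_time64()/rtc_time64_to_tm() helpers (names mirror the driver's, but this is not the driver code):

#include <stdio.h>

struct date { int y, m, d; };               /* proleptic Gregorian */

/* Days since 1970-01-01 (Howard Hinnant's civil-date algorithm). */
static long days_from_civil(int y, int m, int d)
{
	long era;
	unsigned yoe, doy, doe;

	y -= m <= 2;
	era = (y >= 0 ? y : y - 399) / 400;
	yoe = (unsigned)(y - era * 400);
	doy = (153u * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1;
	doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;
	return era * 146097 + (long)doe - 719468;
}

static struct date civil_from_days(long z)
{
	long era, y;
	unsigned doe, yoe, doy, mp, d, m;

	z += 719468;
	era = (z >= 0 ? z : z - 146096) / 146097;
	doe = (unsigned)(z - era * 146097);
	yoe = (doe - doe / 1460 + doe / 36524 - doe / 146096) / 365;
	y = (long)yoe + era * 400;
	doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
	mp = (5 * doy + 2) / 153;
	d = doy - (153 * mp + 2) / 5 + 1;
	m = mp + (mp < 10 ? 3 : -9);
	return (struct date){ (int)(y + (m <= 2)), (int)m, (int)d };
}

/* Nov->Dec transitions since the Jan 1st 2016 anchor, as in the driver. */
static long nov2dec_transitions(const struct date *t)
{
	return (t->y - 2016) + (t->m > 11 ? 1 : 0);
}

static struct date rockchip_to_gregorian(struct date t)
{
	/* A hardware "Nov 31" lands on the same serial day as Dec 1 here,
	 * just like rtc_tm_to_time64() in the driver. */
	return civil_from_days(days_from_civil(t.y, t.m, t.d) +
			       nov2dec_transitions(&t));
}

static struct date gregorian_to_rockchip(struct date t)
{
	long extra = nov2dec_transitions(&t);
	struct date r = civil_from_days(days_from_civil(t.y, t.m, t.d) - extra);

	if (nov2dec_transitions(&r) < extra) {
		if (r.m == 11)
			r.d++;                       /* may yield Nov 31 */
		else
			r = civil_from_days(days_from_civil(t.y, t.m, t.d) -
					    (extra - 1));
	}
	return r;
}

int main(void)
{
	struct date g = rockchip_to_gregorian((struct date){ 2016, 12, 1 });
	struct date r = gregorian_to_rockchip((struct date){ 2016, 12, 1 });

	printf("hardware 2016-12-01 -> Gregorian %d-%02d-%02d\n", g.y, g.m, g.d);
	printf("Gregorian 2016-12-01 -> hardware %d-%02d-%02d\n", r.y, r.m, r.d);
	return 0;
}

Run, this prints "hardware 2016-12-01 -> Gregorian 2016-12-02" and "Gregorian 2016-12-01 -> hardware 2016-11-31", matching the behaviour described in the driver's comments.
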
823f41fc4bbd..3339b862ec17 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -385,18 +385,7 @@ static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) loff_t rc; mutex_lock(&zcore_mutex); - switch (orig) { - case 0: - file->f_pos = offset; - rc = file->f_pos; - break; - case 1: - file->f_pos += offset; - rc = file->f_pos; - break; - default: - rc = -EINVAL; - } + rc = no_seek_end_llseek(file, offset, orig); mutex_unlock(&zcore_mutex); return rc; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 61f768518a34..24ec282e15d8 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev) status = ap_sm_recv(ap_dev); switch (status.response_code) { case AP_RESPONSE_NORMAL: - if (ap_dev->queue_count > 0) + if (ap_dev->queue_count > 0) { + ap_dev->state = AP_STATE_WORKING; return AP_WAIT_AGAIN; + } ap_dev->state = AP_STATE_IDLE; return AP_WAIT_NONE; case AP_RESPONSE_NO_PENDING_REPLY: diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 05c37d6d4afe..c3e22523faf3 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1677,11 +1677,8 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) ccw_device_set_offline(cgdev->cdev[1]); ccw_device_set_offline(cgdev->cdev[0]); - - if (priv->channel[CTCM_READ]) - channel_remove(priv->channel[CTCM_READ]); - if (priv->channel[CTCM_WRITE]) - channel_remove(priv->channel[CTCM_WRITE]); + channel_remove(priv->channel[CTCM_READ]); + channel_remove(priv->channel[CTCM_WRITE]); priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL; return 0; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 1766a20ebcb1..ec2e014e885c 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -981,6 +981,10 @@ int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, int (*reply_cb)(struct qeth_card *, struct qeth_reply *, unsigned long), void *); +struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, + enum qeth_ipa_funcs, + __u16, __u16, + enum qeth_prot_versions); int qeth_start_ipa_tx_checksum(struct qeth_card *); int qeth_set_rx_csum(struct qeth_card *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 31ac53fa5cee..787153764120 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2684,8 +2684,6 @@ void qeth_print_status_message(struct qeth_card *card) sprintf(card->info.mcl_level, "%02x%02x", card->info.mcl_level[2], card->info.mcl_level[3]); - - card->info.mcl_level[QETH_MCL_LENGTH] = 0; break; } /* fallthrough */ @@ -5297,10 +5295,10 @@ static int qeth_setassparms_cb(struct qeth_card *card, return 0; } -static struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, - enum qeth_ipa_funcs ipa_func, - __u16 cmd_code, __u16 len, - enum qeth_prot_versions prot) +struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, __u16 len, + enum qeth_prot_versions prot) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@ -5319,6 +5317,7 @@ static struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, return iob; } +EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); int qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob, __u16 len, long data, diff --git 
a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8f1b091e1732..80b1979e8d95 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1126,6 +1126,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) qeth_l2_request_initial_mac(card); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); + netif_carrier_off(card->dev); return register_netdev(card->dev); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 543960e96b42..7c8c68c26540 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1043,28 +1043,6 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, return 0; } -static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( - struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code, - __u16 len, enum qeth_prot_versions prot) -{ - struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 4, "getasscm"); - iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); - - if (iob) { - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setassparms.hdr.assist_no = ipa_func; - cmd->data.setassparms.hdr.length = 8 + len; - cmd->data.setassparms.hdr.command_code = cmd_code; - cmd->data.setassparms.hdr.return_code = 0; - cmd->data.setassparms.hdr.seq_no = 0; - } - - return iob; -} - #ifdef CONFIG_QETH_IPV6 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code) @@ -1073,7 +1051,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 4, "simassp6"); - iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, 0, QETH_PROT_IPV6); if (!iob) return -ENOMEM; @@ -2344,10 +2322,11 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); - iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_QUERY_INFO, - sizeof(struct qeth_arp_query_data) - sizeof(char), - prot); + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_QUERY_INFO, + sizeof(struct qeth_arp_query_data) + - sizeof(char), + prot); if (!iob) return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -2439,7 +2418,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, return -EOPNOTSUPP; } - iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, IPA_CMD_ASS_ARP_ADD_ENTRY, sizeof(struct qeth_arp_cache_entry), QETH_PROT_IPV4); @@ -2480,7 +2459,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, return -EOPNOTSUPP; } memcpy(buf, entry, 12); - iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, IPA_CMD_ASS_ARP_REMOVE_ENTRY, 12, QETH_PROT_IPV4); @@ -2818,7 +2797,7 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) { unsigned long tcpd = (unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4; - int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); + int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data); int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); elements += qeth_get_elements_for_frags(skb); @@ -3220,6 +3199,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) 
SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); + netif_carrier_off(card->dev); return register_netdev(card->dev); } diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index b2a1a81e6fc8..1b831598df7c 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -984,6 +984,36 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev, return vq; } +static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev, + __u32 activity) +{ + if (vcdev->curr_io & activity) { + switch (activity) { + case VIRTIO_CCW_DOING_READ_FEAT: + case VIRTIO_CCW_DOING_WRITE_FEAT: + case VIRTIO_CCW_DOING_READ_CONFIG: + case VIRTIO_CCW_DOING_WRITE_CONFIG: + case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_SET_VQ: + case VIRTIO_CCW_DOING_SET_IND: + case VIRTIO_CCW_DOING_SET_CONF_IND: + case VIRTIO_CCW_DOING_RESET: + case VIRTIO_CCW_DOING_READ_VQ_CONF: + case VIRTIO_CCW_DOING_SET_IND_ADAPTER: + case VIRTIO_CCW_DOING_SET_VIRTIO_REV: + vcdev->curr_io &= ~activity; + wake_up(&vcdev->wait_q); + break; + default: + /* don't know what to do... */ + dev_warn(&vcdev->cdev->dev, + "Suspicious activity '%08x'\n", activity); + WARN_ON(1); + break; + } + } +} + static void virtio_ccw_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) @@ -995,6 +1025,12 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, if (!vcdev) return; + if (IS_ERR(irb)) { + vcdev->err = PTR_ERR(irb); + virtio_ccw_check_activity(vcdev, activity); + /* Don't poke around indicators, something's wrong. */ + return; + } /* Check if it's a notification from the host. */ if ((intparm == 0) && (scsw_stctl(&irb->scsw) == @@ -1010,31 +1046,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, /* Map everything else to -EIO. */ vcdev->err = -EIO; } - if (vcdev->curr_io & activity) { - switch (activity) { - case VIRTIO_CCW_DOING_READ_FEAT: - case VIRTIO_CCW_DOING_WRITE_FEAT: - case VIRTIO_CCW_DOING_READ_CONFIG: - case VIRTIO_CCW_DOING_WRITE_CONFIG: - case VIRTIO_CCW_DOING_WRITE_STATUS: - case VIRTIO_CCW_DOING_SET_VQ: - case VIRTIO_CCW_DOING_SET_IND: - case VIRTIO_CCW_DOING_SET_CONF_IND: - case VIRTIO_CCW_DOING_RESET: - case VIRTIO_CCW_DOING_READ_VQ_CONF: - case VIRTIO_CCW_DOING_SET_IND_ADAPTER: - case VIRTIO_CCW_DOING_SET_VIRTIO_REV: - vcdev->curr_io &= ~activity; - wake_up(&vcdev->wait_q); - break; - default: - /* don't know what to do... */ - dev_warn(&cdev->dev, "Suspicious activity '%08x'\n", - activity); - WARN_ON(1); - break; - } - } + virtio_ccw_check_activity(vcdev, activity); for_each_set_bit(i, &vcdev->indicators, sizeof(vcdev->indicators) * BITS_PER_BYTE) { /* The bit clear must happen before the vring kick. 
*/ diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 5843288f64bc..e077ebd89319 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c @@ -390,16 +390,9 @@ static int copyin_string(char __user *user, size_t len, char **ptr) if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0) return -EINVAL; - tmp = kmalloc(len + 1, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - if (copy_from_user(tmp, user, len)) { - kfree(tmp); - return -EFAULT; - } - - tmp[len] = '\0'; + tmp = memdup_user_nul(user, len); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); *ptr = tmp; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index f4424063b860..0efe7112fc1f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1625,7 +1625,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) /* crc offload */ if (likely(lport->crc_offload)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_headroom(skb); skb->csum_offset = skb->len; crc = 0; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index e4b799837948..459abe1dcc87 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -219,13 +219,13 @@ static int sdev_runtime_suspend(struct device *dev) struct scsi_device *sdev = to_scsi_device(dev); int err = 0; - if (pm && pm->runtime_suspend) { - err = blk_pre_runtime_suspend(sdev->request_queue); - if (err) - return err; + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (pm && pm->runtime_suspend) err = pm->runtime_suspend(dev); - blk_post_runtime_suspend(sdev->request_queue, err); - } + blk_post_runtime_suspend(sdev->request_queue, err); + return err; } @@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int err = 0; - if (pm && pm->runtime_resume) { - blk_pre_runtime_resume(sdev->request_queue); + blk_pre_runtime_resume(sdev->request_queue); + if (pm && pm->runtime_resume) err = pm->runtime_resume(dev); - blk_post_runtime_resume(sdev->request_queue, err); - } + blk_post_runtime_resume(sdev->request_queue, err); + return err; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 3d22fc3e3c1a..4e08d1cd704d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2885,10 +2885,13 @@ static int sd_revalidate_disk(struct gendisk *disk) /* * Use the device's preferred I/O size for reads and writes - * unless the reported value is unreasonably large (or garbage). + * unless the reported value is unreasonably small, large, or + * garbage. 
*/ - if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max && - sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS) + if (sdkp->opt_xfer_blocks && + sdkp->opt_xfer_blocks <= dev_max && + sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && + sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) rw_max = q->limits.io_opt = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); else diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index dcb0d76d7312..044d06410d4c 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -84,6 +84,7 @@ static void init_device_slot_control(unsigned char *dest_desc, static int ses_recv_diag(struct scsi_device *sdev, int page_code, void *buf, int bufflen) { + int ret; unsigned char cmd[] = { RECEIVE_DIAGNOSTIC, 1, /* Set PCV bit */ @@ -92,9 +93,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, bufflen & 0xff, 0 }; + unsigned char recv_page_code; - return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, NULL, SES_TIMEOUT, SES_RETRIES, NULL); + if (unlikely(!ret)) + return ret; + + recv_page_code = ((unsigned char *)buf)[0]; + + if (likely(recv_page_code == page_code)) + return ret; + + /* successful diagnostic but wrong page code. This happens to some + * USB devices, just print a message and pretend there was an error */ + + sdev_printk(KERN_ERR, sdev, + "Wrong diagnostic page; asked for %d got %u\n", + page_code, recv_page_code); + + return -EINVAL; } static int ses_send_diag(struct scsi_device *sdev, int page_code, @@ -541,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, if (desc_ptr) desc_ptr += len; - if (addl_desc_ptr) + if (addl_desc_ptr && + /* only find additional descriptions for specific devices */ + (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || + /* these elements are optional */ + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || + type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) addl_desc_ptr += addl_desc_ptr[1] + 2; } diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index b04b05a0904e..0ad66fa9bb1a 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -116,7 +116,7 @@ static const struct spm_reg_data spm_reg_8064_cpu = { static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv); -typedef int (*idle_fn)(int); +typedef int (*idle_fn)(void); static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops); static inline void spm_register_write(struct spm_driver_data *drv, @@ -179,10 +179,10 @@ static int qcom_pm_collapse(unsigned long int unused) return -1; } -static int qcom_cpu_spc(int cpu) +static int qcom_cpu_spc(void) { int ret; - struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu); + struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv); spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC); ret = cpu_suspend(0, qcom_pm_collapse); @@ -197,9 +197,9 @@ static int qcom_cpu_spc(int cpu) return ret; } -static int qcom_idle_enter(int cpu, unsigned long index) +static int qcom_idle_enter(unsigned long index) { - return per_cpu(qcom_idle_ops, cpu)[index](cpu); + return __this_cpu_read(qcom_idle_ops)[index](); } static const struct of_device_id qcom_idle_state_match[] __initconst = { diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 59a11437db70..39412c9097c6 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ 
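
The ses.c hunk above adds a sanity check for enclosures, notably some USB ones, that answer a RECEIVE DIAGNOSTIC request with a different page than the one requested: the first byte of a diagnostic page is its page code, so a successful transfer whose first byte does not match the requested code is logged and turned into -EINVAL instead of being parsed as if it were the expected page. (The nearby sd.c hunk applies the same defensive attitude to the reported optimal transfer length, which is now also rejected when it is smaller than a page.) A small stand-alone sketch of the page-code check, with an invented helper name:

#include <stdio.h>
#include <errno.h>

/* Return 0 if @buf really is the diagnostic page we asked for, -EINVAL if
 * the device answered with some other page (first byte = page code). */
static int check_diag_page(const unsigned char *buf, unsigned char want)
{
	unsigned char got = buf[0];

	if (got == want)
		return 0;

	fprintf(stderr, "Wrong diagnostic page; asked for %u got %u\n",
		want, got);
	return -EINVAL;
}

int main(void)
{
	unsigned char good[] = { 0x0a, 0x00 };   /* the page we asked for */
	unsigned char bad[]  = { 0x01, 0x00 };   /* some other page instead */

	printf("good: %d\n", check_diag_page(good, 0x0a));
	printf("bad:  %d\n", check_diag_page(bad, 0x0a));
	return 0;
}
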
b/drivers/spi/spi-fsl-dspi.c @@ -167,7 +167,7 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi) { unsigned int val; - regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val); + regmap_read(dspi->regmap, SPI_CTAR(0), &val); return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; } @@ -257,7 +257,7 @@ static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word) return SPI_PUSHR_TXDATA(d16) | SPI_PUSHR_PCS(dspi->cs) | - SPI_PUSHR_CTAS(dspi->cs) | + SPI_PUSHR_CTAS(0) | SPI_PUSHR_CONT; } @@ -290,7 +290,7 @@ static int dspi_eoq_write(struct fsl_dspi *dspi) */ if (tx_word && (dspi->len == 1)) { dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; - regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), + regmap_update_bits(dspi->regmap, SPI_CTAR(0), SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); tx_word = 0; } @@ -339,7 +339,7 @@ static int dspi_tcfq_write(struct fsl_dspi *dspi) if (tx_word && (dspi->len == 1)) { dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; - regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), + regmap_update_bits(dspi->regmap, SPI_CTAR(0), SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); tx_word = 0; } @@ -407,7 +407,7 @@ static int dspi_transfer_one_message(struct spi_master *master, regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); - regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), + regmap_write(dspi->regmap, SPI_CTAR(0), dspi->cur_chip->ctar_val); trans_mode = dspi->devtype_data->trans_mode; @@ -566,7 +566,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) if (!dspi->len) { if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { regmap_update_bits(dspi->regmap, - SPI_CTAR(dspi->cs), + SPI_CTAR(0), SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 2b0a8ec3affb..151b01c25b40 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1623,6 +1623,9 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, return AE_OK; } + if (spi->irq < 0) + spi->irq = acpi_dev_gpio_irq_get(adev, 0); + adev->power.flags.ignore_parent = true; strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); if (spi_add_device(spi)) { @@ -1705,7 +1708,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) master->bus_num = -1; master->num_chipselect = 1; master->dev.class = &spi_master_class; - master->dev.parent = get_device(dev); + master->dev.parent = dev; spi_master_set_devdata(master, &master[1]); return master; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 91a0fcd72423..d0e7dfc647cf 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -651,11 +651,11 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; + spin_lock_irq(&spidev->spi_lock); if (spidev->spi) spidev->speed_hz = spidev->spi->max_speed_hz; /* ... after we unbound from the underlying device? */ - spin_lock_irq(&spidev->spi_lock); dofree = (spidev->spi == NULL); spin_unlock_irq(&spidev->spi_lock); diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index 149214beeda9..0c675861623f 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -82,7 +82,7 @@ config SSB_SDIOHOST config SSB_HOST_SOC bool "Support for SSB bus on SoC" - depends on SSB + depends on SSB && BCM47XX_NVRAM help Host interface for a SSB directly mapped into memory. This is for some Broadcom SoCs from the BCM47xx and BCM53xx lines. 
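
The spidev hunk above moves the spin_lock_irq() so that spidev->spi is tested and dereferenced inside the same critical section that later decides whether the state can be freed; without that, the underlying SPI device could be unbound between the NULL check and the max_speed_hz read. A user-space sketch of the check-and-use-under-one-lock pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

/* Toy version of the spidev pattern: @dev may be cleared by an unbind path
 * at any time, so the NULL test and the dereference share one lock. */
struct ctx {
	pthread_mutex_t lock;
	int *dev;                   /* stands in for spidev->spi */
	int speed_hz;
};

static void release_path(struct ctx *c)
{
	int dofree;

	pthread_mutex_lock(&c->lock);
	if (c->dev)                          /* check ... */
		c->speed_hz = *c->dev;       /* ... and use, same section */
	dofree = (c->dev == NULL);
	pthread_mutex_unlock(&c->lock);

	if (dofree)
		printf("device already unbound, freeing state\n");
	else
		printf("kept speed %d from live device\n", c->speed_hz);
}

int main(void)
{
	int max_speed = 10000000;
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, &max_speed, 0 };

	release_path(&c);       /* device still bound */
	c.dev = NULL;           /* unbind (in real code, also under the lock) */
	release_path(&c);
	return 0;
}
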
diff --git a/drivers/ssb/host_soc.c b/drivers/ssb/host_soc.c index c809f255af34..d62992dc08b2 100644 --- a/drivers/ssb/host_soc.c +++ b/drivers/ssb/host_soc.c @@ -8,6 +8,7 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include <linux/bcm47xx_nvram.h> #include <linux/ssb/ssb.h> #include "ssb_private.h" @@ -171,3 +172,39 @@ const struct ssb_bus_ops ssb_host_soc_ops = { .block_write = ssb_host_soc_block_write, #endif }; + +int ssb_host_soc_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv) +{ + char buf[20]; + int len, err; + + /* Fill boardinfo structure */ + memset(&iv->boardinfo, 0, sizeof(struct ssb_boardinfo)); + + len = bcm47xx_nvram_getenv("boardvendor", buf, sizeof(buf)); + if (len > 0) { + err = kstrtou16(strim(buf), 0, &iv->boardinfo.vendor); + if (err) + pr_warn("Couldn't parse nvram board vendor entry with value \"%s\"\n", + buf); + } + if (!iv->boardinfo.vendor) + iv->boardinfo.vendor = SSB_BOARDVENDOR_BCM; + + len = bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)); + if (len > 0) { + err = kstrtou16(strim(buf), 0, &iv->boardinfo.type); + if (err) + pr_warn("Couldn't parse nvram board type entry with value \"%s\"\n", + buf); + } + + memset(&iv->sprom, 0, sizeof(struct ssb_sprom)); + ssb_fill_sprom_with_fallback(bus, &iv->sprom); + + if (bcm47xx_nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) + iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); + + return 0; +} diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 5d1e9a0fc389..cde5ff7529eb 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -762,15 +762,14 @@ EXPORT_SYMBOL(ssb_bus_sdiobus_register); #endif /* CONFIG_SSB_PCMCIAHOST */ #ifdef CONFIG_SSB_HOST_SOC -int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr, - ssb_invariants_func_t get_invariants) +int ssb_bus_host_soc_register(struct ssb_bus *bus, unsigned long baseaddr) { int err; bus->bustype = SSB_BUSTYPE_SSB; bus->ops = &ssb_host_soc_ops; - err = ssb_bus_register(bus, get_invariants, baseaddr); + err = ssb_bus_register(bus, ssb_host_soc_get_invariants, baseaddr); if (!err) { ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n", baseaddr); diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 15bfd5c7d2d7..c2f5d3969c8b 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -163,6 +163,9 @@ static inline int ssb_sdio_init(struct ssb_bus *bus) #ifdef CONFIG_SSB_HOST_SOC extern const struct ssb_bus_ops ssb_host_soc_ops; + +extern int ssb_host_soc_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv); #endif /* scan.c */ diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c index 195c41d7bd53..0813163f962f 100644 --- a/drivers/staging/android/ion/ion_chunk_heap.c +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap, err: sg = table->sgl; for (i -= 1; i >= 0; i--) { - gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), sg->length); sg = sg_next(sg); } @@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer) DMA_BIDIRECTIONAL); for_each_sg(table->sgl, sg, table->nents, i) { - gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), sg->length); } chunk_heap->allocated -= allocated_size; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c 
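
ssb_host_soc_get_invariants() above pulls board information out of NVRAM strings: "boardvendor" and "boardtype" are parsed with kstrtou16(), a parse failure only produces a warning, and a missing or unparseable vendor falls back to SSB_BOARDVENDOR_BCM. A stand-alone sketch of that parse-with-fallback idiom, with strtoul() in place of kstrtou16() and a made-up default value:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

#define DEFAULT_VENDOR 0x4243   /* arbitrary stand-in for SSB_BOARDVENDOR_BCM */

/* Parse a decimal or 0x-prefixed string into a u16; 0 on success. */
static int parse_u16(const char *s, uint16_t *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 0);
	if (errno || end == s || *end != '\0' || v > 0xffff)
		return -EINVAL;
	*out = (uint16_t)v;
	return 0;
}

int main(void)
{
	const char *boardvendor = "0x14e4";     /* pretend NVRAM value */
	uint16_t vendor = 0;

	if (parse_u16(boardvendor, &vendor))
		fprintf(stderr, "Couldn't parse board vendor \"%s\"\n",
			boardvendor);
	if (!vendor)
		vendor = DEFAULT_VENDOR;

	printf("board vendor: 0x%04x\n", vendor);
	return 0;
}
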
b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index 04a4653c549a..cf8e43bd3c03 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -69,7 +69,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn) /* ZC if the socket supports scatter/gather and doesn't need software * checksums */ - return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0); + return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0); } int diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 8d8220f3db83..da5f443a0768 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -126,9 +126,7 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, rc = cl_file_inode_init(inode, md); } if (rc != 0) { - make_bad_inode(inode); - unlock_new_inode(inode); - iput(inode); + iget_failed(inode); inode = ERR_PTR(rc); } else unlock_new_inode(inode); diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 32c4cf48b318..2610348f6c72 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -118,12 +118,20 @@ failed: return rc; } -static const char *ll_follow_link(struct dentry *dentry, void **cookie) +static void ll_put_link(void *p) +{ + ptlrpc_req_finished(p); +} + +static const char *ll_get_link(struct dentry *dentry, + struct inode *inode, + struct delayed_call *done) { - struct inode *inode = d_inode(dentry); struct ptlrpc_request *request = NULL; int rc; char *symname = NULL; + if (!dentry) + return ERR_PTR(-ECHILD); CDEBUG(D_VFSTRACE, "VFS Op\n"); ll_inode_size_lock(inode); @@ -135,22 +143,16 @@ static const char *ll_follow_link(struct dentry *dentry, void **cookie) } /* symname may contain a pointer to the request message buffer, - * we delay request releasing until ll_put_link then. + * we delay request releasing then. 
*/ - *cookie = request; + set_delayed_call(done, ll_put_link, request); return symname; } -static void ll_put_link(struct inode *unused, void *cookie) -{ - ptlrpc_req_finished(cookie); -} - const struct inode_operations ll_fast_symlink_inode_operations = { .readlink = generic_readlink, .setattr = ll_setattr, - .follow_link = ll_follow_link, - .put_link = ll_put_link, + .get_link = ll_get_link, .getattr = ll_getattr, .permission = ll_inode_permission, .setxattr = ll_setxattr, diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 4d91538668ac..8eb43f192d1f 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -60,10 +60,10 @@ static int get_xattr_type(const char *name) { - if (!strcmp(name, POSIX_ACL_XATTR_ACCESS)) + if (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)) return XATTR_ACL_ACCESS_T; - if (!strcmp(name, POSIX_ACL_XATTR_DEFAULT)) + if (!strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT)) return XATTR_ACL_DEFAULT_T; if (!strncmp(name, XATTR_USER_PREFIX, diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c index 19fcb1cb8231..8fdf0ac4f287 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c @@ -999,8 +999,8 @@ static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev, timeout = BCM2048_AUTO_SEARCH_TIMEOUT; if (!wait_for_completion_timeout(&bdev->compl, - msecs_to_jiffies(timeout))) - dev_err(&bdev->client->dev, "IRQ timeout.\n"); + msecs_to_jiffies(timeout))) + dev_err(&bdev->client->dev, "IRQ timeout.\n"); if (value) if (!bdev->scan_state) diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig index 4de2f082491d..f40a06954a92 100644 --- a/drivers/staging/media/davinci_vpfe/Kconfig +++ b/drivers/staging/media/davinci_vpfe/Kconfig @@ -2,6 +2,8 @@ config VIDEO_DM365_VPFE tristate "DM365 VPFE Media Controller Capture Driver" depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF depends on HAS_DMA + depends on VIDEO_V4L2_SUBDEV_API + depends on VIDEO_DAVINCI_VPBE_DISPLAY select VIDEOBUF2_DMA_CONTIG help Support for DM365 VPFE based Media Controller Capture driver. diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c index b1dfa2ccc4ef..c492914768ea 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c @@ -1536,8 +1536,9 @@ ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, * @fse: pointer to v4l2_subdev_frame_size_enum structure. 
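
The lustre symlink conversion above moves from ->follow_link()/->put_link() cookies to the ->get_link() interface with set_delayed_call(): the returned string lives inside the MDS reply buffer, so the request must stay referenced until the VFS is finished with the name, at which point the registered ll_put_link() drops it. A minimal model of the delayed-call mechanism in plain C (simplified structures standing in for the VFS types):

#include <stdio.h>

/* Minimal model of struct delayed_call: a callback plus its argument. */
struct delayed_call {
	void (*fn)(void *);
	void *arg;
};

static void set_delayed_call(struct delayed_call *dc,
			     void (*fn)(void *), void *arg)
{
	dc->fn = fn;
	dc->arg = arg;
}

static void do_delayed_call(struct delayed_call *dc)
{
	if (dc->fn)
		dc->fn(dc->arg);
}

/* Stand-in for the RPC request that owns the symlink string. */
static void put_request(void *req)
{
	printf("releasing request %p after the caller used the link\n", req);
}

static const char *get_link(struct delayed_call *done)
{
	static char request[32] = "/some/target";   /* owned by "request" */

	set_delayed_call(done, put_request, request);
	return request;       /* valid until do_delayed_call() runs */
}

int main(void)
{
	struct delayed_call done = { 0 };
	const char *target = get_link(&done);

	printf("symlink resolves to %s\n", target);
	do_delayed_call(&done);     /* what the VFS does when it is finished */
	return 0;
}
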
*/ static int -ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) +ipipe_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) { struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt format; diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c index 2a3a56b88de1..b1d5e23ae6e0 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c @@ -254,7 +254,7 @@ int config_ipipe_hw(struct vpfe_ipipe_device *ipipe) void __iomem *ipipe_base = ipipe->base_addr; struct v4l2_mbus_framefmt *outformat; u32 color_pat; - u32 ipipe_mode; + int ipipe_mode; u32 data_path; /* enable clock to IPIPE */ diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c index acb293ed9c91..d892fee3f52f 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c +++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c @@ -63,16 +63,11 @@ resizer_calculate_line_length(u32 pix, int width, int height, if (pix == MEDIA_BUS_FMT_UYVY8_2X8 || pix == MEDIA_BUS_FMT_SGRBG12_1X12) { *line_len = width << 1; - } else if (pix == MEDIA_BUS_FMT_Y8_1X8 || - pix == MEDIA_BUS_FMT_UV8_1X8) { - *line_len = width; - *line_len_c = width; } else { - /* YUV 420 */ - /* round width to upper 32 byte boundary */ *line_len = width; *line_len_c = width; } + /* adjust the line len to be a multiple of 32 */ *line_len += 31; *line_len &= ~0x1f; diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c index 01df0683e950..69b678ca40c0 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c @@ -227,7 +227,7 @@ static int vpfe_enable_clock(struct vpfe_device *vpfe_dev) return 0; vpfe_dev->clks = kcalloc(vpfe_cfg->num_clocks, - sizeof(struct clock *), GFP_KERNEL); + sizeof(*vpfe_dev->clks), GFP_KERNEL); if (vpfe_dev->clks == NULL) return -ENOMEM; diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 0fdff91624fd..adb2bc8811ab 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -470,7 +470,7 @@ void vpfe_video_process_buffer_complete(struct vpfe_video_device *video) { struct vpfe_pipeline *pipe = &video->pipe; - v4l2_get_timestamp(&video->cur_frm->vb.timestamp); + video->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&video->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS) video->cur_frm = video->next_frm; @@ -1078,7 +1078,7 @@ vpfe_g_dv_timings(struct file *file, void *fh, * the buffer nbuffers and buffer size */ static int -vpfe_buffer_queue_setup(struct vb2_queue *vq, const void *parg, +vpfe_buffer_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c index 534b8103ae80..ff1926ca1f96 100644 --- a/drivers/staging/media/lirc/lirc_imon.c +++ b/drivers/staging/media/lirc/lirc_imon.c @@ -885,12 +885,14 @@ static int imon_probe(struct usb_interface *interface, vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); /* 
Everything went fine. Just unlock and return retval (with is 0) */ + mutex_unlock(&context->ctx_lock); goto driver_unlock; unregister_lirc: lirc_unregister_driver(driver->minor); free_tx_urb: + mutex_unlock(&context->ctx_lock); usb_free_urb(tx_urb); free_rx_urb: diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c index c1408342b1d0..d009bcb439f0 100644 --- a/drivers/staging/media/lirc/lirc_parallel.c +++ b/drivers/staging/media/lirc/lirc_parallel.c @@ -33,7 +33,7 @@ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/ioport.h> -#include <linux/time.h> +#include <linux/ktime.h> #include <linux/mm.h> #include <linux/delay.h> @@ -144,25 +144,22 @@ static void lirc_off(void) static unsigned int init_lirc_timer(void) { - struct timeval tv, now; + ktime_t kt, now, timeout; unsigned int level, newlevel, timeelapsed, newtimer; int count = 0; - do_gettimeofday(&tv); - tv.tv_sec++; /* wait max. 1 sec. */ + kt = ktime_get(); + /* wait max. 1 sec. */ + timeout = ktime_add_ns(kt, NSEC_PER_SEC); level = lirc_get_timer(); do { newlevel = lirc_get_timer(); if (level == 0 && newlevel != 0) count++; level = newlevel; - do_gettimeofday(&now); - } while (count < 1000 && (now.tv_sec < tv.tv_sec - || (now.tv_sec == tv.tv_sec - && now.tv_usec < tv.tv_usec))); - - timeelapsed = (now.tv_sec + 1 - tv.tv_sec)*1000000 - + (now.tv_usec - tv.tv_usec); + now = ktime_get(); + } while (count < 1000 && (ktime_before(now, timeout))); + timeelapsed = ktime_us_delta(now, kt); if (count >= 1000 && timeelapsed > 0) { if (default_timer == 0) { /* autodetect timer */ @@ -220,8 +217,8 @@ static void rbuf_write(int signal) static void lirc_lirc_irq_handler(void *blah) { - struct timeval tv; - static struct timeval lasttv; + ktime_t kt, delkt; + static ktime_t lastkt; static int init; long signal; int data; @@ -244,16 +241,14 @@ static void lirc_lirc_irq_handler(void *blah) #ifdef LIRC_TIMER if (init) { - do_gettimeofday(&tv); + kt = ktime_get(); - signal = tv.tv_sec - lasttv.tv_sec; - if (signal > 15) + delkt = ktime_sub(kt, lastkt); + if (ktime_compare(delkt, ktime_set(15, 0)) > 0) /* really long time */ data = PULSE_MASK; else - data = (int) (signal*1000000 + - tv.tv_usec - lasttv.tv_usec + - LIRC_SFH506_DELAY); + data = (int)(ktime_to_us(delkt) + LIRC_SFH506_DELAY); rbuf_write(data); /* space */ } else { @@ -301,7 +296,7 @@ static void lirc_lirc_irq_handler(void *blah) data = 1; rbuf_write(PULSE_BIT|data); /* pulse */ } - do_gettimeofday(&lasttv); + lastkt = ktime_get(); #else /* add your code here */ #endif diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c index f2dca69c2bc0..2218d0042030 100644 --- a/drivers/staging/media/lirc/lirc_sasem.c +++ b/drivers/staging/media/lirc/lirc_sasem.c @@ -42,6 +42,7 @@ #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/usb.h> +#include <linux/ktime.h> #include <media/lirc.h> #include <media/lirc_dev.h> @@ -111,7 +112,7 @@ struct sasem_context { } tx; /* for dealing with repeat codes (wish there was a toggle bit!) 
*/ - struct timeval presstime; + ktime_t presstime; char lastcode[8]; int codesaved; }; @@ -566,8 +567,8 @@ static void incoming_packet(struct sasem_context *context, { int len = urb->actual_length; unsigned char *buf = urb->transfer_buffer; - long ms; - struct timeval tv; + u64 ns; + ktime_t kt; if (len != 8) { dev_warn(&context->dev->dev, @@ -584,9 +585,8 @@ static void incoming_packet(struct sasem_context *context, */ /* get the time since the last button press */ - do_gettimeofday(&tv); - ms = (tv.tv_sec - context->presstime.tv_sec) * 1000 + - (tv.tv_usec - context->presstime.tv_usec) / 1000; + kt = ktime_get(); + ns = ktime_to_ns(ktime_sub(kt, context->presstime)); if (memcmp(buf, "\x08\0\0\0\0\0\0\0", 8) == 0) { /* @@ -600,10 +600,9 @@ static void incoming_packet(struct sasem_context *context, * in that time and then get a false repeat of the previous * press but it is long enough for a genuine repeat */ - if ((ms < 250) && (context->codesaved != 0)) { + if ((ns < 250 * NSEC_PER_MSEC) && (context->codesaved != 0)) { memcpy(buf, &context->lastcode, 8); - context->presstime.tv_sec = tv.tv_sec; - context->presstime.tv_usec = tv.tv_usec; + context->presstime = kt; } } else { /* save the current valid code for repeats */ @@ -613,8 +612,7 @@ static void incoming_packet(struct sasem_context *context, * just for safety reasons */ context->codesaved = 1; - context->presstime.tv_sec = tv.tv_sec; - context->presstime.tv_usec = tv.tv_usec; + context->presstime = kt; } lirc_buffer_write(context->driver->rbuf, buf); diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c index 64a7b2fc5289..b798b311d32c 100644 --- a/drivers/staging/media/lirc/lirc_serial.c +++ b/drivers/staging/media/lirc/lirc_serial.c @@ -59,7 +59,7 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/serial_reg.h> -#include <linux/time.h> +#include <linux/ktime.h> #include <linux/string.h> #include <linux/types.h> #include <linux/wait.h> @@ -204,7 +204,7 @@ static struct lirc_serial hardware[] = { #define RBUF_LEN 256 -static struct timeval lasttv = {0, 0}; +static ktime_t lastkt; static struct lirc_buffer rbuf; @@ -542,10 +542,10 @@ static void frbwrite(int l) static irqreturn_t lirc_irq_handler(int i, void *blah) { - struct timeval tv; + ktime_t kt; int counter, dcd; u8 status; - long deltv; + ktime_t delkt; int data; static int last_dcd = -1; @@ -565,7 +565,7 @@ static irqreturn_t lirc_irq_handler(int i, void *blah) if ((status & hardware[type].signal_pin_change) && sense != -1) { /* get current time */ - do_gettimeofday(&tv); + kt = ktime_get(); /* New mode, written by Trent Piepho <xyzzy@u.washington.edu>. */ @@ -594,34 +594,20 @@ static irqreturn_t lirc_irq_handler(int i, void *blah) dcd = (status & hardware[type].signal_pin) ? 
1 : 0; if (dcd == last_dcd) { - pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n", - dcd, sense, - tv.tv_sec, lasttv.tv_sec, - (unsigned long)tv.tv_usec, - (unsigned long)lasttv.tv_usec); + pr_warn("ignoring spike: %d %d %llx %llx\n", + dcd, sense, ktime_to_us(kt), + ktime_to_us(lastkt)); continue; } - deltv = tv.tv_sec-lasttv.tv_sec; - if (tv.tv_sec < lasttv.tv_sec || - (tv.tv_sec == lasttv.tv_sec && - tv.tv_usec < lasttv.tv_usec)) { - pr_warn("AIEEEE: your clock just jumped backwards\n"); - pr_warn("%d %d %lx %lx %lx %lx\n", - dcd, sense, - tv.tv_sec, lasttv.tv_sec, - (unsigned long)tv.tv_usec, - (unsigned long)lasttv.tv_usec); - data = PULSE_MASK; - } else if (deltv > 15) { + delkt = ktime_sub(kt, lastkt); + if (ktime_compare(delkt, ktime_set(15, 0)) > 0) { data = PULSE_MASK; /* really long time */ if (!(dcd^sense)) { /* sanity check */ - pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n", - dcd, sense, - tv.tv_sec, lasttv.tv_sec, - (unsigned long)tv.tv_usec, - (unsigned long)lasttv.tv_usec); + pr_warn("AIEEEE: %d %d %llx %llx\n", + dcd, sense, ktime_to_us(kt), + ktime_to_us(lastkt)); /* * detecting pulse while this * MUST be a space! @@ -629,11 +615,9 @@ static irqreturn_t lirc_irq_handler(int i, void *blah) sense = sense ? 0 : 1; } } else - data = (int) (deltv*1000000 + - tv.tv_usec - - lasttv.tv_usec); + data = (int) ktime_to_us(delkt); frbwrite(dcd^sense ? data : (data|PULSE_BIT)); - lasttv = tv; + lastkt = kt; last_dcd = dcd; wake_up_interruptible(&rbuf.wait_poll); } @@ -790,7 +774,7 @@ static int set_use_inc(void *data) unsigned long flags; /* initialize timestamp */ - do_gettimeofday(&lasttv); + lastkt = ktime_get(); spin_lock_irqsave(&hardware[type].lock, flags); @@ -979,7 +963,7 @@ static int lirc_serial_resume(struct platform_device *dev) spin_lock_irqsave(&hardware[type].lock, flags); /* Enable Interrupt */ - do_gettimeofday(&lasttv); + lastkt = ktime_get(); soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); off(); diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig index 8d4e3bd1bfe1..46183464ee79 100644 --- a/drivers/staging/media/omap4iss/Kconfig +++ b/drivers/staging/media/omap4iss/Kconfig @@ -1,6 +1,6 @@ config VIDEO_OMAP4 tristate "OMAP 4 Camera support" - depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 depends on HAS_DMA select MFD_SYSCON select VIDEOBUF2_DMA_CONTIG diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index aa76ccda5b42..e27a988540a6 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -601,8 +601,8 @@ static int iss_pipeline_disable(struct iss_pipeline *pipe, subdev = media_entity_to_v4l2_subdev(entity); ret = v4l2_subdev_call(subdev, video, s_stream, 0); if (ret < 0) { - dev_dbg(iss->dev, "%s: module stop timeout.\n", - subdev->name); + dev_warn(iss->dev, "%s: module stop timeout.\n", + subdev->name); /* If the entity failed to stopped, assume it has * crashed. Mark it as such, the ISS will be reset when * applications will release it. 
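The lirc_parallel, lirc_sasem and lirc_serial hunks above all follow one pattern: struct timeval plus do_gettimeofday() becomes ktime_t plus ktime_get(), gaps are computed with ktime_sub()/ktime_us_delta(), and very long gaps are caught with ktime_compare(delkt, ktime_set(15, 0)). Because the monotonic clock can never run backwards, lirc_serial's old "AIEEEE: your clock just jumped backwards" branch can simply be dropped. A minimal sketch of the pattern, using illustrative names (last_edge, measure_gap_us) rather than the drivers' own symbols:

#include <linux/kernel.h>
#include <linux/ktime.h>

static ktime_t last_edge;

static int measure_gap_us(void)
{
	ktime_t now = ktime_get();	/* monotonic, never jumps backwards */
	s64 gap = ktime_us_delta(now, last_edge);

	last_edge = now;

	/* clamp instead of special-casing a backwards-running clock */
	return gap > INT_MAX ? INT_MAX : (int)gap;
}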
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h index 35df8b4709e6..5929357fe687 100644 --- a/drivers/staging/media/omap4iss/iss.h +++ b/drivers/staging/media/omap4iss/iss.h @@ -20,7 +20,7 @@ #include <linux/platform_device.h> #include <linux/wait.h> -#include <media/omap4iss.h> +#include <linux/platform_data/media/omap4iss.h> #include "iss_regs.h" #include "iss_csiphy.h" diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c index c6e6d47ac57f..b941035139ae 100644 --- a/drivers/staging/media/omap4iss/iss_csi2.c +++ b/drivers/staging/media/omap4iss/iss_csi2.c @@ -674,6 +674,9 @@ static void csi2_isr_ctx(struct iss_csi2_device *csi2, status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n)); iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status); + if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping)) + return; + /* Propagate frame number */ if (status & CSI2_CTX_IRQ_FS) { struct iss_pipeline *pipe = @@ -776,9 +779,6 @@ void omap4iss_csi2_isr(struct iss_csi2_device *csi2) pipe->error = true; } - if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping)) - return; - /* Successful cases */ if (csi2_irqstatus & CSI2_IRQ_CONTEXT0) csi2_isr_ctx(csi2, &csi2->contexts[0]); diff --git a/drivers/staging/media/omap4iss/iss_csiphy.h b/drivers/staging/media/omap4iss/iss_csiphy.h index e9ca43955654..a0f2d974daeb 100644 --- a/drivers/staging/media/omap4iss/iss_csiphy.h +++ b/drivers/staging/media/omap4iss/iss_csiphy.h @@ -14,7 +14,7 @@ #ifndef OMAP4_ISS_CSI_PHY_H #define OMAP4_ISS_CSI_PHY_H -#include <media/omap4iss.h> +#include <linux/platform_data/media/omap4iss.h> struct iss_csi2_device; diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c index 9c8180bba77e..108961e05f53 100644 --- a/drivers/staging/media/omap4iss/iss_resizer.c +++ b/drivers/staging/media/omap4iss/iss_resizer.c @@ -158,8 +158,8 @@ static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr) /* Program UV buffer address... Hardcoded to be contiguous! */ if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) && (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) { - u32 c_addr = addr + (resizer->video_out.bpl_value * - (outformat->height - 1)); + u32 c_addr = addr + resizer->video_out.bpl_value + * outformat->height; /* Ensure Y_BAD_L[6:0] = C_BAD_L[6:0]*/ if ((c_addr ^ addr) & 0x7f) { diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index 2a0158bb4974..e9aeca08986f 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -287,7 +287,6 @@ iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh) */ static int iss_video_queue_setup(struct vb2_queue *vq, - const void *parg, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -434,7 +433,7 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video) list_del(&buf->list); spin_unlock_irqrestore(&video->qlock, flags); - v4l2_get_timestamp(&buf->vb.timestamp); + buf->vb.vb2_buf.timestamp = ktime_get_ns(); /* Do frame number propagation only if this is the output video node. 
* Frame number either comes from the CSI receivers or it gets diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index 8ae01753b011..0b4e819f5164 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -165,13 +165,18 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, } } +static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv) +{ + return mdiobus_get_phy(priv->mii_bus, priv->phy_addr); +} + /* * Ethtool operation */ static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); if (!phydev) return -ENODEV; @@ -181,7 +186,7 @@ static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); if (!phydev) return -ENODEV; @@ -218,7 +223,7 @@ static int xlr_net_open(struct net_device *ndev) { u32 err; struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); /* schedule a link state check */ phy_start(phydev); @@ -239,7 +244,7 @@ static int xlr_net_open(struct net_device *ndev) static int xlr_net_stop(struct net_device *ndev) { struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); phy_stop(phydev); netif_tx_stop_all_queues(ndev); @@ -268,7 +273,7 @@ static void __maybe_unused xlr_wakeup_queue(unsigned long dev) { struct net_device *ndev = (struct net_device *) dev; struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); if (phydev->link) netif_tx_wake_queue(netdev_get_tx_queue(ndev, priv->wakeup_q)); @@ -771,7 +776,7 @@ static void xlr_sgmii_init(struct xlr_net_priv *priv) void xlr_set_gmac_speed(struct xlr_net_priv *priv) { - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); int speed; if (phydev->interface == PHY_INTERFACE_MODE_SGMII) @@ -813,7 +818,7 @@ void xlr_set_gmac_speed(struct xlr_net_priv *priv) static void xlr_gmac_link_adjust(struct net_device *ndev) { struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); u32 intreg; intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG); @@ -830,7 +835,7 @@ static void xlr_gmac_link_adjust(struct net_device *ndev) static int xlr_mii_probe(struct xlr_net_priv *priv) { - struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr]; + struct phy_device *phydev = xlr_get_phydev(priv); if (!phydev) { pr_err("no PHY found on phy_addr %d\n", priv->phy_addr); @@ -838,8 +843,8 @@ static int xlr_mii_probe(struct xlr_net_priv *priv) } /* Attach MAC to PHY */ - phydev = phy_connect(priv->ndev, dev_name(&phydev->dev), - &xlr_gmac_link_adjust, priv->nd->phy_interface); + phydev = phy_connect(priv->ndev, phydev_name(phydev), + &xlr_gmac_link_adjust, priv->nd->phy_interface); if (IS_ERR(phydev)) { 
pr_err("could not attach PHY\n"); @@ -854,8 +859,7 @@ static int xlr_mii_probe(struct xlr_net_priv *priv) | ADVERTISED_MII); phydev->advertising = phydev->supported; - pr_info("attached PHY driver [%s] (mii_bus:phy_addr=%s\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -877,14 +881,6 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv, priv->mii_bus->read = xlr_mii_read; priv->mii_bus->write = xlr_mii_write; priv->mii_bus->parent = &pdev->dev; - priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (priv->mii_bus->irq == NULL) { - pr_err("irq alloc failed\n"); - mdiobus_free(priv->mii_bus); - return -ENOMEM; - } - - priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; /* Scan only the enabled address */ priv->mii_bus->phy_mask = ~(1 << priv->phy_addr); diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 613344b886e1..1055ee14b66a 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -78,7 +78,7 @@ static void cvm_oct_rgmii_poll(struct net_device *dev) */ spin_lock_irqsave(&global_register_lock, flags); } else { - mutex_lock(&priv->phydev->bus->mdio_lock); + mutex_lock(&priv->phydev->mdio.bus->mdio_lock); } link_info = cvmx_helper_link_get(priv->port); @@ -113,7 +113,7 @@ static void cvm_oct_rgmii_poll(struct net_device *dev) if (use_global_register_lock) spin_unlock_irqrestore(&global_register_lock, flags); else - mutex_unlock(&priv->phydev->bus->mdio_lock); + mutex_unlock(&priv->phydev->mdio.bus->mdio_lock); return; } @@ -132,7 +132,7 @@ static void cvm_oct_rgmii_poll(struct net_device *dev) if (use_global_register_lock) spin_unlock_irqrestore(&global_register_lock, flags); else - mutex_unlock(&priv->phydev->bus->mdio_lock); + mutex_unlock(&priv->phydev->mdio.bus->mdio_lock); if (priv->phydev == NULL) { /* Tell core. 
*/ diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel_quark_dts_thermal.c index 5ed90e6c8a64..5d33b350da1c 100644 --- a/drivers/thermal/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel_quark_dts_thermal.c @@ -125,8 +125,8 @@ static int soc_dts_enable(struct thermal_zone_device *tzd) struct soc_sensor_entry *aux_entry = tzd->devdata; int ret; - ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_ENABLE, &out); + ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_ENABLE, &out); if (ret) return ret; @@ -137,8 +137,8 @@ static int soc_dts_enable(struct thermal_zone_device *tzd) if (!aux_entry->locked) { out |= QRK_DTS_ENABLE_BIT; - ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE, - QRK_DTS_REG_OFFSET_ENABLE, out); + ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, + QRK_DTS_REG_OFFSET_ENABLE, out); if (ret) return ret; @@ -158,8 +158,8 @@ static int soc_dts_disable(struct thermal_zone_device *tzd) struct soc_sensor_entry *aux_entry = tzd->devdata; int ret; - ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_ENABLE, &out); + ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_ENABLE, &out); if (ret) return ret; @@ -170,8 +170,8 @@ static int soc_dts_disable(struct thermal_zone_device *tzd) if (!aux_entry->locked) { out &= ~QRK_DTS_ENABLE_BIT; - ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE, - QRK_DTS_REG_OFFSET_ENABLE, out); + ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, + QRK_DTS_REG_OFFSET_ENABLE, out); if (ret) return ret; @@ -192,8 +192,8 @@ static int _get_trip_temp(int trip, int *temp) u32 out; mutex_lock(&dts_update_mutex); - status = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_PTPS, &out); + status = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_PTPS, &out); mutex_unlock(&dts_update_mutex); if (status) @@ -236,8 +236,8 @@ static int update_trip_temp(struct soc_sensor_entry *aux_entry, goto failed; } - ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_PTPS, &store_ptps); + ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_PTPS, &store_ptps); if (ret) goto failed; @@ -262,8 +262,8 @@ static int update_trip_temp(struct soc_sensor_entry *aux_entry, out |= (temp_out & QRK_DTS_MASK_TP_THRES) << (trip * QRK_DTS_SHIFT_TP); - ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE, - QRK_DTS_REG_OFFSET_PTPS, out); + ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, + QRK_DTS_REG_OFFSET_PTPS, out); failed: mutex_unlock(&dts_update_mutex); @@ -294,8 +294,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int ret; mutex_lock(&dts_update_mutex); - ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_TEMP, &out); + ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_TEMP, &out); mutex_unlock(&dts_update_mutex); if (ret) @@ -350,13 +350,13 @@ static void free_soc_dts(struct soc_sensor_entry *aux_entry) if (aux_entry) { if (!aux_entry->locked) { mutex_lock(&dts_update_mutex); - iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE, - QRK_DTS_REG_OFFSET_ENABLE, - aux_entry->store_dts_enable); + iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, + QRK_DTS_REG_OFFSET_ENABLE, + aux_entry->store_dts_enable); - iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE, - QRK_DTS_REG_OFFSET_PTPS, - aux_entry->store_ptps); + iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, + QRK_DTS_REG_OFFSET_PTPS, + 
aux_entry->store_ptps); mutex_unlock(&dts_update_mutex); } thermal_zone_device_unregister(aux_entry->tzone); @@ -378,9 +378,8 @@ static struct soc_sensor_entry *alloc_soc_dts(void) } /* Check if DTS register is locked */ - err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_LOCK, - &out); + err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_LOCK, &out); if (err) goto err_ret; @@ -395,16 +394,16 @@ static struct soc_sensor_entry *alloc_soc_dts(void) /* Store DTS default state if DTS registers are not locked */ if (!aux_entry->locked) { /* Store DTS default enable for restore on exit */ - err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_ENABLE, - &aux_entry->store_dts_enable); + err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_ENABLE, + &aux_entry->store_dts_enable); if (err) goto err_ret; /* Store DTS default PTPS register for restore on exit */ - err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ, - QRK_DTS_REG_OFFSET_PTPS, - &aux_entry->store_ptps); + err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, + QRK_DTS_REG_OFFSET_PTPS, + &aux_entry->store_ptps); if (err) goto err_ret; } diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c index 5841d1d72996..f72e1db3216f 100644 --- a/drivers/thermal/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel_soc_dts_iosf.c @@ -90,7 +90,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip, dts = tzd->devdata; sensors = dts->sensors; mutex_lock(&sensors->dts_update_lock); - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTPS, &out); mutex_unlock(&sensors->dts_update_lock); if (status) @@ -124,27 +124,27 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts, temp_out = (sensors->tj_max - temp) / 1000; - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTPS, &store_ptps); if (status) return status; out = (store_ptps & ~(0xFF << (thres_index * 8))); out |= (temp_out & 0xFF) << (thres_index * 8); - status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTPS, out); if (status) return status; pr_debug("update_trip_temp PTPS = %x\n", out); - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTMC, &out); if (status) goto err_restore_ptps; store_ptmc = out; - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_TE_AUX0 + thres_index, &te_out); if (status) @@ -167,12 +167,12 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts, out &= ~SOC_DTS_AUX0_ENABLE_BIT; te_out &= ~int_enable_bit; } - status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, out); if (status) goto err_restore_te_out; - status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_TE_AUX0 + thres_index, te_out); if (status) @@ -182,13 +182,13 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts, return 0; err_restore_te_out: - iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, 
store_te_out); err_restore_ptmc: - iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, store_ptmc); err_restore_ptps: - iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTPS, store_ptps); /* Nothing we can do if restore fails */ @@ -235,7 +235,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, dts = tzd->devdata; sensors = dts->sensors; - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_TEMP, &out); if (status) return status; @@ -259,14 +259,14 @@ static int soc_dts_enable(int id) u32 out; int ret; - ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_ENABLE, &out); if (ret) return ret; if (!(out & BIT(id))) { out |= BIT(id); - ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_ENABLE, out); if (ret) return ret; @@ -278,7 +278,7 @@ static int soc_dts_enable(int id) static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts) { if (dts) { - iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_ENABLE, dts->store_status); thermal_zone_device_unregister(dts->tzone); } @@ -296,9 +296,8 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts, int i; /* Store status to restor on exit */ - ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, - SOC_DTS_OFFSET_ENABLE, - &dts->store_status); + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, + SOC_DTS_OFFSET_ENABLE, &dts->store_status); if (ret) goto err_ret; @@ -311,7 +310,7 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts, } /* Check if the writable trip we provide is not used by BIOS */ - ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTPS, &store_ptps); if (ret) trip_mask = 0; @@ -374,19 +373,19 @@ void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors) spin_lock_irqsave(&sensors->intr_notify_lock, flags); - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTMC, &ptmc_out); ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT; - status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, ptmc_out); - status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTTSS, &sticky_out); pr_debug("status %d PTTSS %x\n", status, sticky_out); if (sticky_out & SOC_DTS_TRIP_MASK) { int i; /* reset sticky bit */ - status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTTSS, sticky_out); spin_unlock_irqrestore(&sensors->intr_notify_lock, flags); diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index e53d9a512c6d..2caaf5a2516d 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -32,7 +32,6 @@ #include <linux/delay.h> #undef SERIAL_PARANOIA_CHECK -#define SERIAL_DO_RESTART /* Set of debugging defines */ diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index d4a1331675ed..abbed201dc74 100644 --- 
a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -292,14 +292,14 @@ static void cyz_rx_restart(unsigned long); static struct timer_list cyz_rx_full_timer[NR_PORTS]; #endif /* CONFIG_CYZ_INTR */ -static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val) +static void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val) { struct cyclades_card *card = port->card; cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val); } -static inline u8 cyy_readb(struct cyclades_port *port, u32 reg) +static u8 cyy_readb(struct cyclades_port *port, u32 reg) { struct cyclades_card *card = port->card; @@ -321,7 +321,7 @@ static inline bool cyz_fpga_loaded(struct cyclades_card *card) return __cyz_fpga_loaded(card->ctl_addr.p9060); } -static inline bool cyz_is_loaded(struct cyclades_card *card) +static bool cyz_is_loaded(struct cyclades_card *card) { struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS; @@ -329,7 +329,7 @@ static inline bool cyz_is_loaded(struct cyclades_card *card) readl(&fw_id->signature) == ZFIRM_ID; } -static inline int serial_paranoia_check(struct cyclades_port *info, +static int serial_paranoia_check(struct cyclades_port *info, const char *name, const char *routine) { #ifdef SERIAL_PARANOIA_CHECK diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c index 2054427992e0..99875949bfb7 100644 --- a/drivers/tty/isicom.c +++ b/drivers/tty/isicom.c @@ -220,7 +220,7 @@ static struct isi_port isi_ports[PORT_COUNT]; * it wants to talk. */ -static inline int WaitTillCardIsFree(unsigned long base) +static int WaitTillCardIsFree(unsigned long base) { unsigned int count = 0; unsigned int a = in_atomic(); /* do we run under spinlock? */ @@ -280,7 +280,7 @@ static void raise_dtr(struct isi_port *port) } /* card->lock HAS to be held */ -static inline void drop_dtr(struct isi_port *port) +static void drop_dtr(struct isi_port *port) { struct isi_board *card = port->card; unsigned long base = card->base; diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 14c54e041065..92982d7c0489 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -155,7 +155,6 @@ struct mon_str { #define LOWWAIT 2 #define EMPTYWAIT 3 -#define SERIAL_DO_RESTART #define WAKEUP_CHARS 256 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index ed776149261e..d9a5fc28fef4 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -162,12 +162,23 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x, return put_user(x, ptr); } -static inline int tty_copy_to_user(struct tty_struct *tty, - void __user *to, - const void *from, - unsigned long n) +static int tty_copy_to_user(struct tty_struct *tty, void __user *to, + size_t tail, size_t n) { struct n_tty_data *ldata = tty->disc_data; + size_t size = N_TTY_BUF_SIZE - tail; + const void *from = read_buf_addr(ldata, tail); + int uncopied; + + if (n > size) { + tty_audit_add_data(tty, from, size, ldata->icanon); + uncopied = copy_to_user(to, from, size); + if (uncopied) + return uncopied; + to += size; + n -= size; + from = ldata->read_buf; + } tty_audit_add_data(tty, from, n, ldata->icanon); return copy_to_user(to, from, n); @@ -1201,9 +1212,7 @@ static void n_tty_receive_overrun(struct tty_struct *tty) ldata->num_overrun++; if (time_after(jiffies, ldata->overrun_time + HZ) || time_after(ldata->overrun_time, jiffies)) { - printk(KERN_WARNING "%s: %d input overrun(s)\n", - tty_name(tty), - ldata->num_overrun); + tty_warn(tty, "%d input overrun(s)\n", ldata->num_overrun); ldata->overrun_time = jiffies; 
ldata->num_overrun = 0; } @@ -1486,8 +1495,7 @@ n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag) n_tty_receive_overrun(tty); break; default: - printk(KERN_ERR "%s: unknown flag %d\n", - tty_name(tty), flag); + tty_err(tty, "unknown flag %d\n", flag); break; } } @@ -2006,11 +2014,11 @@ static int copy_from_read_buf(struct tty_struct *tty, n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail); n = min(*nr, n); if (n) { - retval = copy_to_user(*b, read_buf_addr(ldata, tail), n); + const unsigned char *from = read_buf_addr(ldata, tail); + retval = copy_to_user(*b, from, n); n -= retval; - is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty); - tty_audit_add_data(tty, read_buf_addr(ldata, tail), n, - ldata->icanon); + is_eof = n == 1 && *from == EOF_CHAR(tty); + tty_audit_add_data(tty, from, n, ldata->icanon); smp_store_release(&ldata->read_tail, ldata->read_tail + n); /* Turn single EOF into zero-length read */ if (L_EXTPROC(tty) && ldata->icanon && is_eof && @@ -2054,13 +2062,13 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, size_t eol; size_t tail; int ret, found = 0; - bool eof_push = 0; /* N.B. avoid overrun if nr == 0 */ - n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); - if (!n) + if (!*nr) return 0; + n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); + tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); @@ -2072,34 +2080,24 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, if (eol == N_TTY_BUF_SIZE && more) { /* scan wrapped without finding set bit */ eol = find_next_bit(ldata->read_flags, more, 0); - if (eol != more) - found = 1; - } else if (eol != size) - found = 1; + found = eol != more; + } else + found = eol != size; - size = N_TTY_BUF_SIZE - tail; n = eol - tail; if (n > N_TTY_BUF_SIZE) n += N_TTY_BUF_SIZE; - n += found; - c = n; + c = n + found; - if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { - n--; - eof_push = !n && ldata->read_tail != ldata->line_start; + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { + c = min(*nr, c); + n = c; } - n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", - __func__, eol, found, n, c, size, more); - - if (n > size) { - ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size); - if (ret) - return -EFAULT; - ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size); - } else - ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n); + n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", + __func__, eol, found, n, c, tail, more); + ret = tty_copy_to_user(tty, *b, tail, n); if (ret) return -EFAULT; *b += n; @@ -2116,7 +2114,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, ldata->push = 0; tty_audit_push(tty); } - return eof_push ? 
-EAGAIN : 0; + return 0; } extern ssize_t redirected_tty_write(struct file *, const char __user *, @@ -2273,10 +2271,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, if (ldata->icanon && !L_EXTPROC(tty)) { retval = canon_copy_from_read_buf(tty, &b, &nr); - if (retval == -EAGAIN) { - retval = 0; - continue; - } else if (retval) + if (retval) break; } else { int uncopied; diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index a45660f62db5..b3110040164a 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -788,7 +788,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) if (retval) goto err_release; - tty_debug_hangup(tty, "(tty count=%d)\n", tty->count); + tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); tty_unlock(tty); return 0; diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index 0140ba4aacde..0982c1a44187 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c @@ -157,7 +157,7 @@ static void change_speed(struct m68k_serial *info, struct tty_struct *tty); #endif -static int m68328_console_initted = 0; +static int m68328_console_initted; static int m68328_console_baud = CONSOLE_BAUD_RATE; static int m68328_console_cbaud = DEFAULT_CBAUD; @@ -274,8 +274,8 @@ static void receive_chars(struct m68k_serial *info, unsigned short rx) #endif ch = GET_FIELD(rx, URX_RXDATA); - if(info->is_cons) { - if(URX_BREAK & rx) { /* whee, break received */ + if (info->is_cons) { + if (URX_BREAK & rx) { /* whee, break received */ return; #ifdef CONFIG_MAGIC_SYSRQ } else if (ch == 0x10) { /* ^P */ @@ -302,7 +302,7 @@ static void receive_chars(struct m68k_serial *info, unsigned short rx) tty_insert_flip_char(&info->tport, ch, flag); #ifndef CONFIG_XCOPILOT_BUGS - } while((rx = uart->urx.w) & URX_DATA_READY); + } while ((rx = uart->urx.w) & URX_DATA_READY); #endif tty_schedule_flip(&info->tport); @@ -330,7 +330,7 @@ static void transmit_chars(struct m68k_serial *info, struct tty_struct *tty) info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); info->xmit_cnt--; - if(info->xmit_cnt <= 0) { + if (info->xmit_cnt <= 0) { /* All done for now... 
TX ints off */ uart->ustcnt &= ~USTCNT_TX_INTR_MASK; goto clear_and_return; @@ -452,45 +452,45 @@ struct { } #ifndef CONFIG_M68VZ328 hw_baud_table[18] = { - {0,0}, /* 0 */ - {0,0}, /* 50 */ - {0,0}, /* 75 */ - {0,0}, /* 110 */ - {0,0}, /* 134 */ - {0,0}, /* 150 */ - {0,0}, /* 200 */ - {7,0x26}, /* 300 */ - {6,0x26}, /* 600 */ - {5,0x26}, /* 1200 */ - {0,0}, /* 1800 */ - {4,0x26}, /* 2400 */ - {3,0x26}, /* 4800 */ - {2,0x26}, /* 9600 */ - {1,0x26}, /* 19200 */ - {0,0x26}, /* 38400 */ - {1,0x38}, /* 57600 */ - {0,0x38}, /* 115200 */ + {0, 0}, /* 0 */ + {0, 0}, /* 50 */ + {0, 0}, /* 75 */ + {0, 0}, /* 110 */ + {0, 0}, /* 134 */ + {0, 0}, /* 150 */ + {0, 0}, /* 200 */ + {7, 0x26}, /* 300 */ + {6, 0x26}, /* 600 */ + {5, 0x26}, /* 1200 */ + {0, 0}, /* 1800 */ + {4, 0x26}, /* 2400 */ + {3, 0x26}, /* 4800 */ + {2, 0x26}, /* 9600 */ + {1, 0x26}, /* 19200 */ + {0, 0x26}, /* 38400 */ + {1, 0x38}, /* 57600 */ + {0, 0x38}, /* 115200 */ }; #else hw_baud_table[18] = { - {0,0}, /* 0 */ - {0,0}, /* 50 */ - {0,0}, /* 75 */ - {0,0}, /* 110 */ - {0,0}, /* 134 */ - {0,0}, /* 150 */ - {0,0}, /* 200 */ - {0,0}, /* 300 */ - {7,0x26}, /* 600 */ - {6,0x26}, /* 1200 */ - {0,0}, /* 1800 */ - {5,0x26}, /* 2400 */ - {4,0x26}, /* 4800 */ - {3,0x26}, /* 9600 */ - {2,0x26}, /* 19200 */ - {1,0x26}, /* 38400 */ - {0,0x26}, /* 57600 */ - {1,0x38}, /* 115200 */ + {0, 0}, /* 0 */ + {0, 0}, /* 50 */ + {0, 0}, /* 75 */ + {0, 0}, /* 110 */ + {0, 0}, /* 134 */ + {0, 0}, /* 150 */ + {0, 0}, /* 200 */ + {0, 0}, /* 300 */ + {7, 0x26}, /* 600 */ + {6, 0x26}, /* 1200 */ + {0, 0}, /* 1800 */ + {5, 0x26}, /* 2400 */ + {4, 0x26}, /* 4800 */ + {3, 0x26}, /* 9600 */ + {2, 0x26}, /* 19200 */ + {1, 0x26}, /* 38400 */ + {0, 0x26}, /* 57600 */ + {1, 0x38}, /* 115200 */ }; #endif /* rate = 1036800 / ((65 - prescale) * (1<<divider)) */ @@ -538,7 +538,7 @@ static void change_speed(struct m68k_serial *info, struct tty_struct *tty) #ifdef CONFIG_SERIAL_68328_RTS_CTS if (cflag & CRTSCTS) { - uart->utx.w &= ~ UTX_NOCTS; + uart->utx.w &= ~UTX_NOCTS; } else { uart->utx.w |= UTX_NOCTS; } @@ -591,8 +591,8 @@ void console_print_68328(const char *p) { char c; - while((c=*(p++)) != 0) { - if(c == '\n') + while ((c = *(p++)) != 0) { + if (c == '\n') rs_put_char('\r'); rs_put_char(c); } @@ -624,7 +624,7 @@ static void rs_flush_chars(struct tty_struct *tty) if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) return; #ifndef USE_INTS - for(;;) { + for (;;) { #endif /* Enable transmitter */ @@ -659,9 +659,9 @@ static void rs_flush_chars(struct tty_struct *tty) local_irq_restore(flags); } -extern void console_printn(const char * b, int count); +extern void console_printn(const char *b, int count); -static int rs_write(struct tty_struct * tty, +static int rs_write(struct tty_struct *tty, const unsigned char *buf, int count) { int c, total = 0; @@ -700,7 +700,7 @@ static int rs_write(struct tty_struct * tty, /* Enable transmitter */ local_irq_disable(); #ifndef USE_INTS - while(info->xmit_cnt) { + while (info->xmit_cnt) { #endif uart->ustcnt |= USTCNT_TXEN; @@ -767,7 +767,7 @@ static void rs_flush_buffer(struct tty_struct *tty) * incoming characters should be throttled. 
* ------------------------------------------------------------ */ -static void rs_throttle(struct tty_struct * tty) +static void rs_throttle(struct tty_struct *tty) { struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; @@ -780,7 +780,7 @@ static void rs_throttle(struct tty_struct * tty) /* Turn off RTS line (do this atomic) */ } -static void rs_unthrottle(struct tty_struct * tty) +static void rs_unthrottle(struct tty_struct *tty) { struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; @@ -803,8 +803,8 @@ static void rs_unthrottle(struct tty_struct * tty) * ------------------------------------------------------------ */ -static int get_serial_info(struct m68k_serial * info, - struct serial_struct * retinfo) +static int get_serial_info(struct m68k_serial *info, + struct serial_struct *retinfo) { struct serial_struct tmp; @@ -827,7 +827,7 @@ static int get_serial_info(struct m68k_serial * info, } static int set_serial_info(struct m68k_serial *info, struct tty_struct *tty, - struct serial_struct * new_info) + struct serial_struct *new_info) { struct tty_port *port = &info->tport; struct serial_struct new_serial; @@ -883,7 +883,7 @@ check_and_exit: * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. */ -static int get_lsr_info(struct m68k_serial * info, unsigned int *value) +static int get_lsr_info(struct m68k_serial *info, unsigned int *value) { #ifdef CONFIG_SERIAL_68328_RTS_CTS m68328_uart *uart = &uart_addr[info->line]; @@ -904,7 +904,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value) /* * This routine sends a break character out the serial port. */ -static void send_break(struct m68k_serial * info, unsigned int duration) +static void send_break(struct m68k_serial *info, unsigned int duration) { m68328_uart *uart = &uart_addr[info->line]; unsigned long flags; @@ -922,7 +922,7 @@ static void send_break(struct m68k_serial * info, unsigned int duration) static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { - struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; + struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; int retval; if (serial_paranoia_check(info, tty->name, "rs_ioctl")) @@ -992,9 +992,9 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) * that IRQ if nothing is left in the chain. * ------------------------------------------------------------ */ -static void rs_close(struct tty_struct *tty, struct file * filp) +static void rs_close(struct tty_struct *tty, struct file *filp) { - struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; + struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; struct tty_port *port = &info->tport; m68328_uart *uart = &uart_addr[info->line]; unsigned long flags; @@ -1079,7 +1079,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) */ void rs_hangup(struct tty_struct *tty) { - struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; + struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; if (serial_paranoia_check(info, tty->name, "rs_hangup")) return; @@ -1098,7 +1098,7 @@ void rs_hangup(struct tty_struct *tty) * the IRQ chain. It also performs the serial-specific * initialization for the tty structure. 
*/ -int rs_open(struct tty_struct *tty, struct file * filp) +int rs_open(struct tty_struct *tty, struct file *filp) { struct m68k_serial *info; int retval; @@ -1180,7 +1180,7 @@ rs68328_init(void) local_irq_save(flags); - for(i=0;i<NR_PORTS;i++) { + for (i = 0; i < NR_PORTS; i++) { info = &m68k_soft[i]; tty_port_init(&info->tport); @@ -1198,7 +1198,7 @@ rs68328_init(void) printk(" is a builtin MC68328 UART\n"); #ifdef CONFIG_M68VZ328 - if (i > 0 ) + if (i > 0) PJSEL &= 0xCF; /* PSW enable second port output */ #endif @@ -1263,7 +1263,7 @@ int m68328_console_setup(struct console *cp, char *arg) return(-1); if (arg) - n = simple_strtoul(arg,NULL,0); + n = simple_strtoul(arg, NULL, 0); for (i = 0; i < ARRAY_SIZE(baud_table); i++) if (baud_table[i] == n) @@ -1279,7 +1279,7 @@ int m68328_console_setup(struct console *cp, char *arg) } m68328_set_baud(); /* make sure baud rate changes */ - return(0); + return 0; } @@ -1298,7 +1298,7 @@ void m68328_console_write (struct console *co, const char *str, while (count--) { if (*str == '\n') rs_put_char('\r'); - rs_put_char( *str++ ); + rs_put_char(*str++); } } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 39126460c1f5..c9720a97a977 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -620,7 +620,7 @@ static int univ8250_console_setup(struct console *co, char *options) * @options: ptr to option string from console command line * * Only attempts to match console command lines of the form: - * console=uart[8250],io|mmio|mmio32,<addr>[,<options>] + * console=uart[8250],io|mmio|mmio16|mmio32,<addr>[,<options>] * console=uart[8250],0x<addr>[,<options>] * This form is used to register an initial earlycon boot console and * replace it with the serial8250_console at 8250 driver init. 
@@ -650,8 +650,9 @@ static int univ8250_console_match(struct console *co, char *name, int idx, if (port->iotype != iotype) continue; - if ((iotype == UPIO_MEM || iotype == UPIO_MEM32) && - (port->mapbase != addr)) + if ((iotype == UPIO_MEM || iotype == UPIO_MEM16 || + iotype == UPIO_MEM32 || iotype == UPIO_MEM32BE) + && (port->mapbase != addr)) continue; if (iotype == UPIO_PORT && port->iobase != addr) continue; diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index ceb85792a5cf..af62131af21e 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -42,6 +42,8 @@ static unsigned int __init serial8250_early_in(struct uart_port *port, int offse switch (port->iotype) { case UPIO_MEM: return readb(port->membase + offset); + case UPIO_MEM16: + return readw(port->membase + (offset << 1)); case UPIO_MEM32: return readl(port->membase + (offset << 2)); case UPIO_MEM32BE: @@ -59,6 +61,9 @@ static void __init serial8250_early_out(struct uart_port *port, int offset, int case UPIO_MEM: writeb(value, port->membase + offset); break; + case UPIO_MEM16: + writew(value, port->membase + (offset << 1)); + break; case UPIO_MEM32: writel(value, port->membase + (offset << 2)); break; @@ -73,43 +78,27 @@ static void __init serial8250_early_out(struct uart_port *port, int offset, int #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -static void __init wait_for_xmitr(struct uart_port *port) +static void __init serial_putc(struct uart_port *port, int c) { unsigned int status; + serial8250_early_out(port, UART_TX, c); + for (;;) { status = serial8250_early_in(port, UART_LSR); if ((status & BOTH_EMPTY) == BOTH_EMPTY) - return; + break; cpu_relax(); } } -static void __init serial_putc(struct uart_port *port, int c) -{ - wait_for_xmitr(port); - serial8250_early_out(port, UART_TX, c); -} - static void __init early_serial8250_write(struct console *console, const char *s, unsigned int count) { struct earlycon_device *device = console->data; struct uart_port *port = &device->port; - unsigned int ier; - - /* Save the IER and disable interrupts preserving the UUE bit */ - ier = serial8250_early_in(port, UART_IER); - if (ier) - serial8250_early_out(port, UART_IER, ier & UART_IER_UUE); uart_console_write(port, s, count, serial_putc); - - /* Wait for transmitter to become empty and restore the IER */ - wait_for_xmitr(port); - - if (ier) - serial8250_early_out(port, UART_IER, ier); } static void __init init_port(struct earlycon_device *device) diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c index 49394b4c5cfd..d6e1ec9b4fde 100644 --- a/drivers/tty/serial/8250/8250_ingenic.c +++ b/drivers/tty/serial/8250/8250_ingenic.c @@ -48,6 +48,7 @@ static const struct of_device_id of_match[]; #define UART_MCR_MDCE BIT(7) #define UART_MCR_FCM BIT(6) +#ifdef CONFIG_SERIAL_EARLYCON static struct earlycon_device *early_device; static uint8_t __init early_in(struct uart_port *port, int offset) @@ -140,6 +141,7 @@ OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart", EARLYCON_DECLARE(jz4780_uart, ingenic_early_console_setup); OF_EARLYCON_DECLARE(jz4780_uart, "ingenic,jz4780-uart", ingenic_early_console_setup); +#endif /* CONFIG_SERIAL_EARLYCON */ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value) { diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 78883ca64ddd..0e590b233f03 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ 
b/drivers/tty/serial/8250/8250_mtk.c @@ -16,7 +16,7 @@ */ #include <linux/clk.h> #include <linux/io.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> @@ -245,23 +245,6 @@ static int mtk8250_probe(struct platform_device *pdev) return 0; } -static int mtk8250_remove(struct platform_device *pdev) -{ - struct mtk8250_data *data = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&pdev->dev); - - serial8250_unregister_port(data->line); - - pm_runtime_disable(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); - - if (!pm_runtime_status_suspended(&pdev->dev)) - mtk8250_runtime_suspend(&pdev->dev); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int mtk8250_suspend(struct device *dev) { @@ -292,18 +275,18 @@ static const struct of_device_id mtk8250_of_match[] = { { .compatible = "mediatek,mt6577-uart" }, { /* Sentinel */ } }; -MODULE_DEVICE_TABLE(of, mtk8250_of_match); static struct platform_driver mtk8250_platform_driver = { .driver = { - .name = "mt6577-uart", - .pm = &mtk8250_pm_ops, - .of_match_table = mtk8250_of_match, + .name = "mt6577-uart", + .pm = &mtk8250_pm_ops, + .of_match_table = mtk8250_of_match, + .suppress_bind_attrs = true, + }, .probe = mtk8250_probe, - .remove = mtk8250_remove, }; -module_platform_driver(mtk8250_platform_driver); +builtin_platform_driver(mtk8250_platform_driver); #ifdef CONFIG_SERIAL_8250_CONSOLE static int __init early_mtk8250_setup(struct earlycon_device *device, @@ -319,7 +302,3 @@ static int __init early_mtk8250_setup(struct earlycon_device *device, OF_EARLYCON_DECLARE(mtk8250, "mediatek,mt6577-uart", early_mtk8250_setup); #endif - -MODULE_AUTHOR("Matthias Brugger"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Mediatek 8250 serial port driver"); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/8250/8250_of.c index de5029649795..33021c1f7d55 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -18,14 +18,9 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> -#include <linux/nwpserial.h> #include <linux/clk.h> -#ifdef CONFIG_SERIAL_8250_MODULE -#define CONFIG_SERIAL_8250 CONFIG_SERIAL_8250_MODULE -#endif - -#include "8250/8250.h" +#include "8250.h" struct of_serial_info { struct clk *clk; @@ -122,6 +117,9 @@ static int of_platform_serial_setup(struct platform_device *ofdev, case 1: port->iotype = UPIO_MEM; break; + case 2: + port->iotype = UPIO_MEM16; + break; case 4: port->iotype = of_device_is_big_endian(np) ? UPIO_MEM32BE : UPIO_MEM32; @@ -195,7 +193,6 @@ static int of_platform_serial_probe(struct platform_device *ofdev) goto out; switch (port_type) { -#ifdef CONFIG_SERIAL_8250 case PORT_8250 ... PORT_MAX_8250: { struct uart_8250_port port8250; @@ -212,12 +209,6 @@ static int of_platform_serial_probe(struct platform_device *ofdev) ret = serial8250_register_8250_port(&port8250); break; } -#endif -#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL - case PORT_NWPSERIAL: - ret = nwpserial_register_port(&port); - break; -#endif default: /* need to add code for these */ case PORT_UNKNOWN: @@ -245,16 +236,9 @@ static int of_platform_serial_remove(struct platform_device *ofdev) { struct of_serial_info *info = platform_get_drvdata(ofdev); switch (info->type) { -#ifdef CONFIG_SERIAL_8250 case PORT_8250 ... 
PORT_MAX_8250: serial8250_unregister_port(info->line); break; -#endif -#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL - case PORT_NWPSERIAL: - nwpserial_unregister_port(info->line); - break; -#endif default: /* need to add code for these */ break; @@ -267,7 +251,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev) } #ifdef CONFIG_PM_SLEEP -#ifdef CONFIG_SERIAL_8250 static void of_serial_suspend_8250(struct of_serial_info *info) { struct uart_8250_port *port8250 = serial8250_get_port(info->line); @@ -288,15 +271,6 @@ static void of_serial_resume_8250(struct of_serial_info *info) serial8250_resume_port(info->line); } -#else -static inline void of_serial_suspend_8250(struct of_serial_info *info) -{ -} - -static inline void of_serial_resume_8250(struct of_serial_info *info) -{ -} -#endif static int of_serial_suspend(struct device *dev) { @@ -353,10 +327,6 @@ static const struct of_device_id of_platform_serial_table[] = { .data = (void *)PORT_XSCALE, }, { .compatible = "mrvl,pxa-uart", .data = (void *)PORT_XSCALE, }, -#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL - { .compatible = "ibm,qpace-nwp-serial", - .data = (void *)PORT_NWPSERIAL, }, -#endif { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, of_platform_serial_table); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 52d82d2ac726..8d262bce97e4 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -368,6 +368,18 @@ static void mem_serial_out(struct uart_port *p, int offset, int value) writeb(value, p->membase + offset); } +static void mem16_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + writew(value, p->membase + offset); +} + +static unsigned int mem16_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + return readw(p->membase + offset); +} + static void mem32_serial_out(struct uart_port *p, int offset, int value) { offset = offset << p->regshift; @@ -425,6 +437,11 @@ static void set_io_from_upio(struct uart_port *p) p->serial_out = mem_serial_out; break; + case UPIO_MEM16: + p->serial_in = mem16_serial_in; + p->serial_out = mem16_serial_out; + break; + case UPIO_MEM32: p->serial_in = mem32_serial_in; p->serial_out = mem32_serial_out; @@ -459,6 +476,7 @@ serial_port_out_sync(struct uart_port *p, int offset, int value) { switch (p->iotype) { case UPIO_MEM: + case UPIO_MEM16: case UPIO_MEM32: case UPIO_MEM32BE: case UPIO_AU: @@ -2462,6 +2480,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM32BE: + case UPIO_MEM16: case UPIO_MEM: if (!port->mapbase) break; @@ -2499,6 +2518,7 @@ static void serial8250_release_std_resource(struct uart_8250_port *up) case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM32BE: + case UPIO_MEM16: case UPIO_MEM: if (!port->mapbase) break; diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index d11621e2cf1d..bab6b3ae2540 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -13,6 +13,7 @@ */ #include <linux/clk.h> +#include <linux/console.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> @@ -34,6 +35,29 @@ struct uniphier8250_priv { spinlock_t atomic_write_lock; }; +#ifdef CONFIG_SERIAL_8250_CONSOLE +static int __init uniphier_early_console_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + /* This hardware 
always expects MMIO32 register interface. */ + device->port.iotype = UPIO_MEM32; + device->port.regshift = 2; + + /* + * Do not touch the divisor register in early_serial8250_setup(); + * we assume it has been initialized by a boot loader. + */ + device->baud = 0; + + return early_serial8250_setup(device, options); +} +OF_EARLYCON_DECLARE(uniphier, "socionext,uniphier-uart", + uniphier_early_console_setup); +#endif + /* * The register map is slightly different from that of 8250. * IO callbacks must be overridden for correct access to FCR, LCR, and MCR. @@ -115,12 +139,16 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) */ static int uniphier_serial_dl_read(struct uart_8250_port *up) { - return readl(up->port.membase + UNIPHIER_UART_DLR); + int offset = UNIPHIER_UART_DLR << up->port.regshift; + + return readl(up->port.membase + offset); } static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) { - writel(value, up->port.membase + UNIPHIER_UART_DLR); + int offset = UNIPHIER_UART_DLR << up->port.regshift; + + writel(value, up->port.membase + offset); } static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port, diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 6412f1455beb..b03cb5175113 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -361,9 +361,8 @@ config SERIAL_8250_UNIPHIER config SERIAL_8250_INGENIC bool "Support for Ingenic SoC serial ports" - depends on SERIAL_8250_CONSOLE && OF_FLATTREE + depends on OF_FLATTREE select LIBFDT - select SERIAL_EARLYCON help If you have a system using an Ingenic SoC and wish to make use of its UARTs, say Y to this option. If unsure, say N. @@ -372,9 +371,18 @@ config SERIAL_8250_MID tristate "Support for serial ports on Intel MID platforms" depends on SERIAL_8250 && PCI select HSU_DMA if SERIAL_8250_DMA - select HSU_DMA_PCI if X86_INTEL_MID + select HSU_DMA_PCI if (HSU_DMA && X86_INTEL_MID) select RATIONAL help Selecting this option will enable handling of the extra features present on the UART found on Intel Medfield SOC and various other Intel platforms. + +config SERIAL_OF_PLATFORM + tristate "Devicetree based probing for 8250 ports" + depends on SERIAL_8250 && OF + help + This option is used for all 8250 compatible serial ports that + are probed through devicetree, including Open Firmware based + PowerPC systems and embedded systems on architectures using the + flattened device tree format. 
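The new UPIO_MEM16 iotype wired up above (8250_early.c, 8250_port.c and the reg-io-width = <2> case in 8250_of.c) makes the 8250 core use readw()/writew() for ports whose registers sit on a 16-bit bus. Besides devicetree probing, a platform driver could request it directly when registering a port; the following is only a sketch with made-up values (clock, flags, register spacing), not code from this series:

#include <linux/serial_8250.h>
#include <linux/serial_core.h>

/* Illustrative: register a memory-mapped 16550A accessed 16 bits wide,
 * with registers spaced two bytes apart (reg-shift = <1> in DT terms). */
static int example_register_mem16_uart(void __iomem *base,
					resource_size_t mapbase, int irq)
{
	struct uart_8250_port up = { };

	up.port.iotype   = UPIO_MEM16;	/* added by this series */
	up.port.regshift = 1;
	up.port.membase  = base;
	up.port.mapbase  = mapbase;
	up.port.irq      = irq;
	up.port.uartclk  = 1843200;
	up.port.flags    = UPF_FIXED_TYPE;
	up.port.type     = PORT_16550A;

	return serial8250_register_8250_port(&up);
}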
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index e177f8681ada..b9b9bca5b6c3 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -28,5 +28,6 @@ obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o +obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index f38beb28e7ae..d27a0c62a75f 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -115,6 +115,7 @@ config SERIAL_SB1250_DUART_CONSOLE config SERIAL_ATMEL bool "AT91 / AT32 on-chip serial port support" + depends on HAS_DMA depends on ARCH_AT91 || AVR32 || COMPILE_TEST select SERIAL_CORE select SERIAL_MCTRL_GPIO if GPIOLIB @@ -571,9 +572,11 @@ config BFIN_UART3_CTSRTS config SERIAL_IMX tristate "IMX serial port support" + depends on HAS_DMA depends on ARCH_MXC || COMPILE_TEST select SERIAL_CORE select RATIONAL + select SERIAL_MCTRL_GPIO if GPIOLIB help If you have a machine based on a Motorola IMX CPU you can enable its onboard serial port by enabling this option. @@ -1094,16 +1097,6 @@ config SERIAL_NETX_CONSOLE If you have enabled the serial port on the Hilscher NetX SoC you can make it the console by answering Y to this option. -config SERIAL_OF_PLATFORM - tristate "Serial port on Open Firmware platform bus" - depends on OF - depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL - help - If you have a PowerPC based system that has serial ports - on a platform specific bus, you should enable this option. - Currently, only 8250 compatible ports are supported, but - others can easily be added. - config SERIAL_OMAP tristate "OMAP serial port support" depends on ARCH_OMAP2PLUS @@ -1131,23 +1124,6 @@ config SERIAL_OMAP_CONSOLE your boot loader about how to pass options to the kernel at boot time.) -config SERIAL_OF_PLATFORM_NWPSERIAL - tristate "NWP serial port driver" - depends on PPC_DCR - select SERIAL_OF_PLATFORM - select SERIAL_CORE_CONSOLE - select SERIAL_CORE - help - This driver supports the cell network processor nwp serial - device. - -config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE - bool "Console on NWP serial port" - depends on SERIAL_OF_PLATFORM_NWPSERIAL=y - select SERIAL_CORE_CONSOLE - help - Support for Console on the NWP serial ports. - config SERIAL_LANTIQ bool "Lantiq serial driver" depends on LANTIQ @@ -1409,8 +1385,9 @@ config SERIAL_PCH_UART_CONSOLE warnings and which allows logins in single user mode). 
config SERIAL_MXS_AUART - depends on ARCH_MXS || COMPILE_TEST tristate "MXS AUART support" + depends on HAS_DMA + depends on ARCH_MXS || COMPILE_TEST select SERIAL_CORE select SERIAL_MCTRL_GPIO if GPIOLIB help diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 5ab41119b3dc..b391c9b31960 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -63,8 +63,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o -obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o -obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 899a77187bde..c0da0ccbbcf5 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -60,6 +60,8 @@ #include <linux/io.h> #include <linux/acpi.h> +#include "amba-pl011.h" + #define UART_NR 14 #define SERIAL_AMBA_MAJOR 204 @@ -71,11 +73,27 @@ #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) #define UART_DUMMY_DR_RX (1 << 16) +static u16 pl011_std_offsets[REG_ARRAY_SIZE] = { + [REG_DR] = UART01x_DR, + [REG_FR] = UART01x_FR, + [REG_LCRH_RX] = UART011_LCRH, + [REG_LCRH_TX] = UART011_LCRH, + [REG_IBRD] = UART011_IBRD, + [REG_FBRD] = UART011_FBRD, + [REG_CR] = UART011_CR, + [REG_IFLS] = UART011_IFLS, + [REG_IMSC] = UART011_IMSC, + [REG_RIS] = UART011_RIS, + [REG_MIS] = UART011_MIS, + [REG_ICR] = UART011_ICR, + [REG_DMACR] = UART011_DMACR, +}; + /* There is by now at least one vendor with differing details, so handle it */ struct vendor_data { + const u16 *reg_offset; unsigned int ifls; - unsigned int lcrh_tx; - unsigned int lcrh_rx; + bool access_32b; bool oversampling; bool dma_threshold; bool cts_event_workaround; @@ -91,9 +109,8 @@ static unsigned int get_fifosize_arm(struct amba_device *dev) } static struct vendor_data vendor_arm = { + .reg_offset = pl011_std_offsets, .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, - .lcrh_tx = UART011_LCRH, - .lcrh_rx = UART011_LCRH, .oversampling = false, .dma_threshold = false, .cts_event_workaround = false, @@ -103,6 +120,7 @@ static struct vendor_data vendor_arm = { }; static struct vendor_data vendor_sbsa = { + .reg_offset = pl011_std_offsets, .oversampling = false, .dma_threshold = false, .cts_event_workaround = false, @@ -110,15 +128,41 @@ static struct vendor_data vendor_sbsa = { .fixed_options = true, }; +static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { + [REG_DR] = UART01x_DR, + [REG_ST_DMAWM] = ST_UART011_DMAWM, + [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT, + [REG_FR] = UART01x_FR, + [REG_LCRH_RX] = ST_UART011_LCRH_RX, + [REG_LCRH_TX] = ST_UART011_LCRH_TX, + [REG_IBRD] = UART011_IBRD, + [REG_FBRD] = UART011_FBRD, + [REG_CR] = UART011_CR, + [REG_IFLS] = UART011_IFLS, + [REG_IMSC] = UART011_IMSC, + [REG_RIS] = UART011_RIS, + [REG_MIS] = UART011_MIS, + [REG_ICR] = UART011_ICR, + [REG_DMACR] = UART011_DMACR, + [REG_ST_XFCR] = ST_UART011_XFCR, + [REG_ST_XON1] = ST_UART011_XON1, + [REG_ST_XON2] = ST_UART011_XON2, + [REG_ST_XOFF1] = ST_UART011_XOFF1, + [REG_ST_XOFF2] = ST_UART011_XOFF2, + [REG_ST_ITCR] = ST_UART011_ITCR, + [REG_ST_ITIP] = ST_UART011_ITIP, + [REG_ST_ABCR] = ST_UART011_ABCR, + [REG_ST_ABIMSC] = ST_UART011_ABIMSC, +}; + static unsigned int get_fifosize_st(struct amba_device *dev) { return 64; } 
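The vendor_data change in amba-pl011.c replaces the two vendor-specific lcrh_tx/lcrh_rx fields with a full per-vendor offset table indexed by the logical REG_* enum (added in amba-pl011.h further down). Callers stop caring where a register lives: they pass a logical index and the accessor resolves the SoC-specific offset and access width. A rough sketch of how a caller then looks, assuming the pl011_read/pl011_write helpers introduced later in this diff; demo_set_break is an illustrative stand-in for what pl011_break_ctl does in the converted driver:

/*
 * Toggle the break bit using only logical register indices.  The same
 * code serves the standard ARM layout, the ST split LCRH_RX/LCRH_TX
 * layout and the ZTE layout with 32-bit-only accesses, because
 * uap->reg_offset and uap->port.iotype carry the difference.
 */
static void demo_set_break(struct uart_amba_port *uap, bool on)
{
	unsigned int lcr_h = pl011_read(uap, REG_LCRH_TX);

	if (on)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;

	pl011_write(lcr_h, uap, REG_LCRH_TX);
}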
static struct vendor_data vendor_st = { + .reg_offset = pl011_st_offsets, .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF, - .lcrh_tx = ST_UART011_LCRH_TX, - .lcrh_rx = ST_UART011_LCRH_RX, .oversampling = true, .dma_threshold = true, .cts_event_workaround = true, @@ -127,6 +171,29 @@ static struct vendor_data vendor_st = { .get_fifosize = get_fifosize_st, }; +static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = { + [REG_DR] = ZX_UART011_DR, + [REG_FR] = ZX_UART011_FR, + [REG_LCRH_RX] = ZX_UART011_LCRH, + [REG_LCRH_TX] = ZX_UART011_LCRH, + [REG_IBRD] = ZX_UART011_IBRD, + [REG_FBRD] = ZX_UART011_FBRD, + [REG_CR] = ZX_UART011_CR, + [REG_IFLS] = ZX_UART011_IFLS, + [REG_IMSC] = ZX_UART011_IMSC, + [REG_RIS] = ZX_UART011_RIS, + [REG_MIS] = ZX_UART011_MIS, + [REG_ICR] = ZX_UART011_ICR, + [REG_DMACR] = ZX_UART011_DMACR, +}; + +static struct vendor_data vendor_zte = { + .reg_offset = pl011_zte_offsets, + .access_32b = true, + .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, + .get_fifosize = get_fifosize_arm, +}; + /* Deals with DMA transactions */ struct pl011_sgbuf { @@ -162,14 +229,13 @@ struct pl011_dmatx_data { */ struct uart_amba_port { struct uart_port port; + const u16 *reg_offset; struct clk *clk; const struct vendor_data *vendor; unsigned int dmacr; /* dma control reg */ unsigned int im; /* interrupt mask */ unsigned int old_status; unsigned int fifosize; /* vendor-specific */ - unsigned int lcrh_tx; /* vendor-specific */ - unsigned int lcrh_rx; /* vendor-specific */ unsigned int old_cr; /* state during shutdown */ bool autorts; unsigned int fixed_baud; /* vendor-set fixed baud rate */ @@ -184,6 +250,32 @@ struct uart_amba_port { #endif }; +static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap, + unsigned int reg) +{ + return uap->reg_offset[reg]; +} + +static unsigned int pl011_read(const struct uart_amba_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); + + return (uap->port.iotype == UPIO_MEM32) ? + readl_relaxed(addr) : readw_relaxed(addr); +} + +static void pl011_write(unsigned int val, const struct uart_amba_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); + + if (uap->port.iotype == UPIO_MEM32) + writel_relaxed(val, addr); + else + writew_relaxed(val, addr); +} + /* * Reads up to 256 characters from the FIFO or until it's empty and * inserts them into the TTY layer. 
Returns the number of characters @@ -196,13 +288,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap) int fifotaken = 0; while (max_count--) { - status = readw(uap->port.membase + UART01x_FR); + status = pl011_read(uap, REG_FR); if (status & UART01x_FR_RXFE) break; /* Take chars from the FIFO and update status */ - ch = readw(uap->port.membase + UART01x_DR) | - UART_DUMMY_DR_RX; + ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX; flag = TTY_NORMAL; uap->port.icount.rx++; fifotaken++; @@ -284,7 +375,8 @@ static void pl011_dma_probe(struct uart_amba_port *uap) struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev); struct device *dev = uap->port.dev; struct dma_slave_config tx_conf = { - .dst_addr = uap->port.mapbase + UART01x_DR, + .dst_addr = uap->port.mapbase + + pl011_reg_to_offset(uap, REG_DR), .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, .direction = DMA_MEM_TO_DEV, .dst_maxburst = uap->fifosize >> 1, @@ -339,7 +431,8 @@ static void pl011_dma_probe(struct uart_amba_port *uap) if (chan) { struct dma_slave_config rx_conf = { - .src_addr = uap->port.mapbase + UART01x_DR, + .src_addr = uap->port.mapbase + + pl011_reg_to_offset(uap, REG_DR), .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, .direction = DMA_DEV_TO_MEM, .src_maxburst = uap->fifosize >> 2, @@ -438,7 +531,7 @@ static void pl011_dma_tx_callback(void *data) dmacr = uap->dmacr; uap->dmacr = dmacr & ~UART011_TXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); /* * If TX DMA was disabled, it means that we've stopped the DMA for @@ -552,7 +645,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) dma_dev->device_issue_pending(chan); uap->dmacr |= UART011_TXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); uap->dmatx.queued = true; /* @@ -588,9 +681,9 @@ static bool pl011_dma_tx_irq(struct uart_amba_port *uap) */ if (uap->dmatx.queued) { uap->dmacr |= UART011_TXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); uap->im &= ~UART011_TXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); return true; } @@ -600,7 +693,7 @@ static bool pl011_dma_tx_irq(struct uart_amba_port *uap) */ if (pl011_dma_tx_refill(uap) > 0) { uap->im &= ~UART011_TXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); return true; } return false; @@ -614,7 +707,7 @@ static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) { if (uap->dmatx.queued) { uap->dmacr &= ~UART011_TXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); } } @@ -640,14 +733,12 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) if (!uap->dmatx.queued) { if (pl011_dma_tx_refill(uap) > 0) { uap->im &= ~UART011_TXIM; - writew(uap->im, uap->port.membase + - UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); } else ret = false; } else if (!(uap->dmacr & UART011_TXDMAE)) { uap->dmacr |= UART011_TXDMAE; - writew(uap->dmacr, - uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); } return ret; } @@ -658,9 +749,9 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) */ dmacr = uap->dmacr; uap->dmacr &= ~UART011_TXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); - if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) { + if (pl011_read(uap, REG_FR) & 
UART01x_FR_TXFF) { /* * No space in the FIFO, so enable the transmit interrupt * so we know when there is space. Note that once we've @@ -669,13 +760,13 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) return false; } - writew(uap->port.x_char, uap->port.membase + UART01x_DR); + pl011_write(uap->port.x_char, uap, REG_DR); uap->port.icount.tx++; uap->port.x_char = 0; /* Success - restore the DMA state */ uap->dmacr = dmacr; - writew(dmacr, uap->port.membase + UART011_DMACR); + pl011_write(dmacr, uap, REG_DMACR); return true; } @@ -703,7 +794,7 @@ __acquires(&uap->port.lock) DMA_TO_DEVICE); uap->dmatx.queued = false; uap->dmacr &= ~UART011_TXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); } } @@ -743,11 +834,11 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) dma_async_issue_pending(rxchan); uap->dmacr |= UART011_RXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); uap->dmarx.running = true; uap->im &= ~UART011_RXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); return 0; } @@ -805,8 +896,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap, */ if (dma_count == pending && readfifo) { /* Clear any error flags */ - writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS, - uap->port.membase + UART011_ICR); + pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | + UART011_FEIS, uap, REG_ICR); /* * If we read all the DMA'd characters, and we had an @@ -854,7 +945,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap) /* Disable RX DMA - incoming data will wait in the FIFO */ uap->dmacr &= ~UART011_RXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); uap->dmarx.running = false; pending = sgbuf->sg.length - state.residue; @@ -874,7 +965,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap) dev_dbg(uap->port.dev, "could not retrigger RX DMA job " "fall back to interrupt mode\n"); uap->im |= UART011_RXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); } } @@ -922,7 +1013,7 @@ static void pl011_dma_rx_callback(void *data) dev_dbg(uap->port.dev, "could not retrigger RX DMA job " "fall back to interrupt mode\n"); uap->im |= UART011_RXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); } } @@ -935,7 +1026,7 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) { /* FIXME. Just disable the DMA enable */ uap->dmacr &= ~UART011_RXDMAE; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); } /* @@ -979,7 +1070,7 @@ static void pl011_dma_rx_poll(unsigned long args) spin_lock_irqsave(&uap->port.lock, flags); pl011_dma_rx_stop(uap); uap->im |= UART011_RXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); spin_unlock_irqrestore(&uap->port.lock, flags); uap->dmarx.running = false; @@ -1041,7 +1132,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap) skip_rx: /* Turn on DMA error (RX/TX will be enabled on demand) */ uap->dmacr |= UART011_DMAONERR; - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); /* * ST Micro variants has some specific dma burst threshold @@ -1049,8 +1140,8 @@ skip_rx: * be issued above/below 16 bytes. 
*/ if (uap->vendor->dma_threshold) - writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, - uap->port.membase + ST_UART011_DMAWM); + pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, + uap, REG_ST_DMAWM); if (uap->using_rx_dma) { if (pl011_dma_rx_trigger_dma(uap)) @@ -1075,12 +1166,12 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap) return; /* Disable RX and TX DMA */ - while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) + while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY) barrier(); spin_lock_irq(&uap->port.lock); uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); - writew(uap->dmacr, uap->port.membase + UART011_DMACR); + pl011_write(uap->dmacr, uap, REG_DMACR); spin_unlock_irq(&uap->port.lock); if (uap->using_tx_dma) { @@ -1181,7 +1272,7 @@ static void pl011_stop_tx(struct uart_port *port) container_of(port, struct uart_amba_port, port); uap->im &= ~UART011_TXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); pl011_dma_tx_stop(uap); } @@ -1191,7 +1282,7 @@ static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); static void pl011_start_tx_pio(struct uart_amba_port *uap) { uap->im |= UART011_TXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); pl011_tx_chars(uap, false); } @@ -1211,7 +1302,7 @@ static void pl011_stop_rx(struct uart_port *port) uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| UART011_PEIM|UART011_BEIM|UART011_OEIM); - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); pl011_dma_rx_stop(uap); } @@ -1222,7 +1313,7 @@ static void pl011_enable_ms(struct uart_port *port) container_of(port, struct uart_amba_port, port); uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); } static void pl011_rx_chars(struct uart_amba_port *uap) @@ -1242,7 +1333,7 @@ __acquires(&uap->port.lock) dev_dbg(uap->port.dev, "could not trigger RX DMA job " "fall back to interrupt mode again\n"); uap->im |= UART011_RXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); } else { #ifdef CONFIG_DMA_ENGINE /* Start Rx DMA poll */ @@ -1263,10 +1354,10 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, bool from_irq) { if (unlikely(!from_irq) && - readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) + pl011_read(uap, REG_FR) & UART01x_FR_TXFF) return false; /* unable to transmit character */ - writew(c, uap->port.membase + UART01x_DR); + pl011_write(c, uap, REG_DR); uap->port.icount.tx++; return true; @@ -1313,7 +1404,7 @@ static void pl011_modem_status(struct uart_amba_port *uap) { unsigned int status, delta; - status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; + status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; delta = status ^ uap->old_status; uap->old_status = status; @@ -1341,15 +1432,15 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap) return; /* workaround to make sure that all bits are unlocked.. 
*/ - writew(0x00, uap->port.membase + UART011_ICR); + pl011_write(0x00, uap, REG_ICR); /* * WA: introduce 26ns(1 uart clk) delay before W1C; * single apb access will incur 2 pclk(133.12Mhz) delay, * so add 2 dummy reads */ - dummy_read = readw(uap->port.membase + UART011_ICR); - dummy_read = readw(uap->port.membase + UART011_ICR); + dummy_read = pl011_read(uap, REG_ICR); + dummy_read = pl011_read(uap, REG_ICR); } static irqreturn_t pl011_int(int irq, void *dev_id) @@ -1361,15 +1452,15 @@ static irqreturn_t pl011_int(int irq, void *dev_id) int handled = 0; spin_lock_irqsave(&uap->port.lock, flags); - imsc = readw(uap->port.membase + UART011_IMSC); - status = readw(uap->port.membase + UART011_RIS) & imsc; + imsc = pl011_read(uap, REG_IMSC); + status = pl011_read(uap, REG_RIS) & imsc; if (status) { do { check_apply_cts_event_workaround(uap); - writew(status & ~(UART011_TXIS|UART011_RTIS| - UART011_RXIS), - uap->port.membase + UART011_ICR); + pl011_write(status & ~(UART011_TXIS|UART011_RTIS| + UART011_RXIS), + uap, REG_ICR); if (status & (UART011_RTIS|UART011_RXIS)) { if (pl011_dma_rx_running(uap)) @@ -1386,7 +1477,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id) if (pass_counter-- == 0) break; - status = readw(uap->port.membase + UART011_RIS) & imsc; + status = pl011_read(uap, REG_RIS) & imsc; } while (status != 0); handled = 1; } @@ -1400,7 +1491,7 @@ static unsigned int pl011_tx_empty(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); - unsigned int status = readw(uap->port.membase + UART01x_FR); + unsigned int status = pl011_read(uap, REG_FR); return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT; } @@ -1409,7 +1500,7 @@ static unsigned int pl011_get_mctrl(struct uart_port *port) struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); unsigned int result = 0; - unsigned int status = readw(uap->port.membase + UART01x_FR); + unsigned int status = pl011_read(uap, REG_FR); #define TIOCMBIT(uartbit, tiocmbit) \ if (status & uartbit) \ @@ -1429,7 +1520,7 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl) container_of(port, struct uart_amba_port, port); unsigned int cr; - cr = readw(uap->port.membase + UART011_CR); + cr = pl011_read(uap, REG_CR); #define TIOCMBIT(tiocmbit, uartbit) \ if (mctrl & tiocmbit) \ @@ -1449,7 +1540,7 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl) } #undef TIOCMBIT - writew(cr, uap->port.membase + UART011_CR); + pl011_write(cr, uap, REG_CR); } static void pl011_break_ctl(struct uart_port *port, int break_state) @@ -1460,12 +1551,12 @@ static void pl011_break_ctl(struct uart_port *port, int break_state) unsigned int lcr_h; spin_lock_irqsave(&uap->port.lock, flags); - lcr_h = readw(uap->port.membase + uap->lcrh_tx); + lcr_h = pl011_read(uap, REG_LCRH_TX); if (break_state == -1) lcr_h |= UART01x_LCRH_BRK; else lcr_h &= ~UART01x_LCRH_BRK; - writew(lcr_h, uap->port.membase + uap->lcrh_tx); + pl011_write(lcr_h, uap, REG_LCRH_TX); spin_unlock_irqrestore(&uap->port.lock, flags); } @@ -1475,9 +1566,8 @@ static void pl011_quiesce_irqs(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); - unsigned char __iomem *regs = uap->port.membase; - writew(readw(regs + UART011_MIS), regs + UART011_ICR); + pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR); /* * There is no way to clear TXIM as this is "ready to transmit IRQ", so * we simply mask it. start_tx() will unmask it. 
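Throughout these hunks the driver keeps a shadow copy of the interrupt mask in uap->im and pushes it to the hardware through the new vendor-aware accessor instead of a raw writew(). A condensed sketch of that mask/ack idiom, assuming the pl011_read/pl011_write helpers above and the port lock held by the caller; the demo_* names are illustrative, but the bodies mirror pl011_stop_tx and pl011_enable_interrupts in this diff:

/*
 * Disable the TX interrupt: update the cached mask first, then write it
 * out through the accessor so the correct IMSC offset and access width
 * are used for this vendor.
 */
static void demo_mask_tx_irq(struct uart_amba_port *uap)
{
	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

/* Acknowledge pending receive interrupts without touching the mask. */
static void demo_ack_rx_irqs(struct uart_amba_port *uap)
{
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
}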
@@ -1491,7 +1581,8 @@ static void pl011_quiesce_irqs(struct uart_port *port) * (including tx queue), so we're also fine with start_tx()'s caller * side. */ - writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC); + pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap, + REG_IMSC); } static int pl011_get_poll_char(struct uart_port *port) @@ -1506,11 +1597,11 @@ static int pl011_get_poll_char(struct uart_port *port) */ pl011_quiesce_irqs(port); - status = readw(uap->port.membase + UART01x_FR); + status = pl011_read(uap, REG_FR); if (status & UART01x_FR_RXFE) return NO_POLL_CHAR; - return readw(uap->port.membase + UART01x_DR); + return pl011_read(uap, REG_DR); } static void pl011_put_poll_char(struct uart_port *port, @@ -1519,10 +1610,10 @@ static void pl011_put_poll_char(struct uart_port *port, struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); - while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) + while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) barrier(); - writew(ch, uap->port.membase + UART01x_DR); + pl011_write(ch, uap, REG_DR); } #endif /* CONFIG_CONSOLE_POLL */ @@ -1546,15 +1637,16 @@ static int pl011_hwinit(struct uart_port *port) uap->port.uartclk = clk_get_rate(uap->clk); /* Clear pending error and receive interrupts */ - writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS | - UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR); + pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | + UART011_FEIS | UART011_RTIS | UART011_RXIS, + uap, REG_ICR); /* * Save interrupts enable mask, and enable RX interrupts in case if * the interrupt is used for NMI entry. */ - uap->im = readw(uap->port.membase + UART011_IMSC); - writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC); + uap->im = pl011_read(uap, REG_IMSC); + pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC); if (dev_get_platdata(uap->port.dev)) { struct amba_pl011_data *plat; @@ -1566,24 +1658,30 @@ static int pl011_hwinit(struct uart_port *port) return 0; } +static bool pl011_split_lcrh(const struct uart_amba_port *uap) +{ + return pl011_reg_to_offset(uap, REG_LCRH_RX) != + pl011_reg_to_offset(uap, REG_LCRH_TX); +} + static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h) { - writew(lcr_h, uap->port.membase + uap->lcrh_rx); - if (uap->lcrh_rx != uap->lcrh_tx) { + pl011_write(lcr_h, uap, REG_LCRH_RX); + if (pl011_split_lcrh(uap)) { int i; /* * Wait 10 PCLKs before writing LCRH_TX register, * to get this delay write read only register 10 times */ for (i = 0; i < 10; ++i) - writew(0xff, uap->port.membase + UART011_MIS); - writew(lcr_h, uap->port.membase + uap->lcrh_tx); + pl011_write(0xff, uap, REG_MIS); + pl011_write(lcr_h, uap, REG_LCRH_TX); } } static int pl011_allocate_irq(struct uart_amba_port *uap) { - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap); } @@ -1598,12 +1696,11 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) spin_lock_irq(&uap->port.lock); /* Clear out any spuriously appearing RX interrupts */ - writew(UART011_RTIS | UART011_RXIS, - uap->port.membase + UART011_ICR); + pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); uap->im = UART011_RTIM; if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; - writew(uap->im, uap->port.membase + UART011_IMSC); + pl011_write(uap->im, uap, REG_IMSC); spin_unlock_irq(&uap->port.lock); } @@ -1622,21 +1719,21 @@ static int 
pl011_startup(struct uart_port *port) if (retval) goto clk_dis; - writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); + pl011_write(uap->vendor->ifls, uap, REG_IFLS); spin_lock_irq(&uap->port.lock); /* restore RTS and DTR */ cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; - writew(cr, uap->port.membase + UART011_CR); + pl011_write(cr, uap, REG_CR); spin_unlock_irq(&uap->port.lock); /* * initialise the old status of the modem signals */ - uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; + uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; /* Startup DMA */ pl011_dma_startup(uap); @@ -1677,9 +1774,9 @@ static void pl011_shutdown_channel(struct uart_amba_port *uap, { unsigned long val; - val = readw(uap->port.membase + lcrh); + val = pl011_read(uap, lcrh); val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); - writew(val, uap->port.membase + lcrh); + pl011_write(val, uap, lcrh); } /* @@ -1693,19 +1790,19 @@ static void pl011_disable_uart(struct uart_amba_port *uap) uap->autorts = false; spin_lock_irq(&uap->port.lock); - cr = readw(uap->port.membase + UART011_CR); + cr = pl011_read(uap, REG_CR); uap->old_cr = cr; cr &= UART011_CR_RTS | UART011_CR_DTR; cr |= UART01x_CR_UARTEN | UART011_CR_TXE; - writew(cr, uap->port.membase + UART011_CR); + pl011_write(cr, uap, REG_CR); spin_unlock_irq(&uap->port.lock); /* * disable break condition and fifos */ - pl011_shutdown_channel(uap, uap->lcrh_rx); - if (uap->lcrh_rx != uap->lcrh_tx) - pl011_shutdown_channel(uap, uap->lcrh_tx); + pl011_shutdown_channel(uap, REG_LCRH_RX); + if (pl011_split_lcrh(uap)) + pl011_shutdown_channel(uap, REG_LCRH_TX); } static void pl011_disable_interrupts(struct uart_amba_port *uap) @@ -1714,8 +1811,8 @@ static void pl011_disable_interrupts(struct uart_amba_port *uap) /* mask all interrupts and clear all pending ones */ uap->im = 0; - writew(uap->im, uap->port.membase + UART011_IMSC); - writew(0xffff, uap->port.membase + UART011_ICR); + pl011_write(uap->im, uap, REG_IMSC); + pl011_write(0xffff, uap, REG_ICR); spin_unlock_irq(&uap->port.lock); } @@ -1867,8 +1964,8 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, pl011_enable_ms(port); /* first, disable everything */ - old_cr = readw(port->membase + UART011_CR); - writew(0, port->membase + UART011_CR); + old_cr = pl011_read(uap, REG_CR); + pl011_write(0, uap, REG_CR); if (termios->c_cflag & CRTSCTS) { if (old_cr & UART011_CR_RTS) @@ -1901,17 +1998,17 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, quot -= 2; } /* Set baud rate */ - writew(quot & 0x3f, port->membase + UART011_FBRD); - writew(quot >> 6, port->membase + UART011_IBRD); + pl011_write(quot & 0x3f, uap, REG_FBRD); + pl011_write(quot >> 6, uap, REG_IBRD); /* * ----------v----------v----------v----------v----- - * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER - * UART011_FBRD & UART011_IBRD. + * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER + * REG_FBRD & REG_IBRD. 
* ----------^----------^----------^----------^----- */ pl011_write_lcr_h(uap, lcr_h); - writew(old_cr, port->membase + UART011_CR); + pl011_write(old_cr, uap, REG_CR); spin_unlock_irqrestore(&port->lock, flags); } @@ -2052,9 +2149,9 @@ static void pl011_console_putchar(struct uart_port *port, int ch) struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); - while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) + while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) barrier(); - writew(ch, uap->port.membase + UART01x_DR); + pl011_write(ch, uap, REG_DR); } static void @@ -2079,10 +2176,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) * First save the CR then disable the interrupts */ if (!uap->vendor->always_enabled) { - old_cr = readw(uap->port.membase + UART011_CR); + old_cr = pl011_read(uap, REG_CR); new_cr = old_cr & ~UART011_CR_CTSEN; new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; - writew(new_cr, uap->port.membase + UART011_CR); + pl011_write(new_cr, uap, REG_CR); } uart_console_write(&uap->port, s, count, pl011_console_putchar); @@ -2092,10 +2189,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) * and restore the TCR */ do { - status = readw(uap->port.membase + UART01x_FR); + status = pl011_read(uap, REG_FR); } while (status & UART01x_FR_BUSY); if (!uap->vendor->always_enabled) - writew(old_cr, uap->port.membase + UART011_CR); + pl011_write(old_cr, uap, REG_CR); if (locked) spin_unlock(&uap->port.lock); @@ -2108,10 +2205,10 @@ static void __init pl011_console_get_options(struct uart_amba_port *uap, int *baud, int *parity, int *bits) { - if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) { + if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; - lcr_h = readw(uap->port.membase + uap->lcrh_tx); + lcr_h = pl011_read(uap, REG_LCRH_TX); *parity = 'n'; if (lcr_h & UART01x_LCRH_PEN) { @@ -2126,13 +2223,13 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, else *bits = 8; - ibrd = readw(uap->port.membase + UART011_IBRD); - fbrd = readw(uap->port.membase + UART011_FBRD); + ibrd = pl011_read(uap, REG_IBRD); + fbrd = pl011_read(uap, REG_FBRD); *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); if (uap->vendor->oversampling) { - if (readw(uap->port.membase + UART011_CR) + if (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT) *baud *= 2; } @@ -2206,7 +2303,10 @@ static void pl011_putc(struct uart_port *port, int c) { while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) ; - writeb(c, port->membase + UART01x_DR); + if (port->iotype == UPIO_MEM32) + writel(c, port->membase + UART01x_DR); + else + writeb(c, port->membase + UART01x_DR); while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) ; } @@ -2319,7 +2419,6 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, uap->port.dev = dev; uap->port.mapbase = mmiobase->start; uap->port.membase = base; - uap->port.iotype = UPIO_MEM; uap->port.fifosize = uap->fifosize; uap->port.flags = UPF_BOOT_AUTOCONF; uap->port.line = index; @@ -2334,8 +2433,8 @@ static int pl011_register_port(struct uart_amba_port *uap) int ret; /* Ensure interrupts from this UART are masked and cleared */ - writew(0, uap->port.membase + UART011_IMSC); - writew(0xffff, uap->port.membase + UART011_ICR); + pl011_write(0, uap, REG_IMSC); + pl011_write(0xffff, uap, REG_ICR); if (!amba_reg.state) { ret = uart_register_driver(&amba_reg); @@ -2372,10 +2471,10 @@ static int pl011_probe(struct amba_device *dev, 
const struct amba_id *id) if (IS_ERR(uap->clk)) return PTR_ERR(uap->clk); + uap->reg_offset = vendor->reg_offset; uap->vendor = vendor; - uap->lcrh_rx = vendor->lcrh_rx; - uap->lcrh_tx = vendor->lcrh_tx; uap->fifosize = vendor->get_fifosize(dev); + uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; uap->port.irq = dev->irq[0]; uap->port.ops = &amba_pl011_pops; @@ -2453,8 +2552,10 @@ static int sbsa_uart_probe(struct platform_device *pdev) if (!uap) return -ENOMEM; + uap->reg_offset = vendor_sbsa.reg_offset; uap->vendor = &vendor_sbsa; uap->fifosize = 32; + uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM; uap->port.irq = platform_get_irq(pdev, 0); uap->port.ops = &sbsa_uart_pops; uap->fixed_baud = baudrate; diff --git a/drivers/tty/serial/amba-pl011.h b/drivers/tty/serial/amba-pl011.h new file mode 100644 index 000000000000..411c60e1f9a4 --- /dev/null +++ b/drivers/tty/serial/amba-pl011.h @@ -0,0 +1,34 @@ +#ifndef AMBA_PL011_H +#define AMBA_PL011_H + +enum { + REG_DR, + REG_ST_DMAWM, + REG_ST_TIMEOUT, + REG_FR, + REG_LCRH_RX, + REG_LCRH_TX, + REG_IBRD, + REG_FBRD, + REG_CR, + REG_IFLS, + REG_IMSC, + REG_RIS, + REG_MIS, + REG_ICR, + REG_DMACR, + REG_ST_XFCR, + REG_ST_XON1, + REG_ST_XON2, + REG_ST_XOFF1, + REG_ST_XOFF2, + REG_ST_ITCR, + REG_ST_ITIP, + REG_ST_ABCR, + REG_ST_ABIMSC, + + /* The size of the array - must be last */ + REG_ARRAY_SIZE, +}; + +#endif diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 94294558943c..1c0884d8ef32 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -22,7 +22,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ -#include <linux/module.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/slab.h> @@ -155,7 +154,6 @@ struct atmel_uart_port { struct circ_buf rx_ring; struct mctrl_gpios *gpios; - int gpio_irq[UART_GPIO_MAX]; unsigned int tx_done_mask; u32 fifo_size; u32 rts_high; @@ -190,8 +188,6 @@ static const struct of_device_id atmel_serial_dt_ids[] = { { .compatible = "atmel,at91sam9260-usart" }, { /* sentinel */ } }; - -MODULE_DEVICE_TABLE(of, atmel_serial_dt_ids); #endif static inline struct atmel_uart_port * @@ -550,27 +546,21 @@ static void atmel_enable_ms(struct uart_port *port) atmel_port->ms_irq_enabled = true; - if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0) - enable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) ier |= ATMEL_US_CTSIC; - if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0) - enable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) ier |= ATMEL_US_DSRIC; - if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0) - enable_irq(atmel_port->gpio_irq[UART_GPIO_RI]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) ier |= ATMEL_US_RIIC; - if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0) - enable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) ier |= ATMEL_US_DCDIC; atmel_uart_writel(port, ATMEL_US_IER, ier); + + mctrl_gpio_enable_ms(atmel_port->gpios); } /* @@ -589,24 +579,18 @@ static void atmel_disable_ms(struct uart_port *port) atmel_port->ms_irq_enabled = false; - if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0) - disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]); - else + mctrl_gpio_disable_ms(atmel_port->gpios); + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) idr |= ATMEL_US_CTSIC; - if 
(atmel_port->gpio_irq[UART_GPIO_DSR] >= 0) - disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) idr |= ATMEL_US_DSRIC; - if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0) - disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) idr |= ATMEL_US_RIIC; - if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0) - disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]); - else + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) idr |= ATMEL_US_DCDIC; atmel_uart_writel(port, ATMEL_US_IDR, idr); @@ -1264,7 +1248,6 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) struct uart_port *port = dev_id; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int status, pending, mask, pass_counter = 0; - bool gpio_handled = false; spin_lock(&atmel_port->lock_suspended); @@ -1272,24 +1255,6 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) status = atmel_get_lines_status(port); mask = atmel_uart_readl(port, ATMEL_US_IMR); pending = status & mask; - if (!gpio_handled) { - /* - * Dealing with GPIO interrupt - */ - if (irq == atmel_port->gpio_irq[UART_GPIO_CTS]) - pending |= ATMEL_US_CTSIC; - - if (irq == atmel_port->gpio_irq[UART_GPIO_DSR]) - pending |= ATMEL_US_DSRIC; - - if (irq == atmel_port->gpio_irq[UART_GPIO_RI]) - pending |= ATMEL_US_RIIC; - - if (irq == atmel_port->gpio_irq[UART_GPIO_DCD]) - pending |= ATMEL_US_DCDIC; - - gpio_handled = true; - } if (!pending) break; @@ -1778,45 +1743,6 @@ static void atmel_get_ip_name(struct uart_port *port) } } -static void atmel_free_gpio_irq(struct uart_port *port) -{ - struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - enum mctrl_gpio_idx i; - - for (i = 0; i < UART_GPIO_MAX; i++) - if (atmel_port->gpio_irq[i] >= 0) - free_irq(atmel_port->gpio_irq[i], port); -} - -static int atmel_request_gpio_irq(struct uart_port *port) -{ - struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - int *irq = atmel_port->gpio_irq; - enum mctrl_gpio_idx i; - int err = 0; - - for (i = 0; (i < UART_GPIO_MAX) && !err; i++) { - if (irq[i] < 0) - continue; - - irq_set_status_flags(irq[i], IRQ_NOAUTOEN); - err = request_irq(irq[i], atmel_interrupt, IRQ_TYPE_EDGE_BOTH, - "atmel_serial", port); - if (err) - dev_err(port->dev, "atmel_startup - Can't get %d irq\n", - irq[i]); - } - - /* - * If something went wrong, rollback. 
- */ - while (err && (--i >= 0)) - if (irq[i] >= 0) - free_irq(irq[i], port); - - return err; -} - /* * Perform initialization and enable port for reception */ @@ -1846,13 +1772,6 @@ static int atmel_startup(struct uart_port *port) return retval; } - /* - * Get the GPIO lines IRQ - */ - retval = atmel_request_gpio_irq(port); - if (retval) - goto free_irq; - tasklet_enable(&atmel_port->tasklet); /* @@ -1948,11 +1867,6 @@ static int atmel_startup(struct uart_port *port) } return 0; - -free_irq: - free_irq(port->irq, port); - - return retval; } /* @@ -2018,7 +1932,6 @@ static void atmel_shutdown(struct uart_port *port) * Free the interrupts */ free_irq(port->irq, port); - atmel_free_gpio_irq(port); atmel_port->ms_irq_enabled = false; @@ -2686,26 +2599,6 @@ static int atmel_serial_resume(struct platform_device *pdev) #define atmel_serial_resume NULL #endif -static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev) -{ - enum mctrl_gpio_idx i; - struct gpio_desc *gpiod; - - p->gpios = mctrl_gpio_init_noauto(dev, 0); - if (IS_ERR(p->gpios)) - return PTR_ERR(p->gpios); - - for (i = 0; i < UART_GPIO_MAX; i++) { - gpiod = mctrl_gpio_to_gpiod(p->gpios, i); - if (gpiod && (gpiod_get_direction(gpiod) == GPIOF_DIR_IN)) - p->gpio_irq[i] = gpiod_to_irq(gpiod); - else - p->gpio_irq[i] = -EINVAL; - } - - return 0; -} - static void atmel_serial_probe_fifos(struct atmel_uart_port *port, struct platform_device *pdev) { @@ -2788,16 +2681,16 @@ static int atmel_serial_probe(struct platform_device *pdev) spin_lock_init(&port->lock_suspended); - ret = atmel_init_gpios(port, &pdev->dev); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to initialize GPIOs."); - goto err_clear_bit; - } - ret = atmel_init_port(port, pdev); if (ret) goto err_clear_bit; + port->gpios = mctrl_gpio_init(&port->uart, 0); + if (IS_ERR(port->gpios)) { + ret = PTR_ERR(port->gpios); + goto err_clear_bit; + } + if (!atmel_use_pdc_rx(&port->uart)) { ret = -ENOMEM; data = kmalloc(sizeof(struct atmel_uart_char) @@ -2866,37 +2759,14 @@ err: return ret; } -static int atmel_serial_remove(struct platform_device *pdev) -{ - struct uart_port *port = platform_get_drvdata(pdev); - struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - int ret = 0; - - tasklet_kill(&atmel_port->tasklet); - - device_init_wakeup(&pdev->dev, 0); - - ret = uart_remove_one_port(&atmel_uart, port); - - kfree(atmel_port->rx_ring.buf); - - /* "port" is allocated statically, so we shouldn't free it */ - - clear_bit(port->line, atmel_ports_in_use); - - clk_put(atmel_port->clk); - - return ret; -} - static struct platform_driver atmel_serial_driver = { .probe = atmel_serial_probe, - .remove = atmel_serial_remove, .suspend = atmel_serial_suspend, .resume = atmel_serial_resume, .driver = { - .name = "atmel_usart", - .of_match_table = of_match_ptr(atmel_serial_dt_ids), + .name = "atmel_usart", + .of_match_table = of_match_ptr(atmel_serial_dt_ids), + .suppress_bind_attrs = true, }, }; @@ -2914,17 +2784,4 @@ static int __init atmel_serial_init(void) return ret; } - -static void __exit atmel_serial_exit(void) -{ - platform_driver_unregister(&atmel_serial_driver); - uart_unregister_driver(&atmel_uart); -} - -module_init(atmel_serial_init); -module_exit(atmel_serial_exit); - -MODULE_AUTHOR("Rick Bronson"); -MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:atmel_usart"); +device_initcall(atmel_serial_init); diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 
a1c0a89d9c7f..c28e5c24da16 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -653,7 +653,7 @@ static struct uart_ops bcm_uart_ops = { #ifdef CONFIG_SERIAL_BCM63XX_CONSOLE -static inline void wait_for_xmitr(struct uart_port *port) +static void wait_for_xmitr(struct uart_port *port) { unsigned int tmout; diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c index ae3cf94b146b..293ecbb00684 100644 --- a/drivers/tty/serial/bfin_uart.c +++ b/drivers/tty/serial/bfin_uart.c @@ -213,7 +213,7 @@ static void bfin_serial_stop_rx(struct uart_port *port) static void bfin_serial_rx_chars(struct bfin_serial_port *uart) { unsigned int status, ch, flg; - static struct timeval anomaly_start = { .tv_sec = 0 }; + static u64 anomaly_start; status = UART_GET_LSR(uart); UART_CLEAR_LSR(uart); @@ -246,27 +246,24 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) * character time +/- some percent. So 1.5 sounds good. All other * Blackfin families operate properly. Woo. */ - if (anomaly_start.tv_sec) { - struct timeval curr; - suseconds_t usecs; + if (anomaly_start > 0) { + u64 curr, nsecs, threshold_ns; if ((~ch & (~ch + 1)) & 0xff) goto known_good_char; - do_gettimeofday(&curr); - if (curr.tv_sec - anomaly_start.tv_sec > 1) + curr = ktime_get_ns(); + nsecs = curr - anomaly_start; + if (nsecs >> 32) goto known_good_char; - usecs = 0; - if (curr.tv_sec != anomaly_start.tv_sec) - usecs += USEC_PER_SEC; - usecs += curr.tv_usec - anomaly_start.tv_usec; - - if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) + threshold_ns = UART_GET_ANOMALY_THRESHOLD(uart) + * NSEC_PER_USEC; + if (nsecs > threshold_ns) goto known_good_char; if (ch) - anomaly_start.tv_sec = 0; + anomaly_start = 0; else anomaly_start = curr; @@ -274,14 +271,14 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) known_good_char: status &= ~BI; - anomaly_start.tv_sec = 0; + anomaly_start = 0; } } if (status & BI) { if (ANOMALY_05000363) if (bfin_revid() < 5) - do_gettimeofday(&anomaly_start); + anomaly_start = ktime_get_ns(); uart->port.icount.brk++; if (uart_handle_break(&uart->port)) goto ignore_char; diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index f09636083426..3f2423690d01 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -71,10 +71,16 @@ static int __init parse_options(struct earlycon_device *device, char *options) return -EINVAL; switch (port->iotype) { + case UPIO_MEM: + port->mapbase = addr; + break; + case UPIO_MEM16: + port->regshift = 1; + port->mapbase = addr; + break; case UPIO_MEM32: case UPIO_MEM32BE: - port->regshift = 2; /* fall-through */ - case UPIO_MEM: + port->regshift = 2; port->mapbase = addr; break; case UPIO_PORT: @@ -91,10 +97,11 @@ static int __init parse_options(struct earlycon_device *device, char *options) strlcpy(device->options, options, length); } - if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM32 || - port->iotype == UPIO_MEM32BE) + if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 || + port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE) pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n", (port->iotype == UPIO_MEM) ? "" : + (port->iotype == UPIO_MEM16) ? "16" : (port->iotype == UPIO_MEM32) ? 
"32" : "32be", (unsigned long long)port->mapbase, device->options); @@ -115,6 +122,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match) if (buf && !parse_options(&early_console_dev, buf)) buf = NULL; + spin_lock_init(&port->lock); port->uartclk = BASE_BAUD * 16; if (port->mapbase) port->membase = earlycon_map(port->mapbase, 64); @@ -202,6 +210,7 @@ int __init of_setup_earlycon(unsigned long addr, int err; struct uart_port *port = &early_console_dev.port; + spin_lock_init(&port->lock); port->iotype = UPIO_MEM; port->mapbase = addr; port->uartclk = BASE_BAUD * 16; diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c index ffc7cb2585a6..c60a8d5e4020 100644 --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c @@ -22,7 +22,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ -#define SERIAL_DO_RESTART #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 016e4be05cec..9362f54c816c 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -44,6 +44,8 @@ #include <linux/platform_data/serial-imx.h> #include <linux/platform_data/dma-imx.h> +#include "serial_mctrl_gpio.h" + /* Register definitions */ #define URXD0 0x0 /* Receiver Register */ #define URTX0 0x40 /* Transmitter Register */ @@ -148,8 +150,11 @@ #define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */ #define USR2_DTRF (1<<13) /* DTR edge interrupt flag */ #define USR2_IDLE (1<<12) /* Idle condition */ +#define USR2_RIDELT (1<<10) /* Ring Interrupt Delta */ +#define USR2_RIIN (1<<9) /* Ring Indicator Input */ #define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */ #define USR2_WAKE (1<<7) /* Wake */ +#define USR2_DCDIN (1<<5) /* Data Carrier Detect Input */ #define USR2_RTSF (1<<4) /* RTS edge interrupt flag */ #define USR2_TXDC (1<<3) /* Transmitter complete */ #define USR2_BRCD (1<<2) /* Break condition */ @@ -206,6 +211,8 @@ struct imx_port { struct clk *clk_per; const struct imx_uart_data *devdata; + struct mctrl_gpios *gpios; + /* DMA fields */ unsigned int dma_is_inited:1; unsigned int dma_is_enabled:1; @@ -308,49 +315,24 @@ static void imx_port_ucrs_restore(struct uart_port *port, } #endif -/* - * Handle any change of modem status signal since we were last called. - */ -static void imx_mctrl_check(struct imx_port *sport) +static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2) { - unsigned int status, changed; - - status = sport->port.ops->get_mctrl(&sport->port); - changed = status ^ sport->old_status; - - if (changed == 0) - return; - - sport->old_status = status; + *ucr2 &= ~UCR2_CTSC; + *ucr2 |= UCR2_CTS; - if (changed & TIOCM_RI) - sport->port.icount.rng++; - if (changed & TIOCM_DSR) - sport->port.icount.dsr++; - if (changed & TIOCM_CAR) - uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); - if (changed & TIOCM_CTS) - uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - - wake_up_interruptible(&sport->port.state->port.delta_msr_wait); + mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); } -/* - * This is our per-port timeout handler, for checking the - * modem status signals. 
- */ -static void imx_timeout(unsigned long data) +static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) { - struct imx_port *sport = (struct imx_port *)data; - unsigned long flags; + *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); - if (sport->port.state) { - spin_lock_irqsave(&sport->port.lock, flags); - imx_mctrl_check(sport); - spin_unlock_irqrestore(&sport->port.lock, flags); + mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); +} - mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); - } +static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2) +{ + *ucr2 |= UCR2_CTSC; } /* @@ -376,9 +358,9 @@ static void imx_stop_tx(struct uart_port *port) readl(port->membase + USR2) & USR2_TXDC) { temp = readl(port->membase + UCR2); if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) - temp &= ~UCR2_CTS; + imx_port_rts_inactive(sport, &temp); else - temp |= UCR2_CTS; + imx_port_rts_active(sport, &temp); writel(temp, port->membase + UCR2); temp = readl(port->membase + UCR4); @@ -420,6 +402,8 @@ static void imx_enable_ms(struct uart_port *port) struct imx_port *sport = (struct imx_port *)port; mod_timer(&sport->timer, jiffies); + + mctrl_gpio_enable_ms(sport->gpios); } static void imx_dma_tx(struct imx_port *sport); @@ -579,14 +563,14 @@ static void imx_start_tx(struct uart_port *port) unsigned long temp; if (port->rs485.flags & SER_RS485_ENABLED) { - /* enable transmitter and shifter empty irq */ temp = readl(port->membase + UCR2); if (port->rs485.flags & SER_RS485_RTS_ON_SEND) - temp &= ~UCR2_CTS; + imx_port_rts_inactive(sport, &temp); else - temp |= UCR2_CTS; + imx_port_rts_active(sport, &temp); writel(temp, port->membase + UCR2); + /* enable transmitter and shifter empty irq */ temp = readl(port->membase + UCR4); temp |= UCR4_TCEN; writel(temp, port->membase + UCR4); @@ -801,23 +785,35 @@ static unsigned int imx_tx_empty(struct uart_port *port) /* * We have a modem side uart, so the meanings of RTS and CTS are inverted. 
*/ -static unsigned int imx_get_mctrl(struct uart_port *port) +static unsigned int imx_get_hwmctrl(struct imx_port *sport) { - struct imx_port *sport = (struct imx_port *)port; - unsigned int tmp = TIOCM_DSR | TIOCM_CAR; + unsigned int tmp = TIOCM_DSR; + unsigned usr1 = readl(sport->port.membase + USR1); - if (readl(sport->port.membase + USR1) & USR1_RTSS) + if (usr1 & USR1_RTSS) tmp |= TIOCM_CTS; - if (readl(sport->port.membase + UCR2) & UCR2_CTS) - tmp |= TIOCM_RTS; + /* in DCE mode DCDIN is always 0 */ + if (!(usr1 & USR2_DCDIN)) + tmp |= TIOCM_CAR; - if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP) - tmp |= TIOCM_LOOP; + /* in DCE mode RIIN is always 0 */ + if (readl(sport->port.membase + USR2) & USR2_RIIN) + tmp |= TIOCM_RI; return tmp; } +static unsigned int imx_get_mctrl(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned int ret = imx_get_hwmctrl(sport); + + mctrl_gpio_get(sport->gpios, &ret); + + return ret; +} + static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct imx_port *sport = (struct imx_port *)port; @@ -831,10 +827,17 @@ static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) writel(temp, sport->port.membase + UCR2); } + temp = readl(sport->port.membase + UCR3) & ~UCR3_DSR; + if (!(mctrl & TIOCM_DTR)) + temp |= UCR3_DSR; + writel(temp, sport->port.membase + UCR3); + temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP; if (mctrl & TIOCM_LOOP) temp |= UTS_LOOP; writel(temp, sport->port.membase + uts_reg(sport)); + + mctrl_gpio_set(sport->gpios, mctrl); } /* @@ -857,6 +860,51 @@ static void imx_break_ctl(struct uart_port *port, int break_state) spin_unlock_irqrestore(&sport->port.lock, flags); } +/* + * Handle any change of modem status signal since we were last called. + */ +static void imx_mctrl_check(struct imx_port *sport) +{ + unsigned int status, changed; + + status = imx_get_hwmctrl(sport); + changed = status ^ sport->old_status; + + if (changed == 0) + return; + + sport->old_status = status; + + if (changed & TIOCM_RI) + sport->port.icount.rng++; + if (changed & TIOCM_DSR) + sport->port.icount.dsr++; + if (changed & TIOCM_CAR) + uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); + if (changed & TIOCM_CTS) + uart_handle_cts_change(&sport->port, status & TIOCM_CTS); + + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); +} + +/* + * This is our per-port timeout handler, for checking the + * modem status signals. + */ +static void imx_timeout(unsigned long data) +{ + struct imx_port *sport = (struct imx_port *)data; + unsigned long flags; + + if (sport->port.state) { + spin_lock_irqsave(&sport->port.lock, flags); + imx_mctrl_check(sport); + spin_unlock_irqrestore(&sport->port.lock, flags); + + mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); + } +} + #define RX_BUF_SIZE (PAGE_SIZE) static void imx_rx_dma_done(struct imx_port *sport) { @@ -1207,6 +1255,8 @@ static void imx_shutdown(struct uart_port *port) imx_uart_dma_exit(sport); } + mctrl_gpio_disable_ms(sport->gpios); + spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_TXEN); @@ -1284,9 +1334,10 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, { struct imx_port *sport = (struct imx_port *)port; unsigned long flags; - unsigned int ucr2, old_ucr1, old_ucr2, baud, quot; + unsigned long ucr2, old_ucr1, old_ucr2; + unsigned int baud, quot; unsigned int old_csize = old ? 
old->c_cflag & CSIZE : CS8; - unsigned int div, ufcr; + unsigned long div, ufcr; unsigned long num, denom; uint64_t tdiv64; @@ -1315,19 +1366,25 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, * it under manual control and keep transmitter * disabled. */ - if (!(port->rs485.flags & - SER_RS485_RTS_AFTER_SEND)) - ucr2 |= UCR2_CTS; + if (port->rs485.flags & + SER_RS485_RTS_AFTER_SEND) + imx_port_rts_inactive(sport, &ucr2); + else + imx_port_rts_active(sport, &ucr2); } else { - ucr2 |= UCR2_CTSC; + imx_port_rts_auto(sport, &ucr2); } } else { termios->c_cflag &= ~CRTSCTS; } - } else if (port->rs485.flags & SER_RS485_ENABLED) + } else if (port->rs485.flags & SER_RS485_ENABLED) { /* disable transmitter */ - if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND)) - ucr2 |= UCR2_CTS; + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + imx_port_rts_inactive(sport, &ucr2); + else + imx_port_rts_active(sport, &ucr2); + } + if (termios->c_cflag & CSTOPB) ucr2 |= UCR2_STPB; @@ -1568,11 +1625,10 @@ static int imx_rs485_config(struct uart_port *port, /* disable transmitter */ temp = readl(sport->port.membase + UCR2); - temp &= ~UCR2_CTSC; if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) - temp &= ~UCR2_CTS; + imx_port_rts_inactive(sport, &temp); else - temp |= UCR2_CTS; + imx_port_rts_active(sport, &temp); writel(temp, sport->port.membase + UCR2); } @@ -1857,11 +1913,10 @@ static int serial_imx_probe_dt(struct imx_port *sport, struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct of_device_id *of_id = - of_match_device(imx_uart_dt_ids, &pdev->dev); int ret; - if (!np) + sport->devdata = of_device_get_match_data(&pdev->dev); + if (!sport->devdata) /* no device tree device */ return 1; @@ -1878,8 +1933,6 @@ static int serial_imx_probe_dt(struct imx_port *sport, if (of_get_property(np, "fsl,dte-mode", NULL)) sport->dte_mode = 1; - sport->devdata = of_id->data; - return 0; } #else @@ -1948,6 +2001,10 @@ static int serial_imx_probe(struct platform_device *pdev) sport->timer.function = imx_timeout; sport->timer.data = (unsigned long)sport; + sport->gpios = mctrl_gpio_init(&sport->port, 0); + if (IS_ERR(sport->gpios)) + return PTR_ERR(sport->gpios); + sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(sport->clk_ipg)) { ret = PTR_ERR(sport->clk_ipg); diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index efbd87a76656..a119f11bf2f4 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -70,7 +70,7 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out; } - rc = pci_request_regions(pdev, "jsm"); + rc = pci_request_regions(pdev, JSM_DRIVER_NAME); if (rc) { dev_err(&pdev->dev, "pci_request_region FAILED\n"); goto out_disable_device; @@ -328,7 +328,7 @@ static struct pci_device_id jsm_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, jsm_pci_tbl); static struct pci_driver jsm_driver = { - .name = "jsm", + .name = JSM_DRIVER_NAME, .id_table = jsm_pci_tbl, .probe = jsm_probe_one, .remove = jsm_remove_one, diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c index 932b2accd06f..c6fdd6369534 100644 --- a/drivers/tty/serial/jsm/jsm_neo.c +++ b/drivers/tty/serial/jsm/jsm_neo.c @@ -714,7 +714,7 @@ static void neo_clear_break(struct jsm_channel *ch) /* * Parse the ISR register. 
*/ -static inline void neo_parse_isr(struct jsm_board *brd, u32 port) +static void neo_parse_isr(struct jsm_board *brd, u32 port) { struct jsm_channel *ch; u8 isr; diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c index 8f7f83a14c93..0eeb64f2499c 100644 --- a/drivers/tty/serial/m32r_sio.c +++ b/drivers/tty/serial/m32r_sio.c @@ -990,7 +990,7 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv) /* * Wait for transmitter & holding register to empty */ -static inline void wait_for_xmitr(struct uart_sio_port *up) +static void wait_for_xmitr(struct uart_sio_port *up) { unsigned int status, tmout = 10000; diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c index 3141aa20843d..a44290e9b5a8 100644 --- a/drivers/tty/serial/men_z135_uart.c +++ b/drivers/tty/serial/men_z135_uart.c @@ -158,7 +158,7 @@ static inline void men_z135_reg_set(struct men_z135_port *uart, * @addr: Register address * @val: value to clear */ -static inline void men_z135_reg_clr(struct men_z135_port *uart, +static void men_z135_reg_clr(struct men_z135_port *uart, u32 addr, u32 val) { struct uart_port *port = &uart->port; diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index 0fc83c962d10..b12a37bd37b6 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -57,6 +57,7 @@ #define AML_UART_RX_EMPTY BIT(20) #define AML_UART_TX_FULL BIT(21) #define AML_UART_TX_EMPTY BIT(22) +#define AML_UART_XMIT_BUSY BIT(25) #define AML_UART_ERR (AML_UART_PARITY_ERR | \ AML_UART_FRAME_ERR | \ AML_UART_TX_FIFO_WERR) @@ -100,7 +101,8 @@ static unsigned int meson_uart_tx_empty(struct uart_port *port) u32 val; val = readl(port->membase + AML_UART_STATUS); - return (val & AML_UART_TX_EMPTY) ? TIOCSER_TEMT : 0; + val &= (AML_UART_TX_EMPTY | AML_UART_XMIT_BUSY); + return (val == AML_UART_TX_EMPTY) ? 
TIOCSER_TEMT : 0; } static void meson_uart_stop_tx(struct uart_port *port) @@ -108,7 +110,7 @@ static void meson_uart_stop_tx(struct uart_port *port) u32 val; val = readl(port->membase + AML_UART_CONTROL); - val &= ~AML_UART_TX_EN; + val &= ~AML_UART_TX_INT_EN; writel(val, port->membase + AML_UART_CONTROL); } @@ -131,7 +133,7 @@ static void meson_uart_shutdown(struct uart_port *port) spin_lock_irqsave(&port->lock, flags); val = readl(port->membase + AML_UART_CONTROL); - val &= ~(AML_UART_RX_EN | AML_UART_TX_EN); + val &= ~AML_UART_RX_EN; val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN); writel(val, port->membase + AML_UART_CONTROL); @@ -142,6 +144,7 @@ static void meson_uart_start_tx(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; unsigned int ch; + u32 val; if (uart_tx_stopped(port)) { meson_uart_stop_tx(port); @@ -165,6 +168,12 @@ static void meson_uart_start_tx(struct uart_port *port) port->icount.tx++; } + if (!uart_circ_empty(xmit)) { + val = readl(port->membase + AML_UART_CONTROL); + val |= AML_UART_TX_INT_EN; + writel(val, port->membase + AML_UART_CONTROL); + } + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } @@ -228,8 +237,10 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id) if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)) meson_receive_chars(port); - if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)) - meson_uart_start_tx(port); + if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)) { + if (readl(port->membase + AML_UART_CONTROL) & AML_UART_TX_INT_EN) + meson_uart_start_tx(port); + } spin_unlock(&port->lock); @@ -241,10 +252,9 @@ static const char *meson_uart_type(struct uart_port *port) return (port->type == PORT_MESON) ? "meson_uart" : NULL; } -static int meson_uart_startup(struct uart_port *port) +static void meson_uart_reset(struct uart_port *port) { u32 val; - int ret = 0; val = readl(port->membase + AML_UART_CONTROL); val |= (AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLR_ERR); @@ -252,6 +262,18 @@ static int meson_uart_startup(struct uart_port *port) val &= ~(AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLR_ERR); writel(val, port->membase + AML_UART_CONTROL); +} + +static int meson_uart_startup(struct uart_port *port) +{ + u32 val; + int ret = 0; + + val = readl(port->membase + AML_UART_CONTROL); + val |= AML_UART_CLR_ERR; + writel(val, port->membase + AML_UART_CONTROL); + val &= ~AML_UART_CLR_ERR; + writel(val, port->membase + AML_UART_CONTROL); val |= (AML_UART_RX_EN | AML_UART_TX_EN); writel(val, port->membase + AML_UART_CONTROL); @@ -272,7 +294,7 @@ static void meson_uart_change_speed(struct uart_port *port, unsigned long baud) { u32 val; - while (!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_EMPTY)) + while (!meson_uart_tx_empty(port)) cpu_relax(); val = readl(port->membase + AML_UART_REG5); @@ -367,9 +389,26 @@ static int meson_uart_verify_port(struct uart_port *port, return ret; } +static int meson_uart_res_size(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(port->dev, "cannot obtain I/O memory region"); + return -ENODEV; + } + + return resource_size(res); +} + static void meson_uart_release_port(struct uart_port *port) { + int size = meson_uart_res_size(port); + if (port->flags & UPF_IOREMAP) { + devm_release_mem_region(port->dev, port->mapbase, size); devm_iounmap(port->dev, port->membase); 
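/*
 * Editorial sketch, not part of the patch: a self-contained model of the
 * TX interrupt gating the meson hunks above introduce.  stop_tx() now masks
 * only the TX interrupt instead of disabling the transmitter, start_tx()
 * re-arms that interrupt only while data is still queued, and the IRQ path
 * refills the FIFO only when the interrupt is actually armed.  All names,
 * the 4-byte "FIFO" and the byte sink are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define TX_INT_EN	(1u << 0)

static unsigned int control;				/* models AML_UART_CONTROL */
static struct { const char *buf; size_t head, tail; } xmit;

static void example_stop_tx(void)
{
	control &= ~TX_INT_EN;				/* mask, transmitter stays enabled */
}

static void example_start_tx(void)
{
	unsigned int room = 4;				/* pretend 4 free FIFO slots */

	while (room-- && xmit.tail < xmit.head)
		putchar(xmit.buf[xmit.tail++]);		/* stand-in for the FIFO write */

	if (xmit.tail < xmit.head)
		control |= TX_INT_EN;			/* more pending: ask for a TX irq */
	else
		example_stop_tx();			/* drained: mask the TX irq again */
}

static void example_tx_irq(void)
{
	if (control & TX_INT_EN)			/* ignore TX-ready state when not armed */
		example_start_tx();
}

int main(void)
{
	xmit.buf = "hello, world\n";
	xmit.head = strlen(xmit.buf);

	example_start_tx();				/* sends 4 bytes, arms the TX irq */
	while (control & TX_INT_EN)
		example_tx_irq();			/* drains the rest, then masks */
	return 0;
}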
port->membase = NULL; } @@ -377,16 +416,10 @@ static void meson_uart_release_port(struct uart_port *port) static int meson_uart_request_port(struct uart_port *port) { - struct platform_device *pdev = to_platform_device(port->dev); - struct resource *res; - int size; + int size = meson_uart_res_size(port); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "cannot obtain I/O memory region"); - return -ENODEV; - } - size = resource_size(res); + if (size < 0) + return size; if (!devm_request_mem_region(port->dev, port->mapbase, size, dev_name(port->dev))) { @@ -448,6 +481,7 @@ static void meson_serial_console_write(struct console *co, const char *s, struct uart_port *port; unsigned long flags; int locked; + u32 val, tmp; port = meson_ports[co->index]; if (!port) @@ -463,7 +497,13 @@ static void meson_serial_console_write(struct console *co, const char *s, locked = 1; } + val = readl(port->membase + AML_UART_CONTROL); + val |= AML_UART_TX_EN; + tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN); + writel(tmp, port->membase + AML_UART_CONTROL); + uart_console_write(port, s, count, meson_console_putchar); + writel(val, port->membase + AML_UART_CONTROL); if (locked) spin_unlock(&port->lock); @@ -570,6 +610,12 @@ static int meson_uart_probe(struct platform_device *pdev) meson_ports[pdev->id] = port; platform_set_drvdata(pdev, port); + /* reset port before registering (and possibly registering console) */ + if (meson_uart_request_port(port) >= 0) { + meson_uart_reset(port); + meson_uart_release_port(port); + } + ret = uart_add_one_port(&meson_uart_driver, port); if (ret) meson_ports[pdev->id] = NULL; diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c deleted file mode 100644 index 5da7622e88c3..000000000000 --- a/drivers/tty/serial/nwpserial.c +++ /dev/null @@ -1,477 +0,0 @@ -/* - * Serial Port driver for a NWP uart device - * - * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ -#include <linux/init.h> -#include <linux/export.h> -#include <linux/console.h> -#include <linux/serial.h> -#include <linux/serial_reg.h> -#include <linux/serial_core.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/irqreturn.h> -#include <linux/mutex.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> -#include <linux/nwpserial.h> -#include <linux/delay.h> -#include <asm/prom.h> -#include <asm/dcr.h> - -#define NWPSERIAL_NR 2 - -#define NWPSERIAL_STATUS_RXVALID 0x1 -#define NWPSERIAL_STATUS_TXFULL 0x2 - -struct nwpserial_port { - struct uart_port port; - dcr_host_t dcr_host; - unsigned int ier; - unsigned int mcr; -}; - -static DEFINE_MUTEX(nwpserial_mutex); -static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR]; - -static void wait_for_bits(struct nwpserial_port *up, int bits) -{ - unsigned int status, tmout = 10000; - - /* Wait up to 10ms for the character(s) to be sent. 
*/ - do { - status = dcr_read(up->dcr_host, UART_LSR); - - if (--tmout == 0) - break; - udelay(1); - } while ((status & bits) != bits); -} - -#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE -static void nwpserial_console_putchar(struct uart_port *port, int c) -{ - struct nwpserial_port *up; - up = container_of(port, struct nwpserial_port, port); - /* check if tx buffer is full */ - wait_for_bits(up, UART_LSR_THRE); - dcr_write(up->dcr_host, UART_TX, c); - up->port.icount.tx++; -} - -static void -nwpserial_console_write(struct console *co, const char *s, unsigned int count) -{ - struct nwpserial_port *up = &nwpserial_ports[co->index]; - unsigned long flags; - int locked = 1; - - if (oops_in_progress) - locked = spin_trylock_irqsave(&up->port.lock, flags); - else - spin_lock_irqsave(&up->port.lock, flags); - - /* save and disable interrupt */ - up->ier = dcr_read(up->dcr_host, UART_IER); - dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI); - - uart_console_write(&up->port, s, count, nwpserial_console_putchar); - - /* wait for transmitter to become empty */ - while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0) - cpu_relax(); - - /* restore interrupt state */ - dcr_write(up->dcr_host, UART_IER, up->ier); - - if (locked) - spin_unlock_irqrestore(&up->port.lock, flags); -} - -static struct uart_driver nwpserial_reg; -static struct console nwpserial_console = { - .name = "ttySQ", - .write = nwpserial_console_write, - .device = uart_console_device, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &nwpserial_reg, -}; -#define NWPSERIAL_CONSOLE (&nwpserial_console) -#else -#define NWPSERIAL_CONSOLE NULL -#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ - -/**************************************************************************/ - -static int nwpserial_request_port(struct uart_port *port) -{ - return 0; -} - -static void nwpserial_release_port(struct uart_port *port) -{ - /* N/A */ -} - -static void nwpserial_config_port(struct uart_port *port, int flags) -{ - port->type = PORT_NWPSERIAL; -} - -static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) -{ - struct nwpserial_port *up = dev_id; - struct tty_port *port = &up->port.state->port; - irqreturn_t ret; - unsigned int iir; - unsigned char ch; - - spin_lock(&up->port.lock); - - /* check if the uart was the interrupt source. 
*/ - iir = dcr_read(up->dcr_host, UART_IIR); - if (!iir) { - ret = IRQ_NONE; - goto out; - } - - do { - up->port.icount.rx++; - ch = dcr_read(up->dcr_host, UART_RX); - if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID) - tty_insert_flip_char(port, ch, TTY_NORMAL); - } while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR); - - spin_unlock(&up->port.lock); - tty_flip_buffer_push(port); - spin_lock(&up->port.lock); - - ret = IRQ_HANDLED; - - /* clear interrupt */ - dcr_write(up->dcr_host, UART_IIR, 1); -out: - spin_unlock(&up->port.lock); - return ret; -} - -static int nwpserial_startup(struct uart_port *port) -{ - struct nwpserial_port *up; - int err; - - up = container_of(port, struct nwpserial_port, port); - - /* disable flow control by default */ - up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE; - dcr_write(up->dcr_host, UART_MCR, up->mcr); - - /* register interrupt handler */ - err = request_irq(up->port.irq, nwpserial_interrupt, - IRQF_SHARED, "nwpserial", up); - if (err) - return err; - - /* enable interrupts */ - up->ier = UART_IER_RDI; - dcr_write(up->dcr_host, UART_IER, up->ier); - - /* enable receiving */ - up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID; - - return 0; -} - -static void nwpserial_shutdown(struct uart_port *port) -{ - struct nwpserial_port *up; - up = container_of(port, struct nwpserial_port, port); - - /* disable receiving */ - up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; - - /* disable interrupts from this port */ - up->ier = 0; - dcr_write(up->dcr_host, UART_IER, up->ier); - - /* free irq */ - free_irq(up->port.irq, up); -} - -static int nwpserial_verify_port(struct uart_port *port, - struct serial_struct *ser) -{ - return -EINVAL; -} - -static const char *nwpserial_type(struct uart_port *port) -{ - return port->type == PORT_NWPSERIAL ? 
"nwpserial" : NULL; -} - -static void nwpserial_set_termios(struct uart_port *port, - struct ktermios *termios, struct ktermios *old) -{ - struct nwpserial_port *up; - up = container_of(port, struct nwpserial_port, port); - - up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID - | NWPSERIAL_STATUS_TXFULL; - - up->port.ignore_status_mask = 0; - /* ignore all characters if CREAD is not set */ - if ((termios->c_cflag & CREAD) == 0) - up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; - - /* Copy back the old hardware settings */ - if (old) - tty_termios_copy_hw(termios, old); -} - -static void nwpserial_break_ctl(struct uart_port *port, int ctl) -{ - /* N/A */ -} - -static void nwpserial_stop_rx(struct uart_port *port) -{ - struct nwpserial_port *up; - up = container_of(port, struct nwpserial_port, port); - /* don't forward any more data (like !CREAD) */ - up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID; -} - -static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c) -{ - /* check if tx buffer is full */ - wait_for_bits(up, UART_LSR_THRE); - dcr_write(up->dcr_host, UART_TX, c); - up->port.icount.tx++; -} - -static void nwpserial_start_tx(struct uart_port *port) -{ - struct nwpserial_port *up; - struct circ_buf *xmit; - up = container_of(port, struct nwpserial_port, port); - xmit = &up->port.state->xmit; - - if (port->x_char) { - nwpserial_putchar(up, up->port.x_char); - port->x_char = 0; - } - - while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) { - nwpserial_putchar(up, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); - } -} - -static unsigned int nwpserial_get_mctrl(struct uart_port *port) -{ - return 0; -} - -static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl) -{ - /* N/A */ -} - -static void nwpserial_stop_tx(struct uart_port *port) -{ - /* N/A */ -} - -static unsigned int nwpserial_tx_empty(struct uart_port *port) -{ - struct nwpserial_port *up; - unsigned long flags; - int ret; - up = container_of(port, struct nwpserial_port, port); - - spin_lock_irqsave(&up->port.lock, flags); - ret = dcr_read(up->dcr_host, UART_LSR); - spin_unlock_irqrestore(&up->port.lock, flags); - - return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0; -} - -static struct uart_ops nwpserial_pops = { - .tx_empty = nwpserial_tx_empty, - .set_mctrl = nwpserial_set_mctrl, - .get_mctrl = nwpserial_get_mctrl, - .stop_tx = nwpserial_stop_tx, - .start_tx = nwpserial_start_tx, - .stop_rx = nwpserial_stop_rx, - .break_ctl = nwpserial_break_ctl, - .startup = nwpserial_startup, - .shutdown = nwpserial_shutdown, - .set_termios = nwpserial_set_termios, - .type = nwpserial_type, - .release_port = nwpserial_release_port, - .request_port = nwpserial_request_port, - .config_port = nwpserial_config_port, - .verify_port = nwpserial_verify_port, -}; - -static struct uart_driver nwpserial_reg = { - .owner = THIS_MODULE, - .driver_name = "nwpserial", - .dev_name = "ttySQ", - .major = TTY_MAJOR, - .minor = 68, - .nr = NWPSERIAL_NR, - .cons = NWPSERIAL_CONSOLE, -}; - -int nwpserial_register_port(struct uart_port *port) -{ - struct nwpserial_port *up = NULL; - int ret = -1; - int i; - static int first = 1; - int dcr_len; - int dcr_base; - struct device_node *dn; - - mutex_lock(&nwpserial_mutex); - - dn = port->dev->of_node; - if (dn == NULL) - goto out; - - /* get dcr base. 
*/ - dcr_base = dcr_resource_start(dn, 0); - - /* find matching entry */ - for (i = 0; i < NWPSERIAL_NR; i++) - if (nwpserial_ports[i].port.iobase == dcr_base) { - up = &nwpserial_ports[i]; - break; - } - - /* we didn't find a mtching entry, search for a free port */ - if (up == NULL) - for (i = 0; i < NWPSERIAL_NR; i++) - if (nwpserial_ports[i].port.type == PORT_UNKNOWN && - nwpserial_ports[i].port.iobase == 0) { - up = &nwpserial_ports[i]; - break; - } - - if (up == NULL) { - ret = -EBUSY; - goto out; - } - - if (first) - uart_register_driver(&nwpserial_reg); - first = 0; - - up->port.membase = port->membase; - up->port.irq = port->irq; - up->port.uartclk = port->uartclk; - up->port.fifosize = port->fifosize; - up->port.regshift = port->regshift; - up->port.iotype = port->iotype; - up->port.flags = port->flags; - up->port.mapbase = port->mapbase; - up->port.private_data = port->private_data; - - if (port->dev) - up->port.dev = port->dev; - - if (up->port.iobase != dcr_base) { - up->port.ops = &nwpserial_pops; - up->port.fifosize = 16; - - spin_lock_init(&up->port.lock); - - up->port.iobase = dcr_base; - dcr_len = dcr_resource_len(dn, 0); - - up->dcr_host = dcr_map(dn, dcr_base, dcr_len); - if (!DCR_MAP_OK(up->dcr_host)) { - printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL"); - goto out; - } - } - - ret = uart_add_one_port(&nwpserial_reg, &up->port); - if (ret == 0) - ret = up->port.line; - -out: - mutex_unlock(&nwpserial_mutex); - - return ret; -} -EXPORT_SYMBOL(nwpserial_register_port); - -void nwpserial_unregister_port(int line) -{ - struct nwpserial_port *up = &nwpserial_ports[line]; - mutex_lock(&nwpserial_mutex); - uart_remove_one_port(&nwpserial_reg, &up->port); - - up->port.type = PORT_UNKNOWN; - - mutex_unlock(&nwpserial_mutex); -} -EXPORT_SYMBOL(nwpserial_unregister_port); - -#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE -static int __init nwpserial_console_init(void) -{ - struct nwpserial_port *up = NULL; - struct device_node *dn; - const char *name; - int dcr_base; - int dcr_len; - int i; - - /* search for a free port */ - for (i = 0; i < NWPSERIAL_NR; i++) - if (nwpserial_ports[i].port.type == PORT_UNKNOWN) { - up = &nwpserial_ports[i]; - break; - } - - if (up == NULL) - return -1; - - name = of_get_property(of_chosen, "linux,stdout-path", NULL); - if (name == NULL) - return -1; - - dn = of_find_node_by_path(name); - if (!dn) - return -1; - - spin_lock_init(&up->port.lock); - up->port.ops = &nwpserial_pops; - up->port.type = PORT_NWPSERIAL; - up->port.fifosize = 16; - - dcr_base = dcr_resource_start(dn, 0); - dcr_len = dcr_resource_len(dn, 0); - up->port.iobase = dcr_base; - - up->dcr_host = dcr_map(dn, dcr_base, dcr_len); - if (!DCR_MAP_OK(up->dcr_host)) { - printk("Cannot map DCR resources for SERIAL"); - return -1; - } - register_console(&nwpserial_console); - return 0; -} -console_initcall(nwpserial_console_init); -#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 9d4c84f7485f..b645f9228ed7 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port) #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -static inline void wait_for_xmitr(struct uart_omap_port *up) +static void wait_for_xmitr(struct uart_omap_port *up) { unsigned int status, tmout = 10000; diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c index 9becba654892..41eab75ba2af 100644 --- 
a/drivers/tty/serial/pxa.c +++ b/drivers/tty/serial/pxa.c @@ -603,7 +603,7 @@ static struct uart_driver serial_pxa_reg; /* * Wait for transmitter & holding register to empty */ -static inline void wait_for_xmitr(struct uart_pxa_port *up) +static void wait_for_xmitr(struct uart_pxa_port *up) { unsigned int status, tmout = 10000; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index edb5305b9d4d..5815bcbc55b2 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -389,6 +389,13 @@ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) const u8 line = sc16is7xx_line(port); u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line; + /* + * Don't send zero-length data, at least on SPI it confuses the chip + * delivering wrong TXLVL data. + */ + if (unlikely(!to_send)) + return; + regcache_cache_bypass(s->regmap, true); regmap_raw_write(s->regmap, addr, s->buf, to_send); regcache_cache_bypass(s->regmap, false); @@ -630,6 +637,12 @@ static void sc16is7xx_handle_tx(struct uart_port *port) if (likely(to_send)) { /* Limit to size of TX FIFO */ txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG); + if (txlen > SC16IS7XX_FIFO_SIZE) { + dev_err_ratelimited(port->dev, + "chip reports %d free bytes in TX fifo, but it only has %d", + txlen, SC16IS7XX_FIFO_SIZE); + txlen = 0; + } to_send = (to_send > txlen) ? txlen : to_send; /* Add data to send */ diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index def5199ca004..b1f54ab1818c 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -110,7 +110,7 @@ static void uart_start(struct tty_struct *tty) spin_unlock_irqrestore(&port->lock, flags); } -static inline void +static void uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) { unsigned long flags; @@ -1818,8 +1818,8 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co) * @options: ptr for <options> field; NULL if not present (out) * * Decodes earlycon kernel command line parameters of the form - * earlycon=<name>,io|mmio|mmio32|mmio32be|mmio32native,<addr>,<options> - * console=<name>,io|mmio|mmio32|mmio32be|mmio32native,<addr>,<options> + * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options> + * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options> * * The optional form * earlycon=<name>,0x<addr>,<options> @@ -1834,6 +1834,9 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr, if (strncmp(p, "mmio,", 5) == 0) { *iotype = UPIO_MEM; p += 5; + } else if (strncmp(p, "mmio16,", 7) == 0) { + *iotype = UPIO_MEM16; + p += 7; } else if (strncmp(p, "mmio32,", 7) == 0) { *iotype = UPIO_MEM32; p += 7; @@ -2186,6 +2189,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port) "I/O 0x%lx offset 0x%x", port->iobase, port->hub6); break; case UPIO_MEM: + case UPIO_MEM16: case UPIO_MEM32: case UPIO_MEM32BE: case UPIO_AU: @@ -2831,6 +2835,7 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2) return (port1->iobase == port2->iobase) && (port1->hub6 == port2->hub6); case UPIO_MEM: + case UPIO_MEM16: case UPIO_MEM32: case UPIO_MEM32BE: case UPIO_AU: diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index 3eb57eb532f1..226ad23b136c 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -193,6 +193,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct 
uart_port *port, unsigned int idx) return gpios; } +EXPORT_SYMBOL_GPL(mctrl_gpio_init); void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios) { @@ -247,3 +248,4 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) disable_irq(gpios->irq[i]); } } +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 960e50a97558..4646a9f531ad 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2,6 +2,7 @@ * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) * * Copyright (C) 2002 - 2011 Paul Mundt + * Copyright (C) 2015 Glider bvba * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). * * based off of the old drivers/char/sh-sci.c by: @@ -38,7 +39,6 @@ #include <linux/major.h> #include <linux/module.h> #include <linux/mm.h> -#include <linux/notifier.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -76,6 +76,14 @@ enum { ((port)->irqs[SCIx_ERI_IRQ] && \ ((port)->irqs[SCIx_RXI_IRQ] < 0)) +enum SCI_CLKS { + SCI_FCK, /* Functional Clock */ + SCI_SCK, /* Optional External Clock */ + SCI_BRG_INT, /* Optional BRG Internal Clock Source */ + SCI_SCIF_CLK, /* Optional BRG External Clock Source */ + SCI_NUM_CLKS +}; + struct sci_port { struct uart_port port; @@ -92,10 +100,9 @@ struct sci_port { struct timer_list break_timer; int break_flag; - /* Interface clock */ - struct clk *iclk; - /* Function clock */ - struct clk *fclk; + /* Clocks */ + struct clk *clks[SCI_NUM_CLKS]; + unsigned long clk_rates[SCI_NUM_CLKS]; int irqs[SCIx_NR_IRQS]; char *irqstr[SCIx_NR_IRQS]; @@ -116,8 +123,6 @@ struct sci_port { struct timer_list rx_timer; unsigned int rx_timeout; #endif - - struct notifier_block freq_transition; }; #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS @@ -163,6 +168,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -185,6 +192,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -206,6 +215,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = { 0x30, 16 }, [SCPDR] = { 0x34, 16 }, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -227,6 +238,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = { 0x30, 16 }, [SCPDR] = { 0x34, 16 }, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -249,6 +262,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -270,6 +285,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -291,6 +308,32 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, + }, + + /* + * 
Common SCIF definitions for ports with a Baud Rate Generator for + * External Clock (BRG). + */ + [SCIx_SH4_SCIF_BRG_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + [HSSRR] = sci_reg_invalid, + [SCPCR] = sci_reg_invalid, + [SCPDR] = sci_reg_invalid, + [SCDL] = { 0x30, 16 }, + [SCCKS] = { 0x34, 16 }, }, /* @@ -312,6 +355,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = { 0x40, 16 }, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = { 0x30, 16 }, + [SCCKS] = { 0x34, 16 }, }, /* @@ -334,6 +379,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -356,6 +403,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, /* @@ -378,6 +427,8 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { [HSSRR] = sci_reg_invalid, [SCPCR] = sci_reg_invalid, [SCPDR] = sci_reg_invalid, + [SCDL] = sci_reg_invalid, + [SCCKS] = sci_reg_invalid, }, }; @@ -452,18 +503,24 @@ static int sci_probe_regmap(struct plat_sci_port *cfg) static void sci_port_enable(struct sci_port *sci_port) { + unsigned int i; + if (!sci_port->port.dev) return; pm_runtime_get_sync(sci_port->port.dev); - clk_prepare_enable(sci_port->iclk); - sci_port->port.uartclk = clk_get_rate(sci_port->iclk); - clk_prepare_enable(sci_port->fclk); + for (i = 0; i < SCI_NUM_CLKS; i++) { + clk_prepare_enable(sci_port->clks[i]); + sci_port->clk_rates[i] = clk_get_rate(sci_port->clks[i]); + } + sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK]; } static void sci_port_disable(struct sci_port *sci_port) { + unsigned int i; + if (!sci_port->port.dev) return; @@ -475,8 +532,8 @@ static void sci_port_disable(struct sci_port *sci_port) del_timer_sync(&sci_port->break_timer); sci_port->break_flag = 0; - clk_disable_unprepare(sci_port->fclk); - clk_disable_unprepare(sci_port->iclk); + for (i = SCI_NUM_CLKS; i-- > 0; ) + clk_disable_unprepare(sci_port->clks[i]); pm_runtime_put_sync(sci_port->port.dev); } @@ -1437,7 +1494,7 @@ static void sci_request_dma(struct uart_port *port) sg_init_table(sg, 1); s->rx_buf[i] = buf; sg_dma_address(sg) = dma; - sg->length = s->buf_len_rx; + sg_dma_len(sg) = s->buf_len_rx; buf += s->buf_len_rx; dma += s->buf_len_rx; @@ -1606,29 +1663,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) return ret; } -/* - * Here we define a transition notifier so that we can update all of our - * ports' baud rate when the peripheral clock changes. 
- */ -static int sci_notifier(struct notifier_block *self, - unsigned long phase, void *p) -{ - struct sci_port *sci_port; - unsigned long flags; - - sci_port = container_of(self, struct sci_port, freq_transition); - - if (phase == CPUFREQ_POSTCHANGE) { - struct uart_port *port = &sci_port->port; - - spin_lock_irqsave(&port->lock, flags); - port->uartclk = clk_get_rate(sci_port->iclk); - spin_unlock_irqrestore(&port->lock, flags); - } - - return NOTIFY_OK; -} - static const struct sci_irq_desc { const char *desc; irq_handler_t handler; @@ -1864,90 +1898,149 @@ static void sci_shutdown(struct uart_port *port) sci_free_irq(s); } -static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps, - unsigned long freq) +static int sci_sck_calc(struct sci_port *s, unsigned int bps, + unsigned int *srr) { - if (s->sampling_rate) - return DIV_ROUND_CLOSEST(freq, s->sampling_rate * bps) - 1; + unsigned long freq = s->clk_rates[SCI_SCK]; + unsigned int min_sr, max_sr, sr; + int err, min_err = INT_MAX; + + if (s->sampling_rate) { + /* SCI(F) has a fixed sampling rate */ + min_sr = max_sr = s->sampling_rate / 2; + } else { + /* HSCIF has a variable 1/(8..32) sampling rate */ + min_sr = 8; + max_sr = 32; + } + + for (sr = max_sr; sr >= min_sr; sr--) { + err = DIV_ROUND_CLOSEST(freq, sr) - bps; + if (abs(err) >= abs(min_err)) + continue; - /* Warn, but use a safe default */ - WARN_ON(1); + min_err = err; + *srr = sr - 1; - return ((freq + 16 * bps) / (32 * bps) - 1); + if (!err) + break; + } + + dev_dbg(s->port.dev, "SCK: %u%+d bps using SR %u\n", bps, min_err, + *srr + 1); + return min_err; } -/* calculate frame length from SMR */ -static int sci_baud_calc_frame_len(unsigned int smr_val) +static int sci_brg_calc(struct sci_port *s, unsigned int bps, + unsigned long freq, unsigned int *dlr, + unsigned int *srr) { - int len = 10; + unsigned int min_sr, max_sr, sr, dl; + int err, min_err = INT_MAX; - if (smr_val & SCSMR_CHR) - len--; - if (smr_val & SCSMR_PE) - len++; - if (smr_val & SCSMR_STOP) - len++; + if (s->sampling_rate) { + /* SCIF has a fixed sampling rate */ + min_sr = max_sr = s->sampling_rate / 2; + } else { + /* HSCIF has a variable 1/(8..32) sampling rate */ + min_sr = 8; + max_sr = 32; + } - return len; -} + for (sr = max_sr; sr >= min_sr; sr--) { + dl = DIV_ROUND_CLOSEST(freq, sr * bps); + dl = clamp(dl, 1U, 65535U); + + err = DIV_ROUND_CLOSEST(freq, sr * dl) - bps; + if (abs(err) >= abs(min_err)) + continue; + min_err = err; + *dlr = dl; + *srr = sr - 1; -/* calculate sample rate, BRR, and clock select for HSCIF */ -static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq, - int *brr, unsigned int *srr, - unsigned int *cks, int frame_len) + if (!err) + break; + } + + dev_dbg(s->port.dev, "BRG: %u%+d bps using DL %u SR %u\n", bps, + min_err, *dlr, *srr + 1); + return min_err; +} + +/* calculate sample rate, BRR, and clock select */ +static int sci_scbrr_calc(struct sci_port *s, unsigned int bps, + unsigned int *brr, unsigned int *srr, + unsigned int *cks) { - int sr, c, br, err, recv_margin; - int min_err = 1000; /* 100% */ - int recv_max_margin = 0; + unsigned int min_sr, max_sr, shift, sr, br, prediv, scrate, c; + unsigned long freq = s->clk_rates[SCI_FCK]; + int err, min_err = INT_MAX; - /* Find the combination of sample rate and clock select with the - smallest deviation from the desired baud rate. 
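/*
 * Editorial sketch, not part of the patch: a plain C model of the divisor
 * search performed by the reworked sci_scbrr_calc() above.  For each
 * sampling rate "sr" and clock select "c" it derives a bit rate register
 * value and keeps the combination with the smallest absolute baud rate
 * error, i.e. the err = freq / (br * prediv) - bps criterion used in the
 * driver.  The fixed-sampling-rate (SCI/SCIF) case, the overflow guard and
 * the register writes are omitted; all names are illustrative.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static long scbrr_search(unsigned long freq, unsigned long bps,
			 unsigned int *brr, unsigned int *srr, unsigned int *cks)
{
	long err, min_err = LONG_MAX;
	unsigned int sr, c;

	for (sr = 32; sr >= 8; sr--) {		/* HSCIF: variable 1/8..1/32 sampling */
		for (c = 0; c <= 3; c++) {
			/* HSCIF-style prescaler: sr * 2^(2c+1) */
			unsigned long prediv = (unsigned long)sr * (1UL << (2 * c + 1));
			unsigned long br = DIV_ROUND_CLOSEST(freq, prediv * bps);

			if (br < 1)
				br = 1;
			if (br > 256)
				br = 256;

			err = (long)DIV_ROUND_CLOSEST(freq, br * prediv) - (long)bps;
			if (labs(err) >= labs(min_err))
				continue;

			min_err = err;
			*brr = br - 1;
			*srr = sr - 1;
			*cks = c;
		}
	}
	return min_err;
}

int main(void)
{
	unsigned int brr, srr, cks;
	long err = scbrr_search(66666666UL, 115200UL, &brr, &srr, &cks);

	printf("BRR=%u SRR=%u CKS=%u, error %+ld bps\n", brr, srr, cks, err);
	return 0;
}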
*/ - for (sr = 8; sr <= 32; sr++) { + if (s->sampling_rate) { + min_sr = max_sr = s->sampling_rate; + shift = 0; + } else { + /* HSCIF has a variable sample rate */ + min_sr = 8; + max_sr = 32; + shift = 1; + } + + /* + * Find the combination of sample rate and clock select with the + * smallest deviation from the desired baud rate. + * Prefer high sample rates to maximise the receive margin. + * + * M: Receive margin (%) + * N: Ratio of bit rate to clock (N = sampling rate) + * D: Clock duty (D = 0 to 1.0) + * L: Frame length (L = 9 to 12) + * F: Absolute value of clock frequency deviation + * + * M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) - + * (|D - 0.5| / N * (1 + F))| + * NOTE: Usually, treat D for 0.5, F is 0 by this calculation. + */ + for (sr = max_sr; sr >= min_sr; sr--) { for (c = 0; c <= 3; c++) { /* integerized formulas from HSCIF documentation */ - br = DIV_ROUND_CLOSEST(freq, (sr * - (1 << (2 * c + 1)) * bps)) - 1; - br = clamp(br, 0, 255); - err = DIV_ROUND_CLOSEST(freq, ((br + 1) * bps * sr * - (1 << (2 * c + 1)) / 1000)) - - 1000; - /* Calc recv margin - * M: Receive margin (%) - * N: Ratio of bit rate to clock (N = sampling rate) - * D: Clock duty (D = 0 to 1.0) - * L: Frame length (L = 9 to 12) - * F: Absolute value of clock frequency deviation + prediv = sr * (1 << (2 * c + shift)); + + /* + * We need to calculate: * - * M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) - - * (|D - 0.5| / N * (1 + F))| - * NOTE: Usually, treat D for 0.5, F is 0 by this - * calculation. + * br = freq / (prediv * bps) clamped to [1..256] + * err = freq / (br * prediv) - bps + * + * Watch out for overflow when calculating the desired + * sampling clock rate! */ - recv_margin = abs((500 - - DIV_ROUND_CLOSEST(1000, sr << 1)) / 10); - if (abs(min_err) > abs(err)) { - min_err = err; - recv_max_margin = recv_margin; - } else if ((min_err == err) && - (recv_margin > recv_max_margin)) - recv_max_margin = recv_margin; - else + if (bps > UINT_MAX / prediv) + break; + + scrate = prediv * bps; + br = DIV_ROUND_CLOSEST(freq, scrate); + br = clamp(br, 1U, 256U); + + err = DIV_ROUND_CLOSEST(freq, br * prediv) - bps; + if (abs(err) >= abs(min_err)) continue; - *brr = br; + min_err = err; + *brr = br - 1; *srr = sr - 1; *cks = c; + + if (!err) + goto found; } } - if (min_err == 1000) { - WARN_ON(1); - /* use defaults */ - *brr = 255; - *srr = 15; - *cks = 0; - } +found: + dev_dbg(s->port.dev, "BRR: %u%+d bps using N %u SR %u cks %u\n", bps, + min_err, *brr, *srr + 1, *cks); + return min_err; } static void sci_reset(struct uart_port *port) @@ -1969,11 +2062,14 @@ static void sci_reset(struct uart_port *port) static void sci_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { + unsigned int baud, smr_val = 0, scr_val = 0, i; + unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0; + unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0; struct sci_port *s = to_sci_port(port); const struct plat_sci_reg *reg; - unsigned int baud, smr_val = 0, max_baud, cks = 0; - int t = -1; - unsigned int srr = 15; + int min_err = INT_MAX, err; + unsigned long max_freq = 0; + int best_clk = -1; if ((termios->c_cflag & CSIZE) == CS7) smr_val |= SCSMR_CHR; @@ -1992,41 +2088,123 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, * that the previous boot loader has enabled required clocks and * setup the baud rate generator hardware for us already. */ - max_baud = port->uartclk ? 
port->uartclk / 16 : 115200; - - baud = uart_get_baud_rate(port, termios, old, 0, max_baud); - if (likely(baud && port->uartclk)) { - if (s->cfg->type == PORT_HSCIF) { - int frame_len = sci_baud_calc_frame_len(smr_val); - sci_baud_calc_hscif(baud, port->uartclk, &t, &srr, - &cks, frame_len); - } else { - t = sci_scbrr_calc(s, baud, port->uartclk); - for (cks = 0; t >= 256 && cks <= 3; cks++) - t >>= 2; + if (!port->uartclk) { + baud = uart_get_baud_rate(port, termios, old, 0, 115200); + goto done; + } + + for (i = 0; i < SCI_NUM_CLKS; i++) + max_freq = max(max_freq, s->clk_rates[i]); + + baud = uart_get_baud_rate(port, termios, old, 0, + max_freq / max(s->sampling_rate, 8U)); + if (!baud) + goto done; + + /* + * There can be multiple sources for the sampling clock. Find the one + * that gives us the smallest deviation from the desired baud rate. + */ + + /* Optional Undivided External Clock */ + if (s->clk_rates[SCI_SCK] && port->type != PORT_SCIFA && + port->type != PORT_SCIFB) { + err = sci_sck_calc(s, baud, &srr1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_SCK; + scr_val = SCSCR_CKE1; + sccks = SCCKS_CKS; + min_err = err; + srr = srr1; + if (!err) + goto done; } } + /* Optional BRG Frequency Divided External Clock */ + if (s->clk_rates[SCI_SCIF_CLK] && sci_getreg(port, SCDL)->size) { + err = sci_brg_calc(s, baud, s->clk_rates[SCI_SCIF_CLK], &dl1, + &srr1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_SCIF_CLK; + scr_val = SCSCR_CKE1; + sccks = 0; + min_err = err; + dl = dl1; + srr = srr1; + if (!err) + goto done; + } + } + + /* Optional BRG Frequency Divided Internal Clock */ + if (s->clk_rates[SCI_BRG_INT] && sci_getreg(port, SCDL)->size) { + err = sci_brg_calc(s, baud, s->clk_rates[SCI_BRG_INT], &dl1, + &srr1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_BRG_INT; + scr_val = SCSCR_CKE1; + sccks = SCCKS_XIN; + min_err = err; + dl = dl1; + srr = srr1; + if (!min_err) + goto done; + } + } + + /* Divided Functional Clock using standard Bit Rate Register */ + err = sci_scbrr_calc(s, baud, &brr1, &srr1, &cks1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_FCK; + scr_val = 0; + min_err = err; + brr = brr1; + srr = srr1; + cks = cks1; + } + +done: + if (best_clk >= 0) + dev_dbg(port->dev, "Using clk %pC for %u%+d bps\n", + s->clks[best_clk], baud, min_err); + sci_port_enable(s); - sci_reset(port); + /* + * Program the optional External Baud Rate Generator (BRG) first. + * It controls the mux to select (H)SCK or frequency divided clock. 
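/*
 * Editorial sketch, not part of the patch: the arithmetic behind the new
 * sci_brg_calc() helper used when one of the BRG clock sources wins the
 * smallest-deviation comparison above.  The BRG divides its input clock by
 * DL (1..65535) and the result is sampled "sr" times per bit, so the
 * achievable rate is freq / (sr * dl).  The function name and the single
 * fixed sampling rate are simplifications for illustration only.
 */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static long brg_calc(unsigned long freq, unsigned long bps, unsigned int sr,
		     unsigned int *dlr)
{
	unsigned long dl = DIV_ROUND_CLOSEST(freq, (unsigned long)sr * bps);

	if (dl < 1)
		dl = 1;
	if (dl > 65535)
		dl = 65535;

	*dlr = dl;
	/* signed deviation from the requested rate, in bits per second */
	return (long)DIV_ROUND_CLOSEST(freq, (unsigned long)sr * dl) - (long)bps;
}
/*
 * Worked example: a 14.7456 MHz SCIF_CLK with 16x sampling gives
 * DL = 14745600 / (16 * 115200) = 8, i.e. exactly 115200 baud with zero
 * error, so with such a crystal the BRG source wins the smallest-deviation
 * comparison against the divided functional clock.
 */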
+ */ + if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) { + serial_port_out(port, SCDL, dl); + serial_port_out(port, SCCKS, sccks); + } - smr_val |= serial_port_in(port, SCSMR) & SCSMR_CKS; + sci_reset(port); uart_update_timeout(port, termios->c_cflag, baud); - dev_dbg(port->dev, "%s: SMR %x, cks %x, t %x, SCSCR %x\n", - __func__, smr_val, cks, t, s->cfg->scscr); - - if (t >= 0) { - serial_port_out(port, SCSMR, (smr_val & ~SCSMR_CKS) | cks); - serial_port_out(port, SCBRR, t); - reg = sci_getreg(port, HSSRR); - if (reg->size) + if (best_clk >= 0) { + smr_val |= cks; + dev_dbg(port->dev, + "SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n", + scr_val, smr_val, brr, sccks, dl, srr); + serial_port_out(port, SCSCR, scr_val); + serial_port_out(port, SCSMR, smr_val); + serial_port_out(port, SCBRR, brr); + if (sci_getreg(port, HSSRR)->size) serial_port_out(port, HSSRR, srr | HSCIF_SRE); - udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ - } else + + /* Wait one bit interval */ + udelay((1000000 + (baud - 1)) / baud); + } else { + /* Don't touch the bit rate configuration */ + scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0); + smr_val |= serial_port_in(port, SCSMR) & SCSMR_CKS; + dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val); + serial_port_out(port, SCSCR, scr_val); serial_port_out(port, SCSMR, smr_val); + } sci_init_pins(port, termios->c_cflag); @@ -2051,7 +2229,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, serial_port_out(port, SCFCR, ctrl); } - serial_port_out(port, SCSCR, s->cfg->scscr); + scr_val |= s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0); + dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val); + serial_port_out(port, SCSCR, scr_val); #ifdef CONFIG_SERIAL_SH_SCI_DMA /* @@ -2241,6 +2421,63 @@ static struct uart_ops sci_uart_ops = { #endif }; +static int sci_init_clocks(struct sci_port *sci_port, struct device *dev) +{ + const char *clk_names[] = { + [SCI_FCK] = "fck", + [SCI_SCK] = "sck", + [SCI_BRG_INT] = "brg_int", + [SCI_SCIF_CLK] = "scif_clk", + }; + struct clk *clk; + unsigned int i; + + if (sci_port->cfg->type == PORT_HSCIF) + clk_names[SCI_SCK] = "hsck"; + + for (i = 0; i < SCI_NUM_CLKS; i++) { + clk = devm_clk_get(dev, clk_names[i]); + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (IS_ERR(clk) && i == SCI_FCK) { + /* + * "fck" used to be called "sci_ick", and we need to + * maintain DT backward compatibility. + */ + clk = devm_clk_get(dev, "sci_ick"); + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (!IS_ERR(clk)) + goto found; + + /* + * Not all SH platforms declare a clock lookup entry + * for SCI devices, in which case we need to get the + * global "peripheral_clk" clock. + */ + clk = devm_clk_get(dev, "peripheral_clk"); + if (!IS_ERR(clk)) + goto found; + + dev_err(dev, "failed to get %s (%ld)\n", clk_names[i], + PTR_ERR(clk)); + return PTR_ERR(clk); + } + +found: + if (IS_ERR(clk)) + dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i], + PTR_ERR(clk)); + else + dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i], + clk, clk); + sci_port->clks[i] = IS_ERR(clk) ? 
NULL : clk; + } + return 0; +} + static int sci_init_single(struct platform_device *dev, struct sci_port *sci_port, unsigned int index, struct plat_sci_port *p, bool early) @@ -2333,22 +2570,9 @@ static int sci_init_single(struct platform_device *dev, sci_port->sampling_rate = p->sampling_rate; if (!early) { - sci_port->iclk = clk_get(&dev->dev, "sci_ick"); - if (IS_ERR(sci_port->iclk)) { - sci_port->iclk = clk_get(&dev->dev, "peripheral_clk"); - if (IS_ERR(sci_port->iclk)) { - dev_err(&dev->dev, "can't get iclk\n"); - return PTR_ERR(sci_port->iclk); - } - } - - /* - * The function clock is optional, ignore it if we can't - * find it. - */ - sci_port->fclk = clk_get(&dev->dev, "sci_fck"); - if (IS_ERR(sci_port->fclk)) - sci_port->fclk = NULL; + ret = sci_init_clocks(sci_port, &dev->dev); + if (ret < 0) + return ret; port->dev = &dev->dev; @@ -2405,9 +2629,6 @@ static int sci_init_single(struct platform_device *dev, static void sci_cleanup_single(struct sci_port *port) { - clk_put(port->iclk); - clk_put(port->fclk); - pm_runtime_disable(port->port.dev); } @@ -2426,7 +2647,7 @@ static void serial_console_write(struct console *co, const char *s, { struct sci_port *sci_port = &sci_ports[co->index]; struct uart_port *port = &sci_port->port; - unsigned short bits, ctrl; + unsigned short bits, ctrl, ctrl_temp; unsigned long flags; int locked = 1; @@ -2438,9 +2659,11 @@ static void serial_console_write(struct console *co, const char *s, else spin_lock(&port->lock); - /* first save the SCSCR then disable the interrupts */ + /* first save SCSCR then disable interrupts, keep clock source */ ctrl = serial_port_in(port, SCSCR); - serial_port_out(port, SCSCR, sci_port->cfg->scscr); + ctrl_temp = (sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) | + (ctrl & (SCSCR_CKE1 | SCSCR_CKE0)); + serial_port_out(port, SCSCR, ctrl_temp); uart_console_write(port, s, count, serial_console_putchar); @@ -2559,9 +2782,6 @@ static int sci_remove(struct platform_device *dev) { struct sci_port *port = platform_get_drvdata(dev); - cpufreq_unregister_notifier(&port->freq_transition, - CPUFREQ_TRANSITION_NOTIFIER); - uart_remove_one_port(&sci_uart_driver, &port->port); sci_cleanup_single(port); @@ -2569,42 +2789,44 @@ static int sci_remove(struct platform_device *dev) return 0; } -struct sci_port_info { - unsigned int type; - unsigned int regtype; -}; + +#define SCI_OF_DATA(type, regtype) (void *)((type) << 16 | (regtype)) +#define SCI_OF_TYPE(data) ((unsigned long)(data) >> 16) +#define SCI_OF_REGTYPE(data) ((unsigned long)(data) & 0xffff) static const struct of_device_id of_sci_match[] = { + /* SoC-specific types */ + { + .compatible = "renesas,scif-r7s72100", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), + }, + /* Family-specific types */ + { + .compatible = "renesas,rcar-gen1-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, { + .compatible = "renesas,rcar-gen2-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, { + .compatible = "renesas,rcar-gen3-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, + /* Generic types */ { .compatible = "renesas,scif", - .data = &(const struct sci_port_info) { - .type = PORT_SCIF, - .regtype = SCIx_SH4_SCIF_REGTYPE, - }, + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE), }, { .compatible = "renesas,scifa", - .data = &(const struct sci_port_info) { - .type = PORT_SCIFA, - .regtype = SCIx_SCIFA_REGTYPE, - }, + .data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE), }, { .compatible = "renesas,scifb", - 
.data = &(const struct sci_port_info) { - .type = PORT_SCIFB, - .regtype = SCIx_SCIFB_REGTYPE, - }, + .data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE), }, { .compatible = "renesas,hscif", - .data = &(const struct sci_port_info) { - .type = PORT_HSCIF, - .regtype = SCIx_HSCIF_REGTYPE, - }, + .data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE), }, { .compatible = "renesas,sci", - .data = &(const struct sci_port_info) { - .type = PORT_SCI, - .regtype = SCIx_SCI_REGTYPE, - }, + .data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE), }, { /* Terminator */ }, @@ -2616,24 +2838,21 @@ sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; - const struct sci_port_info *info; struct plat_sci_port *p; int id; if (!IS_ENABLED(CONFIG_OF) || !np) return NULL; - match = of_match_node(of_sci_match, pdev->dev.of_node); + match = of_match_node(of_sci_match, np); if (!match) return NULL; - info = match->data; - p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL); if (!p) return NULL; - /* Get the line number for the aliases node. */ + /* Get the line number from the aliases node. */ id = of_alias_get_id(np, "serial"); if (id < 0) { dev_err(&pdev->dev, "failed to get alias id (%d)\n", id); @@ -2643,8 +2862,8 @@ sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id) *dev_id = id; p->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; - p->type = info->type; - p->regtype = info->regtype; + p->type = SCI_OF_TYPE(match->data); + p->regtype = SCI_OF_REGTYPE(match->data); p->scscr = SCSCR_RE | SCSCR_TE; return p; @@ -2714,16 +2933,6 @@ static int sci_probe(struct platform_device *dev) if (ret) return ret; - sp->freq_transition.notifier_call = sci_notifier; - - ret = cpufreq_register_notifier(&sp->freq_transition, - CPUFREQ_TRANSITION_NOTIFIER); - if (unlikely(ret < 0)) { - uart_remove_one_port(&sci_uart_driver, &sp->port); - sci_cleanup_single(sp); - return ret; - } - #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_gdb_detach(); #endif diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index bf69bbdcc1f9..fb1760250421 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -27,6 +27,8 @@ enum { HSSRR, /* Sampling Rate Register */ SCPCR, /* Serial Port Control Register */ SCPDR, /* Serial Port Data Register */ + SCDL, /* BRG Frequency Division Register */ + SCCKS, /* BRG Clock Select Register */ SCIx_NR_REGS, }; @@ -109,6 +111,14 @@ enum { #define SCPDR_RTSD BIT(4) /* Serial Port RTS Output Pin Data */ #define SCPDR_CTSD BIT(3) /* Serial Port CTS Input Pin Data */ +/* + * BRG Clock Select Register (Some SCIF and HSCIF) + * The Baud Rate Generator for external clock can provide a clock source for + * the sampling clock. It outputs either its frequency divided clock, or the + * (undivided) (H)SCK external clock. + */ +#define SCCKS_CKS BIT(15) /* Select (H)SCK (1) or divided SC_CLK (0) */ +#define SCCKS_XIN BIT(14) /* SC_CLK uses bus clock (1) or SCIF_CLK (0) */ #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? 
SCI_RDRF : SCIF_RDF) diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 9dbae01d41ce..ef26c4a60be4 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -517,7 +517,7 @@ static struct uart_ops serial_sprd_ops = { }; #ifdef CONFIG_SERIAL_SPRD_CONSOLE -static inline void wait_for_xmitr(struct uart_port *port) +static void wait_for_xmitr(struct uart_port *port) { unsigned int status, tmout = 10000; diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 064031870ba0..ca0d3802f2af 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -148,8 +148,10 @@ static int receive_chars_read(struct uart_port *port) uart_handle_dcd_change(port, 1); } - for (i = 0; i < bytes_read; i++) - uart_handle_sysrq_char(port, con_read_page[i]); + if (port->sysrq != 0 && *con_read_page) { + for (i = 0; i < bytes_read; i++) + uart_handle_sysrq_char(port, con_read_page[i]); + } if (port->state == NULL) continue; @@ -168,17 +170,17 @@ struct sunhv_ops { int (*receive_chars)(struct uart_port *port); }; -static struct sunhv_ops bychar_ops = { +static const struct sunhv_ops bychar_ops = { .transmit_chars = transmit_chars_putchar, .receive_chars = receive_chars_getchar, }; -static struct sunhv_ops bywrite_ops = { +static const struct sunhv_ops bywrite_ops = { .transmit_chars = transmit_chars_write, .receive_chars = receive_chars_read, }; -static struct sunhv_ops *sunhv_ops = &bychar_ops; +static const struct sunhv_ops *sunhv_ops = &bychar_ops; static struct tty_port *receive_chars(struct uart_port *port) { diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index e124d2e88996..9ad98eaa35bf 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -1262,7 +1262,7 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up) /* * Wait for transmitter & holding register to empty */ -static __inline__ void wait_for_xmitr(struct uart_sunsu_port *up) +static void wait_for_xmitr(struct uart_sunsu_port *up) { unsigned int status, tmout = 10000; diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index 4079ec56f5f9..b384060e3b1f 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c @@ -485,7 +485,7 @@ static struct uart_driver vt8500_uart_driver; #ifdef CONFIG_SERIAL_VT8500_CONSOLE -static inline void wait_for_xmitr(struct uart_port *port) +static void wait_for_xmitr(struct uart_port *port) { unsigned int status, tmout = 10000; diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 6fc39fbfc275..5505ea842179 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -89,7 +89,7 @@ * module identification */ static char *driver_name = "SyncLink GT"; -static char *tty_driver_name = "synclink_gt"; +static char *slgt_driver_name = "synclink_gt"; static char *tty_dev_prefix = "ttySLG"; MODULE_LICENSE("GPL"); #define MGSL_MAGIC 0x5401 @@ -3799,7 +3799,7 @@ static int __init slgt_init(void) /* Initialize the tty_driver structure */ - serial_driver->driver_name = tty_driver_name; + serial_driver->driver_name = slgt_driver_name; serial_driver->name = tty_dev_prefix; serial_driver->major = ttymajor; serial_driver->minor_start = 64; diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 5381a728d23e..e5139402e7f8 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -133,6 +133,12 @@ static void sysrq_handle_crash(int key) { char *killer = NULL; + /* we need to release the RCU read lock 
here, + * otherwise we get an annoying + * 'BUG: sleeping function called from invalid context' + * complaint from the kernel before the panic. + */ + rcu_read_unlock(); panic_on_oops = 1; /* force panic */ wmb(); *killer = 1; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 9a479e61791a..3cd31e0d4bd9 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -450,7 +450,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count) count = disc->ops->receive_buf2(tty, p, f, count); else { count = min_t(int, count, tty->receive_room); - if (count) + if (count && disc->ops->receive_buf) disc->ops->receive_buf(tty, p, f, count); } return count; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index bcc8e1e8bb72..892c92354745 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -256,19 +256,24 @@ const char *tty_name(const struct tty_struct *tty) EXPORT_SYMBOL(tty_name); -int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, +const char *tty_driver_name(const struct tty_struct *tty) +{ + if (!tty || !tty->driver) + return ""; + return tty->driver->name; +} + +static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, const char *routine) { #ifdef TTY_PARANOIA_CHECK if (!tty) { - printk(KERN_WARNING - "null TTY for (%d:%d) in %s\n", + pr_warn("(%d:%d): %s: NULL tty\n", imajor(inode), iminor(inode), routine); return 1; } if (tty->magic != TTY_MAGIC) { - printk(KERN_WARNING - "bad magic number for tty struct (%d:%d) in %s\n", + pr_warn("(%d:%d): %s: bad magic number\n", imajor(inode), iminor(inode), routine); return 1; } @@ -293,9 +298,8 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) tty->link && tty->link->count) count++; if (tty->count != count) { - printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) " - "!= #fd's(%d) in %s\n", - tty->name, tty->count, count, routine); + tty_warn(tty, "%s: tty->count(%d) != #fd's(%d)\n", + routine, tty->count, count); return count; } #endif @@ -420,10 +424,8 @@ int __tty_check_change(struct tty_struct *tty, int sig) } rcu_read_unlock(); - if (!tty_pgrp) { - pr_warn("%s: tty_check_change: sig=%d, tty->pgrp == NULL!\n", - tty_name(tty), sig); - } + if (!tty_pgrp) + tty_warn(tty, "sig=%d, tty->pgrp == NULL!\n", sig); return ret; } @@ -781,7 +783,7 @@ static void do_tty_hangup(struct work_struct *work) void tty_hangup(struct tty_struct *tty) { - tty_debug_hangup(tty, "\n"); + tty_debug_hangup(tty, "hangup\n"); schedule_work(&tty->hangup_work); } @@ -798,7 +800,7 @@ EXPORT_SYMBOL(tty_hangup); void tty_vhangup(struct tty_struct *tty) { - tty_debug_hangup(tty, "\n"); + tty_debug_hangup(tty, "vhangup\n"); __tty_hangup(tty, 0); } @@ -835,7 +837,7 @@ void tty_vhangup_self(void) static void tty_vhangup_session(struct tty_struct *tty) { - tty_debug_hangup(tty, "\n"); + tty_debug_hangup(tty, "session hangup\n"); __tty_hangup(tty, 1); } @@ -1239,8 +1241,7 @@ static ssize_t tty_write(struct file *file, const char __user *buf, return -EIO; /* Short term debug to catch buggy drivers */ if (tty->ops->write_room == NULL) - printk(KERN_ERR "tty driver %s lacks a write_room method.\n", - tty->driver->name); + tty_err(tty, "missing write_room method\n"); ld = tty_ldisc_ref_wait(tty); if (!ld->ops->write) ret = -EIO; @@ -1561,8 +1562,8 @@ err_module_put: /* call the tty release_tty routine to clean out this slot */ err_release_tty: tty_unlock(tty); - printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, " - "clearing slot %d\n", idx); + 
tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n", + retval, idx); release_tty(tty, idx); return ERR_PTR(retval); } @@ -1580,10 +1581,8 @@ void tty_free_termios(struct tty_struct *tty) tp = tty->driver->termios[idx]; if (tp == NULL) { tp = kmalloc(sizeof(struct ktermios), GFP_KERNEL); - if (tp == NULL) { - pr_warn("tty: no memory to save termios state.\n"); + if (tp == NULL) return; - } tty->driver->termios[idx] = tp; } *tp = tty->termios; @@ -1788,7 +1787,7 @@ int tty_release(struct inode *inode, struct file *filp) return 0; } - tty_debug_hangup(tty, "(tty count=%d)...\n", tty->count); + tty_debug_hangup(tty, "releasing (count=%d)\n", tty->count); if (tty->ops->close) tty->ops->close(tty, filp); @@ -1837,8 +1836,7 @@ int tty_release(struct inode *inode, struct file *filp) if (once) { once = 0; - printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", - __func__, tty_name(tty)); + tty_warn(tty, "read/write wait queue active!\n"); } schedule_timeout_killable(timeout); if (timeout < 120 * HZ) @@ -1849,14 +1847,12 @@ int tty_release(struct inode *inode, struct file *filp) if (o_tty) { if (--o_tty->count < 0) { - printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n", - __func__, o_tty->count, tty_name(o_tty)); + tty_warn(tty, "bad slave count (%d)\n", o_tty->count); o_tty->count = 0; } } if (--tty->count < 0) { - printk(KERN_WARNING "%s: bad tty->count (%d) for %s\n", - __func__, tty->count, tty_name(tty)); + tty_warn(tty, "bad tty->count (%d)\n", tty->count); tty->count = 0; } @@ -1907,7 +1903,7 @@ int tty_release(struct inode *inode, struct file *filp) /* Wait for pending work before tty destruction commmences */ tty_flush_works(tty); - tty_debug_hangup(tty, "freeing structure...\n"); + tty_debug_hangup(tty, "freeing structure\n"); /* * The release_tty function takes care of the details of clearing * the slots and preserving the termios structure. The tty_unlock_pair @@ -2097,7 +2093,7 @@ retry_open: tty->driver->subtype == PTY_TYPE_MASTER) noctty = 1; - tty_debug_hangup(tty, "(tty count=%d)\n", tty->count); + tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); if (tty->ops->open) retval = tty->ops->open(tty, filp); @@ -2106,7 +2102,7 @@ retry_open: filp->f_flags = saved_flags; if (retval) { - tty_debug_hangup(tty, "error %d, releasing...\n", retval); + tty_debug_hangup(tty, "open error %d, releasing\n", retval); tty_unlock(tty); /* need to call tty_release without BTM */ tty_release(inode, filp); @@ -2870,7 +2866,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) no_tty(); return 0; case TIOCSCTTY: - return tiocsctty(tty, file, arg); + return tiocsctty(real_tty, file, arg); case TIOCGPGRP: return tiocgpgrp(tty, real_tty, p); case TIOCSPGRP: @@ -3028,28 +3024,24 @@ void __do_SAK(struct tty_struct *tty) read_lock(&tasklist_lock); /* Kill the entire session */ do_each_pid_task(session, PIDTYPE_SID, p) { - printk(KERN_NOTICE "SAK: killed process %d" - " (%s): task_session(p)==tty->session\n", - task_pid_nr(p), p->comm); + tty_notice(tty, "SAK: killed process %d (%s): by session\n", + task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); } while_each_pid_task(session, PIDTYPE_SID, p); - /* Now kill any processes that happen to have the - * tty open. 
- */ + + /* Now kill any processes that happen to have the tty open */ do_each_thread(g, p) { if (p->signal->tty == tty) { - printk(KERN_NOTICE "SAK: killed process %d" - " (%s): task_session(p)==tty->session\n", - task_pid_nr(p), p->comm); + tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n", + task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); continue; } task_lock(p); i = iterate_fd(p->files, 0, this_tty, tty); if (i != 0) { - printk(KERN_NOTICE "SAK: killed process %d" - " (%s): fd#%d opened to the tty\n", - task_pid_nr(p), p->comm, i - 1); + tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n", + task_pid_nr(p), p->comm, i - 1); force_sig(SIGKILL, p); } task_unlock(p); @@ -3219,7 +3211,7 @@ EXPORT_SYMBOL(tty_register_device); static void tty_device_create_release(struct device *dev) { - pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + dev_dbg(dev, "releasing...\n"); kfree(dev); } @@ -3255,8 +3247,8 @@ struct device *tty_register_device_attr(struct tty_driver *driver, bool cdev = false; if (index >= driver->num) { - printk(KERN_ERR "Attempt to register invalid tty line number " - " (%d).\n", index); + pr_err("%s: Attempt to register invalid tty line number (%d)\n", + driver->name, index); return ERR_PTR(-EINVAL); } diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 1445dd39aa62..0ea351388724 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -216,7 +216,7 @@ int tty_unthrottle_safe(struct tty_struct *tty) void tty_wait_until_sent(struct tty_struct *tty, long timeout) { - tty_debug_wait_until_sent(tty, "\n"); + tty_debug_wait_until_sent(tty, "wait until sent, timeout=%ld\n", timeout); if (!timeout) timeout = MAX_SCHEDULE_TIMEOUT; @@ -239,19 +239,14 @@ EXPORT_SYMBOL(tty_wait_until_sent); * Termios Helper Methods */ -static void unset_locked_termios(struct ktermios *termios, - struct ktermios *old, - struct ktermios *locked) +static void unset_locked_termios(struct tty_struct *tty, struct ktermios *old) { + struct ktermios *termios = &tty->termios; + struct ktermios *locked = &tty->termios_locked; int i; #define NOSET_MASK(x, y, z) (x = ((x) & ~(z)) | ((y) & (z))) - if (!locked) { - printk(KERN_WARNING "Warning?!? 
termios_locked is NULL.\n"); - return; - } - NOSET_MASK(termios->c_iflag, old->c_iflag, locked->c_iflag); NOSET_MASK(termios->c_oflag, old->c_oflag, locked->c_oflag); NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag); @@ -463,10 +458,8 @@ void tty_termios_encode_baud_rate(struct ktermios *termios, if (ifound == -1 && (ibaud != obaud || ibinput)) termios->c_cflag |= (BOTHER << IBSHIFT); #else - if (ifound == -1 || ofound == -1) { - printk_once(KERN_WARNING "tty: Unable to return correct " - "speed data as your architecture needs updating.\n"); - } + if (ifound == -1 || ofound == -1) + pr_warn_once("tty: Unable to return correct speed data as your architecture needs updating.\n"); #endif } EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate); @@ -556,7 +549,7 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) down_write(&tty->termios_rwsem); old_termios = tty->termios; tty->termios = *new_termios; - unset_locked_termios(&tty->termios, &old_termios, &tty->termios_locked); + unset_locked_termios(tty, &old_termios); if (tty->ops->set_termios) tty->ops->set_termios(tty, &old_termios); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 629e3c865072..a054d03c22e7 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -185,7 +185,7 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) * * Complement of tty_ldisc_get(). */ -static inline void tty_ldisc_put(struct tty_ldisc *ld) +static void tty_ldisc_put(struct tty_ldisc *ld) { if (WARN_ON_ONCE(!ld)) return; @@ -417,6 +417,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); * they are not on hot paths so a little discipline won't do * any harm. * + * The line discipline-related tty_struct fields are reset to + * prevent the ldisc driver from re-using stale information for + * the new ldisc instance. + * * Locking: takes termios_rwsem */ @@ -425,6 +429,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num) down_write(&tty->termios_rwsem); tty->termios.c_line = num; up_write(&tty->termios_rwsem); + + tty->disc_data = NULL; + tty->receive_room = 0; } /** @@ -529,34 +536,21 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty_lock(tty); retval = tty_ldisc_lock(tty, 5 * HZ); - if (retval) { - tty_ldisc_put(new_ldisc); - tty_unlock(tty); - return retval; - } + if (retval) + goto err; - /* - * Check the no-op case - */ + /* Check the no-op case */ + if (tty->ldisc->ops->num == ldisc) + goto out; - if (tty->ldisc->ops->num == ldisc) { - tty_ldisc_unlock(tty); - tty_ldisc_put(new_ldisc); - tty_unlock(tty); - return 0; + if (test_bit(TTY_HUPPED, &tty->flags)) { + /* We were raced by hangup */ + retval = -EIO; + goto out; } old_ldisc = tty->ldisc; - if (test_bit(TTY_HUPPED, &tty->flags)) { - /* We were raced by the hangup method. It will have stomped - the ldisc data and closed the ldisc down */ - tty_ldisc_unlock(tty); - tty_ldisc_put(new_ldisc); - tty_unlock(tty); - return -EIO; - } - /* Shutdown the old discipline. */ tty_ldisc_close(tty, old_ldisc); @@ -582,18 +576,15 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) the old ldisc (if it was restored as part of error cleanup above). In either case, releasing a single reference from the old ldisc is correct. */ - - tty_ldisc_put(old_ldisc); - - /* - * Allow ldisc referencing to occur again - */ + new_ldisc = old_ldisc; +out: tty_ldisc_unlock(tty); /* Restart the work queue in case no characters kick it off. 
Safe if already running */ tty_buffer_restart_work(tty->port); - +err: + tty_ldisc_put(new_ldisc); /* drop the extra reference */ tty_unlock(tty); return retval; } diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index ad7eba5ca380..1bf8ed13f827 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -319,7 +319,7 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout) -static inline int __ldsem_down_read_nested(struct ld_semaphore *sem, +static int __ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout) { long count; @@ -338,7 +338,7 @@ static inline int __ldsem_down_read_nested(struct ld_semaphore *sem, return 1; } -static inline int __ldsem_down_write_nested(struct ld_semaphore *sem, +static int __ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout) { long count; diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c index 0efcf713b756..77703a391207 100644 --- a/drivers/tty/tty_mutex.c +++ b/drivers/tty/tty_mutex.c @@ -12,11 +12,8 @@ void __lockfunc tty_lock(struct tty_struct *tty) { - if (tty->magic != TTY_MAGIC) { - pr_err("L Bad %p\n", tty); - WARN_ON(1); + if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty)) return; - } tty_kref_get(tty); mutex_lock(&tty->legacy_mutex); } @@ -24,11 +21,8 @@ EXPORT_SYMBOL(tty_lock); void __lockfunc tty_unlock(struct tty_struct *tty) { - if (tty->magic != TTY_MAGIC) { - pr_err("U Bad %p\n", tty); - WARN_ON(1); + if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty)) return; - } mutex_unlock(&tty->legacy_mutex); tty_kref_put(tty); } diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 482f33f20043..846ed481c24f 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -462,14 +462,13 @@ int tty_port_close_start(struct tty_port *port, spin_lock_irqsave(&port->lock, flags); if (tty->count == 1 && port->count != 1) { - printk(KERN_WARNING - "tty_port_close_start: tty->count = 1 port count = %d.\n", - port->count); + tty_warn(tty, "%s: tty->count = 1 port count = %d\n", __func__, + port->count); port->count = 1; } if (--port->count < 0) { - printk(KERN_WARNING "tty_port_close_start: count = %d\n", - port->count); + tty_warn(tty, "%s: bad port count (%d)\n", __func__, + port->count); port->count = 0; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 4462d167900c..e7cbc44eef57 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -634,7 +634,7 @@ static void set_origin(struct vc_data *vc) vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x; } -static inline void save_screen(struct vc_data *vc) +static void save_screen(struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 5619b8ca3bf3..3644a3500b70 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig @@ -37,9 +37,4 @@ config USB_CHIPIDEA_HOST Say Y here to enable host controller functionality of the ChipIdea driver. -config USB_CHIPIDEA_DEBUG - bool "ChipIdea driver debug" - help - Say Y here to enable debugging output of the ChipIdea driver. 
- endif diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile index 4decb12f2578..518e445476c3 100644 --- a/drivers/usb/chipidea/Makefile +++ b/drivers/usb/chipidea/Makefile @@ -1,11 +1,8 @@ -ccflags-$(CONFIG_USB_CHIPIDEA_DEBUG) := -DDEBUG - obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o -ci_hdrc-y := core.o otg.o +ci_hdrc-y := core.o otg.o debug.o ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o -ci_hdrc-$(CONFIG_USB_CHIPIDEA_DEBUG) += debug.o ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o # Glue/Bridge layers go here diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index 41d7cf6d63ba..cd414559040f 100644 --- a/drivers/usb/chipidea/ci.h +++ b/drivers/usb/chipidea/ci.h @@ -433,4 +433,7 @@ int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask, void ci_platform_configure(struct ci_hdrc *ci); +int dbg_create_files(struct ci_hdrc *ci); + +void dbg_remove_files(struct ci_hdrc *ci); #endif /* __DRIVERS_USB_CHIPIDEA_CI_H */ diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 5a048b7b92e8..f14f4ab47ebb 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -345,6 +345,11 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) return 0; } +static void ci_hdrc_imx_shutdown(struct platform_device *pdev) +{ + ci_hdrc_imx_remove(pdev); +} + #ifdef CONFIG_PM static int imx_controller_suspend(struct device *dev) { @@ -462,6 +467,7 @@ static const struct dev_pm_ops ci_hdrc_imx_pm_ops = { static struct platform_driver ci_hdrc_imx_driver = { .probe = ci_hdrc_imx_probe, .remove = ci_hdrc_imx_remove, + .shutdown = ci_hdrc_imx_shutdown, .driver = { .name = "imx_usb", .of_match_table = ci_hdrc_imx_dt_ids, diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index d79ecc08a1be..3889809fd0c4 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -25,7 +25,8 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) case CI_HDRC_CONTROLLER_RESET_EVENT: dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n"); writel(0, USB_AHBBURST); - writel(0, USB_AHBMODE); + /* use AHB transactor, allow posted data writes */ + writel(0x8, USB_AHBMODE); usb_phy_init(ci->usb_phy); break; case CI_HDRC_CONTROLLER_STOPPED_EVENT: diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 965d0e240dcb..7404064b9bbc 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -23,7 +23,6 @@ * - BUS: bus glue code, bus abstraction layer * * Compile Options - * - CONFIG_USB_CHIPIDEA_DEBUG: enable debug facilities * - STALL_IN: non-empty bulk-in pipes cannot be halted * if defined mass storage compliance succeeds but with warnings * => case 4: Hi > Dn @@ -71,7 +70,6 @@ #include "udc.h" #include "bits.h" #include "host.h" -#include "debug.h" #include "otg.h" #include "otg_fsm.h" @@ -688,52 +686,39 @@ static int ci_get_platdata(struct device *dev, if (usb_get_maximum_speed(dev) == USB_SPEED_FULL) platdata->flags |= CI_HDRC_FORCE_FULLSPEED; - if (of_find_property(dev->of_node, "phy-clkgate-delay-us", NULL)) - of_property_read_u32(dev->of_node, "phy-clkgate-delay-us", + of_property_read_u32(dev->of_node, "phy-clkgate-delay-us", &platdata->phy_clkgate_delay_us); platdata->itc_setting = 1; - if (of_find_property(dev->of_node, "itc-setting", NULL)) { - ret = of_property_read_u32(dev->of_node, "itc-setting", - &platdata->itc_setting); - if (ret) { - dev_err(dev, - 
"failed to get itc-setting\n"); - return ret; - } - } - if (of_find_property(dev->of_node, "ahb-burst-config", NULL)) { - ret = of_property_read_u32(dev->of_node, "ahb-burst-config", - &platdata->ahb_burst_config); - if (ret) { - dev_err(dev, - "failed to get ahb-burst-config\n"); - return ret; - } + of_property_read_u32(dev->of_node, "itc-setting", + &platdata->itc_setting); + + ret = of_property_read_u32(dev->of_node, "ahb-burst-config", + &platdata->ahb_burst_config); + if (!ret) { platdata->flags |= CI_HDRC_OVERRIDE_AHB_BURST; + } else if (ret != -EINVAL) { + dev_err(dev, "failed to get ahb-burst-config\n"); + return ret; } - if (of_find_property(dev->of_node, "tx-burst-size-dword", NULL)) { - ret = of_property_read_u32(dev->of_node, "tx-burst-size-dword", - &platdata->tx_burst_size); - if (ret) { - dev_err(dev, - "failed to get tx-burst-size-dword\n"); - return ret; - } + ret = of_property_read_u32(dev->of_node, "tx-burst-size-dword", + &platdata->tx_burst_size); + if (!ret) { platdata->flags |= CI_HDRC_OVERRIDE_TX_BURST; + } else if (ret != -EINVAL) { + dev_err(dev, "failed to get tx-burst-size-dword\n"); + return ret; } - if (of_find_property(dev->of_node, "rx-burst-size-dword", NULL)) { - ret = of_property_read_u32(dev->of_node, "rx-burst-size-dword", - &platdata->rx_burst_size); - if (ret) { - dev_err(dev, - "failed to get rx-burst-size-dword\n"); - return ret; - } + ret = of_property_read_u32(dev->of_node, "rx-burst-size-dword", + &platdata->rx_burst_size); + if (!ret) { platdata->flags |= CI_HDRC_OVERRIDE_RX_BURST; + } else if (ret != -EINVAL) { + dev_err(dev, "failed to get rx-burst-size-dword\n"); + return ret; } ext_id = ERR_PTR(-ENODEV); diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index 58c8485a0715..a4f7db2e18dd 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c @@ -15,7 +15,6 @@ #include "ci.h" #include "udc.h" #include "bits.h" -#include "debug.h" #include "otg.h" /** diff --git a/drivers/usb/chipidea/debug.h b/drivers/usb/chipidea/debug.h deleted file mode 100644 index e16478c4a943..000000000000 --- a/drivers/usb/chipidea/debug.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * debug.h - ChipIdea USB driver debug interfaces - * - * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved. - * - * Author: David Lopo - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __DRIVERS_USB_CHIPIDEA_DEBUG_H -#define __DRIVERS_USB_CHIPIDEA_DEBUG_H - -#ifdef CONFIG_USB_CHIPIDEA_DEBUG -int dbg_create_files(struct ci_hdrc *ci); -void dbg_remove_files(struct ci_hdrc *ci); -#else -static inline int dbg_create_files(struct ci_hdrc *ci) -{ - return 0; -} - -static inline void dbg_remove_files(struct ci_hdrc *ci) -{ -} -#endif - -#endif /* __DRIVERS_USB_CHIPIDEA_DEBUG_H */ diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 3d24304405b3..053bac9d983c 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -190,6 +190,8 @@ static void host_stop(struct ci_hdrc *ci) (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON)) regulator_disable(ci->platdata->reg_vbus); } + ci->hcd = NULL; + ci->otg.host = NULL; } diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index 00ab59d45da1..ba90dc66703d 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -485,20 +485,30 @@ static void ci_otg_loc_conn(struct otg_fsm *fsm, int on) /* * Generate SOF by host. - * This is controlled through suspend/resume the port. * In host mode, controller will automatically send SOF. * Suspend will block the data on the port. + * + * This is controlled through usbcore by usb autosuspend, + * so the usb device class driver need support autosuspend, + * otherwise the bus suspend will not happen. */ static void ci_otg_loc_sof(struct otg_fsm *fsm, int on) { - struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); + struct usb_device *udev; - if (on) - hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_FPR, - PORTSC_FPR); - else - hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_SUSP, - PORTSC_SUSP); + if (!fsm->otg->host) + return; + + udev = usb_hub_find_child(fsm->otg->host->root_hub, 1); + if (!udev) + return; + + if (on) { + usb_disable_autosuspend(udev); + } else { + pm_runtime_set_autosuspend_delay(&udev->dev, 0); + usb_enable_autosuspend(udev); + } } /* diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 391a1225b0ba..3eafa2c9a2ba 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -26,7 +26,6 @@ #include "ci.h" #include "udc.h" #include "bits.h" -#include "debug.h" #include "otg.h" #include "otg_fsm.h" @@ -349,14 +348,13 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq, if (node == NULL) return -ENOMEM; - node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC, + node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma); if (node->ptr == NULL) { kfree(node); return -ENOMEM; } - memset(node->ptr, 0, sizeof(struct ci_hw_td)); node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES)); node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES); node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE); @@ -404,9 +402,9 @@ static inline u8 _usb_addr(struct ci_hw_ep *ep) } /** - * _hardware_queue: configures a request at hardware level - * @gadget: gadget + * _hardware_enqueue: configures a request at hardware level * @hwep: endpoint + * @hwreq: request * * This function returns an error code */ @@ -435,19 +433,28 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) if (hwreq->req.dma % PAGE_SIZE) pages--; - if (rest == 0) - add_td_to_list(hwep, hwreq, 0); + if (rest == 0) { + ret = add_td_to_list(hwep, hwreq, 0); + if (ret < 0) + goto done; + } while (rest > 0) { unsigned count = min(hwreq->req.length - hwreq->req.actual, (unsigned)(pages * CI_HDRC_PAGE_SIZE)); - add_td_to_list(hwep, hwreq, 
count); + ret = add_td_to_list(hwep, hwreq, count); + if (ret < 0) + goto done; + rest -= count; } if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX - && (hwreq->req.length % hwep->ep.maxpacket == 0)) - add_td_to_list(hwep, hwreq, 0); + && (hwreq->req.length % hwep->ep.maxpacket == 0)) { + ret = add_td_to_list(hwep, hwreq, 0); + if (ret < 0) + goto done; + } firstnode = list_first_entry(&hwreq->tds, struct td_node, td); @@ -788,8 +795,12 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req) /** * _ep_queue: queues (submits) an I/O request to an endpoint + * @ep: endpoint + * @req: request + * @gfp_flags: GFP flags (not used) * * Caller must hold lock + * This function returns an error code */ static int _ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t __maybe_unused gfp_flags) diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 673d53038ed2..e6ec125e4485 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -17,6 +17,7 @@ #include <linux/usb/ch9.h> #include <linux/usb/of.h> #include <linux/usb/otg.h> +#include <linux/of_platform.h> const char *usb_otg_state_string(enum usb_otg_state state) { @@ -106,25 +107,72 @@ static const char *const usb_dr_modes[] = { [USB_DR_MODE_OTG] = "otg", }; +static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++) + if (!strcmp(usb_dr_modes[i], str)) + return i; + + return USB_DR_MODE_UNKNOWN; +} + enum usb_dr_mode usb_get_dr_mode(struct device *dev) { const char *dr_mode; - int err, i; + int err; err = device_property_read_string(dev, "dr_mode", &dr_mode); if (err < 0) return USB_DR_MODE_UNKNOWN; - for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++) - if (!strcmp(dr_mode, usb_dr_modes[i])) - return i; - - return USB_DR_MODE_UNKNOWN; + return usb_get_dr_mode_from_string(dr_mode); } EXPORT_SYMBOL_GPL(usb_get_dr_mode); #ifdef CONFIG_OF /** + * of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device + * which is associated with the given phy device_node + * @phy_np: Pointer to the given phy device_node + * + * In dts a usb controller associates with phy devices. The function gets + * the string from property 'dr_mode' of the controller associated with the + * given phy device node, and returns the corresponding enum usb_dr_mode.
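As a rough illustration of how a glue or PHY driver might consume the new helper (the caller and its OTG fallback are assumptions; only of_usb_get_dr_mode_by_phy() itself comes from this patch):

        /* Hypothetical caller; needs <linux/usb/of.h> for the declarations used here. */
        static enum usb_dr_mode example_pick_dr_mode(struct device_node *phy_np)
        {
                enum usb_dr_mode mode = of_usb_get_dr_mode_by_phy(phy_np);

                /* Assumed policy for this sketch: default to OTG when the
                 * controller bound to this PHY has no "dr_mode" property. */
                return mode == USB_DR_MODE_UNKNOWN ? USB_DR_MODE_OTG : mode;
        }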
+ */ +enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np) +{ + struct device_node *controller = NULL; + struct device_node *phy; + const char *dr_mode; + int index; + int err; + + do { + controller = of_find_node_with_property(controller, "phys"); + index = 0; + do { + phy = of_parse_phandle(controller, "phys", index); + of_node_put(phy); + if (phy == phy_np) + goto finish; + index++; + } while (phy); + } while (controller); + +finish: + err = of_property_read_string(controller, "dr_mode", &dr_mode); + of_node_put(controller); + + if (err < 0) + return USB_DR_MODE_UNKNOWN; + + return usb_get_dr_mode_from_string(dr_mode); +} +EXPORT_SYMBOL_GPL(of_usb_get_dr_mode_by_phy); + +/** * of_usb_host_tpl_support - to get if Targeted Peripheral List is supported * for given targeted hosts (non-PC hosts) * @np: Pointer to the given device_node diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 2a3bbdf7eb94..cffa0a0d7de2 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -661,32 +661,8 @@ static unsigned int usb_device_poll(struct file *file, return 0; } -static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig) -{ - loff_t ret; - - mutex_lock(&file_inode(file)->i_mutex); - - switch (orig) { - case 0: - file->f_pos = offset; - ret = file->f_pos; - break; - case 1: - file->f_pos += offset; - ret = file->f_pos; - break; - case 2: - default: - ret = -EINVAL; - } - - mutex_unlock(&file_inode(file)->i_mutex); - return ret; -} - const struct file_operations usbfs_devices_fops = { - .llseek = usb_device_lseek, + .llseek = no_seek_end_llseek, .read = usb_device_read, .poll = usb_device_poll, }; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 38ae877c46e3..59e7a3369084 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -100,6 +100,11 @@ static bool usbfs_snoop; module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic"); +static unsigned usbfs_snoop_max = 65536; +module_param(usbfs_snoop_max, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(usbfs_snoop_max, + "maximum number of bytes to print while snooping"); + #define snoop(dev, format, arg...) 
\ do { \ if (usbfs_snoop) \ @@ -157,30 +162,6 @@ static int connected(struct usb_dev_state *ps) ps->dev->state != USB_STATE_NOTATTACHED); } -static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig) -{ - loff_t ret; - - mutex_lock(&file_inode(file)->i_mutex); - - switch (orig) { - case 0: - file->f_pos = offset; - ret = file->f_pos; - break; - case 1: - file->f_pos += offset; - ret = file->f_pos; - break; - case 2: - default: - ret = -EINVAL; - } - - mutex_unlock(&file_inode(file)->i_mutex); - return ret; -} - static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { @@ -392,6 +373,7 @@ static void snoop_urb(struct usb_device *udev, ep, t, d, length, timeout_or_status); } + data_len = min(data_len, usbfs_snoop_max); if (data && data_len > 0) { print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, data, data_len, 1); @@ -402,7 +384,8 @@ static void snoop_urb_data(struct urb *urb, unsigned len) { int i, size; - if (!usbfs_snoop) + len = min(len, usbfs_snoop_max); + if (!usbfs_snoop || len == 0) return; if (urb->num_sgs == 0) { @@ -1709,8 +1692,12 @@ static struct async *reap_as(struct usb_dev_state *ps) static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); + if (as) { - int retval = processcompl(as, (void __user * __user *)arg); + int retval; + + snoop(&ps->dev->dev, "reap %p\n", as->userurb); + retval = processcompl(as, (void __user * __user *)arg); free_async(as); return retval; } @@ -1726,6 +1713,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) as = async_getcompleted(ps); if (as) { + snoop(&ps->dev->dev, "reap %p\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); } else { @@ -1852,8 +1840,12 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); + if (as) { - int retval = processcompl_compat(as, (void __user * __user *)arg); + int retval; + + snoop(&ps->dev->dev, "reap %p\n", as->userurb); + retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); return retval; } @@ -1869,6 +1861,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar as = async_getcompleted(ps); if (as) { + snoop(&ps->dev->dev, "reap %p\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); } else { @@ -2273,7 +2266,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, #endif case USBDEVFS_DISCARDURB: - snoop(&dev->dev, "%s: DISCARDURB\n", __func__); + snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p); ret = proc_unlinkurb(ps, p); break; @@ -2366,7 +2359,7 @@ static unsigned int usbdev_poll(struct file *file, const struct file_operations usbdev_file_operations = { .owner = THIS_MODULE, - .llseek = usbdev_lseek, + .llseek = no_seek_end_llseek, .read = usbdev_read, .poll = usbdev_poll, .unlocked_ioctl = usbdev_ioctl, diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1c102d60cd9f..df0e3b92533a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3000,7 +3000,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown); #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) -struct usb_mon_operations *mon_ops; +const struct usb_mon_operations *mon_ops; /* * The registration is unlocked. 
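For reference, the open-coded usb_device_lseek()/usbdev_lseek() handlers removed above are replaced by the generic no_seek_end_llseek() helper; a minimal sketch of adopting it in a file_operations table (the example_* names are placeholders, not from this patch):

        /* example_read() stands in for the driver's existing read handler. */
        static const struct file_operations example_fops = {
                .owner  = THIS_MODULE,
                .llseek = no_seek_end_llseek,   /* SEEK_SET/SEEK_CUR as before; SEEK_END -> -EINVAL */
                .read   = example_read,
        };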
@@ -3010,7 +3010,7 @@ struct usb_mon_operations *mon_ops; * symbols from usbcore, usbcore gets referenced and cannot be unloaded first. */ -int usb_mon_register (struct usb_mon_operations *ops) +int usb_mon_register(const struct usb_mon_operations *ops) { if (mon_ops) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a5cc032ef77a..51b436918f78 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1035,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) unsigned delay; /* Continue a partial initialization */ - if (type == HUB_INIT2) - goto init2; - if (type == HUB_INIT3) + if (type == HUB_INIT2 || type == HUB_INIT3) { + device_lock(hub->intfdev); + + /* Was the hub disconnected while we were waiting? */ + if (hub->disconnected) { + device_unlock(hub->intfdev); + kref_put(&hub->kref, hub_release); + return; + } + if (type == HUB_INIT2) + goto init2; goto init3; + } + kref_get(&hub->kref); /* The superspeed hub except for root hub has to use Hub Depth * value as an offset into the route string to locate the bits @@ -1236,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) queue_delayed_work(system_power_efficient_wq, &hub->init_work, msecs_to_jiffies(delay)); + device_unlock(hub->intfdev); return; /* Continues at init3: below */ } else { msleep(delay); @@ -1257,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Allow autosuspend if it was suppressed */ if (type <= HUB_INIT3) usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); + + if (type == HUB_INIT2 || type == HUB_INIT3) + device_unlock(hub->intfdev); + + kref_put(&hub->kref, hub_release); } /* Implement the continuations for the delays above */ @@ -3308,7 +3324,7 @@ static int finish_port_resume(struct usb_device *udev) /* * There are some SS USB devices which take longer time for link training. * XHCI specs 4.19.4 says that when Link training is successful, port - * sets CSC bit to 1. So if SW reads port status before successful link + * sets CCS bit to 1. So if SW reads port status before successful link * training, then it will not find device to be present. * USB Analyzer log with such buggy devices show that in some cases * device switch on the RX termination after long delay of host enabling @@ -3319,14 +3335,17 @@ static int finish_port_resume(struct usb_device *udev) * routine implements a 2000 ms timeout for link training. If in a case * link trains before timeout, loop will exit earlier. * + * There are also some 2.0 hard drive based devices and 3.0 thumb + * drives that, when plugged into a 2.0 only port, take a long + * time to set CCS after VBUS enable. + * * FIXME: If a device was connected before suspend, but was removed * while system was asleep, then the loop in the following routine will * only exit at timeout. * - * This routine should only be called when persist is enabled for a SS - * device. + * This routine should only be called when persist is enabled. 
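Looping back to the hub_activate() change above: the HUB_INIT2/HUB_INIT3 continuations now run under the interface lock and hold a reference on the hub for as long as the delayed work is outstanding. A condensed, illustrative sketch of that lifetime rule (names from the patch, control flow abbreviated):

        kref_get(&hub->kref);                   /* taken on the initial HUB_INIT pass */
        queue_delayed_work(system_power_efficient_wq, &hub->init_work,
                           msecs_to_jiffies(delay));

        /* ... later, when the HUB_INIT2/HUB_INIT3 continuation runs ... */
        device_lock(hub->intfdev);
        if (hub->disconnected) {
                device_unlock(hub->intfdev);
                kref_put(&hub->kref, hub_release);      /* last reference may free the hub */
                return;
        }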
*/ -static int wait_for_ss_port_enable(struct usb_device *udev, +static int wait_for_connected(struct usb_device *udev, struct usb_hub *hub, int *port1, u16 *portchange, u16 *portstatus) { @@ -3339,6 +3358,7 @@ static int wait_for_ss_port_enable(struct usb_device *udev, delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); } + dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms); return status; } @@ -3438,8 +3458,8 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) } } - if (udev->persist_enabled && hub_is_superspeed(hub->hdev)) - status = wait_for_ss_port_enable(udev, hub, &port1, &portchange, + if (udev->persist_enabled) + status = wait_for_connected(udev, hub, &port1, &portchange, &portstatus); status = check_port_resume_type(udev, @@ -3879,17 +3899,30 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, return; } - if (usb_set_lpm_timeout(udev, state, timeout)) + if (usb_set_lpm_timeout(udev, state, timeout)) { /* If we can't set the parent hub U1/U2 timeout, * device-initiated LPM won't be allowed either, so let the xHCI * host know that this link state won't be enabled. */ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); + } else { + /* Only a configured device will accept the Set Feature + * U1/U2_ENABLE + */ + if (udev->actconfig) + usb_set_device_initiated_lpm(udev, state, true); - /* Only a configured device will accept the Set Feature U1/U2_ENABLE */ - else if (udev->actconfig) - usb_set_device_initiated_lpm(udev, state, true); - + /* As soon as usb_set_lpm_timeout(timeout) returns 0, the + * hub-initiated LPM is enabled. Thus, LPM is enabled no + * matter the result of usb_set_device_initiated_lpm(). + * The only difference is whether device is able to initiate + * LPM. + */ + if (state == USB3_LPM_U1) + udev->usb3_lpm_u1_enabled = 1; + else if (state == USB3_LPM_U2) + udev->usb3_lpm_u2_enabled = 1; + } } /* @@ -3929,6 +3962,18 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, dev_warn(&udev->dev, "Could not disable xHCI %s timeout, " "bus schedule bandwidth may be impacted.\n", usb3_lpm_names[state]); + + /* As soon as usb_set_lpm_timeout(0) return 0, hub initiated LPM + * is disabled. Hub will disallows link to enter U1/U2 as well, + * even device is initiating LPM. Hence LPM is disabled if hub LPM + * timeout set to 0, no matter device-initiated LPM is disabled or + * not. 
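A small illustration of how other code might consult the split state tracked here; the helper is hypothetical, only the usb3_lpm_u1_enabled/usb3_lpm_u2_enabled fields come from this patch:

        static bool example_usb3_lpm_active(struct usb_device *udev)
        {
                return udev->usb3_lpm_u1_enabled || udev->usb3_lpm_u2_enabled;
        }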
+ */ + if (state == USB3_LPM_U1) + udev->usb3_lpm_u1_enabled = 0; + else if (state == USB3_LPM_U2) + udev->usb3_lpm_u2_enabled = 0; + return 0; } @@ -3963,8 +4008,6 @@ int usb_disable_lpm(struct usb_device *udev) if (usb_disable_link_state(hcd, udev, USB3_LPM_U2)) goto enable_lpm; - udev->usb3_lpm_enabled = 0; - return 0; enable_lpm: @@ -4001,6 +4044,8 @@ EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); void usb_enable_lpm(struct usb_device *udev) { struct usb_hcd *hcd; + struct usb_hub *hub; + struct usb_port *port_dev; if (!udev || !udev->parent || udev->speed != USB_SPEED_SUPER || @@ -4020,10 +4065,17 @@ void usb_enable_lpm(struct usb_device *udev) if (udev->lpm_disable_count > 0) return; - usb_enable_link_state(hcd, udev, USB3_LPM_U1); - usb_enable_link_state(hcd, udev, USB3_LPM_U2); + hub = usb_hub_to_struct_hub(udev->parent); + if (!hub) + return; + + port_dev = hub->ports[udev->portnum - 1]; + + if (port_dev->usb3_lpm_u1_permit) + usb_enable_link_state(hcd, udev, USB3_LPM_U1); - udev->usb3_lpm_enabled = 1; + if (port_dev->usb3_lpm_u2_permit) + usb_enable_link_state(hcd, udev, USB3_LPM_U2); } EXPORT_SYMBOL_GPL(usb_enable_lpm); diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index 688817fb3246..45d070dd1d03 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -92,6 +92,8 @@ struct usb_hub { * @status_lock: synchronize port_event() vs usb_port_{suspend|resume} * @portnum: port index num based one * @is_superspeed cache super-speed status + * @usb3_lpm_u1_permit: whether USB3 U1 LPM is permitted. + * @usb3_lpm_u2_permit: whether USB3 U2 LPM is permitted. */ struct usb_port { struct usb_device *child; @@ -104,6 +106,8 @@ struct usb_port { struct mutex status_lock; u8 portnum; unsigned int is_superspeed:1; + unsigned int usb3_lpm_u1_permit:1; + unsigned int usb3_lpm_u2_permit:1; }; #define to_usb_port(_dev) \ @@ -155,4 +159,3 @@ static inline int hub_port_debounce_be_stable(struct usb_hub *hub, { return hub_port_debounce(hub, port1, false); } - diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 5487fe308f01..460c855be0d0 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -50,6 +50,72 @@ static ssize_t connect_type_show(struct device *dev, } static DEVICE_ATTR_RO(connect_type); +static ssize_t usb3_lpm_permit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_port *port_dev = to_usb_port(dev); + const char *p; + + if (port_dev->usb3_lpm_u1_permit) { + if (port_dev->usb3_lpm_u2_permit) + p = "u1_u2"; + else + p = "u1"; + } else { + if (port_dev->usb3_lpm_u2_permit) + p = "u2"; + else + p = "0"; + } + + return sprintf(buf, "%s\n", p); +} + +static ssize_t usb3_lpm_permit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_port *port_dev = to_usb_port(dev); + struct usb_device *udev = port_dev->child; + struct usb_hcd *hcd; + + if (!strncmp(buf, "u1_u2", 5)) { + port_dev->usb3_lpm_u1_permit = 1; + port_dev->usb3_lpm_u2_permit = 1; + + } else if (!strncmp(buf, "u1", 2)) { + port_dev->usb3_lpm_u1_permit = 1; + port_dev->usb3_lpm_u2_permit = 0; + + } else if (!strncmp(buf, "u2", 2)) { + port_dev->usb3_lpm_u1_permit = 0; + port_dev->usb3_lpm_u2_permit = 1; + + } else if (!strncmp(buf, "0", 1)) { + port_dev->usb3_lpm_u1_permit = 0; + port_dev->usb3_lpm_u2_permit = 0; + } else + return -EINVAL; + + /* If device is connected to the port, disable or enable lpm + * to make new u1 u2 setting take effect immediately. 
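The new per-port attribute is intended to be driven from user space; a purely illustrative program follows (the sysfs path is made up for the example and will differ per hub and port):

        #include <stdio.h>

        int main(void)
        {
                /* Example path only; the real port device depends on topology. */
                FILE *f = fopen("/sys/bus/usb/devices/usb2/2-0:1.0/usb2-port1/usb3_lpm_permit", "w");

                if (!f)
                        return 1;
                fputs("u1_u2", f);      /* accepted values: "u1_u2", "u1", "u2", "0" */
                return fclose(f) ? 1 : 0;
        }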
+ */ + if (udev) { + hcd = bus_to_hcd(udev->bus); + if (!hcd) + return -EINVAL; + usb_lock_device(udev); + mutex_lock(hcd->bandwidth_mutex); + if (!usb_disable_lpm(udev)) + usb_enable_lpm(udev); + mutex_unlock(hcd->bandwidth_mutex); + usb_unlock_device(udev); + } + + return count; +} +static DEVICE_ATTR_RW(usb3_lpm_permit); + static struct attribute *port_dev_attrs[] = { &dev_attr_connect_type.attr, NULL, @@ -64,6 +130,21 @@ static const struct attribute_group *port_dev_group[] = { NULL, }; +static struct attribute *port_dev_usb3_attrs[] = { + &dev_attr_usb3_lpm_permit.attr, + NULL, +}; + +static struct attribute_group port_dev_usb3_attr_grp = { + .attrs = port_dev_usb3_attrs, +}; + +static const struct attribute_group *port_dev_usb3_group[] = { + &port_dev_attr_grp, + &port_dev_usb3_attr_grp, + NULL, +}; + static void usb_port_device_release(struct device *dev) { struct usb_port *port_dev = to_usb_port(dev); @@ -401,6 +482,7 @@ static void find_and_link_peer(struct usb_hub *hub, int port1) int usb_hub_create_port_device(struct usb_hub *hub, int port1) { struct usb_port *port_dev; + struct usb_device *hdev = hub->hdev; int retval; port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL); @@ -417,7 +499,12 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1) port_dev->portnum = port1; set_bit(port1, hub->power_bits); port_dev->dev.parent = hub->intfdev; - port_dev->dev.groups = port_dev_group; + if (hub_is_superspeed(hdev)) { + port_dev->usb3_lpm_u1_permit = 1; + port_dev->usb3_lpm_u2_permit = 1; + port_dev->dev.groups = port_dev_usb3_group; + } else + port_dev->dev.groups = port_dev_group; port_dev->dev.type = &usb_port_device_type; port_dev->dev.driver = &usb_port_driver; if (hub_is_superspeed(hub->hdev)) diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index d9ec2de6c4cf..65b6e6b84043 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -531,7 +531,7 @@ static ssize_t usb2_lpm_besl_store(struct device *dev, } static DEVICE_ATTR_RW(usb2_lpm_besl); -static ssize_t usb3_hardware_lpm_show(struct device *dev, +static ssize_t usb3_hardware_lpm_u1_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); @@ -539,7 +539,7 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev, usb_lock_device(udev); - if (udev->usb3_lpm_enabled) + if (udev->usb3_lpm_u1_enabled) p = "enabled"; else p = "disabled"; @@ -548,7 +548,26 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev, return sprintf(buf, "%s\n", p); } -static DEVICE_ATTR_RO(usb3_hardware_lpm); +static DEVICE_ATTR_RO(usb3_hardware_lpm_u1); + +static ssize_t usb3_hardware_lpm_u2_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_device *udev = to_usb_device(dev); + const char *p; + + usb_lock_device(udev); + + if (udev->usb3_lpm_u2_enabled) + p = "enabled"; + else + p = "disabled"; + + usb_unlock_device(udev); + + return sprintf(buf, "%s\n", p); +} +static DEVICE_ATTR_RO(usb3_hardware_lpm_u2); static struct attribute *usb2_hardware_lpm_attr[] = { &dev_attr_usb2_hardware_lpm.attr, @@ -562,7 +581,8 @@ static struct attribute_group usb2_hardware_lpm_attr_group = { }; static struct attribute *usb3_hardware_lpm_attr[] = { - &dev_attr_usb3_hardware_lpm.attr, + &dev_attr_usb3_hardware_lpm_u1.attr, + &dev_attr_usb3_hardware_lpm_u2.attr, NULL, }; static struct attribute_group usb3_hardware_lpm_attr_group = { @@ -592,7 +612,8 @@ static int add_power_attributes(struct device *dev) if (udev->usb2_hw_lpm_capable == 1) 
rc = sysfs_merge_group(&dev->kobj, &usb2_hardware_lpm_attr_group); - if (udev->lpm_capable == 1) + if (udev->speed == USB_SPEED_SUPER && + udev->lpm_capable == 1) rc = sysfs_merge_group(&dev->kobj, &usb3_hardware_lpm_attr_group); } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index f8bbd0b6d9fe..77e4c9bc0ab1 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -49,12 +49,7 @@ const char *usbcore_name = "usbcore"; static bool nousb; /* Disable USB when built into kernel image */ -/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */ -#ifdef MODULE module_param(nousb, bool, 0444); -#else -core_param(nousb, nousb, bool, 0444); -#endif /* * for external read access to <nousb> diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index ef73e498e98f..39a0fa8a4c0a 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -481,64 +481,168 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) * Do core a soft reset of the core. Be careful with this because it * resets all the internal state machines of the core. */ -static int dwc2_core_reset(struct dwc2_hsotg *hsotg) +int dwc2_core_reset(struct dwc2_hsotg *hsotg) { u32 greset; int count = 0; - u32 gusbcfg; dev_vdbg(hsotg->dev, "%s()\n", __func__); - /* Wait for AHB master IDLE state */ + /* Core Soft Reset */ + greset = dwc2_readl(hsotg->regs + GRSTCTL); + greset |= GRSTCTL_CSFTRST; + dwc2_writel(greset, hsotg->regs + GRSTCTL); do { - usleep_range(20000, 40000); + udelay(1); greset = dwc2_readl(hsotg->regs + GRSTCTL); if (++count > 50) { dev_warn(hsotg->dev, - "%s() HANG! AHB Idle GRSTCTL=%0x\n", + "%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, greset); return -EBUSY; } - } while (!(greset & GRSTCTL_AHBIDLE)); + } while (greset & GRSTCTL_CSFTRST); - /* Core Soft Reset */ + /* Wait for AHB master IDLE state */ count = 0; - greset |= GRSTCTL_CSFTRST; - dwc2_writel(greset, hsotg->regs + GRSTCTL); do { - usleep_range(20000, 40000); + udelay(1); greset = dwc2_readl(hsotg->regs + GRSTCTL); if (++count > 50) { dev_warn(hsotg->dev, - "%s() HANG! Soft Reset GRSTCTL=%0x\n", + "%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, greset); return -EBUSY; } - } while (greset & GRSTCTL_CSFTRST); + } while (!(greset & GRSTCTL_AHBIDLE)); - if (hsotg->dr_mode == USB_DR_MODE_HOST) { - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEDEVMODE; - gusbcfg |= GUSBCFG_FORCEHOSTMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; - gusbcfg |= GUSBCFG_FORCEDEVMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - } else if (hsotg->dr_mode == USB_DR_MODE_OTG) { - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; - gusbcfg &= ~GUSBCFG_FORCEDEVMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - } + return 0; +} + +/* + * Force the mode of the controller. + * + * Forcing the mode is needed for two cases: + * + * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the + * controller to stay in a particular mode regardless of ID pin + * changes. We do this usually after a core reset. + * + * 2) During probe we want to read reset values of the hw + * configuration registers that are only available in either host or + * device mode. We may need to force the mode if the current mode does + * not allow us to access the register in the mode that we want. 
+ * + * In either case it only makes sense to force the mode if the + * controller hardware is OTG capable. + * + * Checks are done in this function to determine whether doing a force + * would be valid or not. + * + * If a force is done, it requires a 25ms delay to take effect. + * + * Returns true if the mode was forced. + */ +static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) +{ + u32 gusbcfg; + u32 set; + u32 clear; + + dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device"); + + /* + * Force mode has no effect if the hardware is not OTG. + */ + if (!dwc2_hw_is_otg(hsotg)) + return false; + + /* + * If dr_mode is either peripheral or host only, there is no + * need to ever force the mode to the opposite mode. + */ + if (WARN_ON(host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)) + return false; + + if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST)) + return false; + + gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + + set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE; + clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE; + + /* + * If the force mode bit is already set, don't set it. + */ + if ((gusbcfg & set) && !(gusbcfg & clear)) + return false; + + gusbcfg &= ~clear; + gusbcfg |= set; + dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); + + msleep(25); + return true; +} + +/* + * Clears the force mode bits. + */ +static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) +{ + u32 gusbcfg; + + gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; + gusbcfg &= ~GUSBCFG_FORCEDEVMODE; + dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); /* * NOTE: This long sleep is _very_ important, otherwise the core will * not stay in host mode after a connector ID change! */ - usleep_range(150000, 200000); + msleep(25); +} + +/* + * Sets or clears force mode based on the dr_mode parameter. + */ +void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg) +{ + switch (hsotg->dr_mode) { + case USB_DR_MODE_HOST: + dwc2_force_mode(hsotg, true); + break; + case USB_DR_MODE_PERIPHERAL: + dwc2_force_mode(hsotg, false); + break; + case USB_DR_MODE_OTG: + dwc2_clear_force_mode(hsotg); + break; + default: + dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n", + __func__, hsotg->dr_mode); + break; + } +} + +/* + * Do core a soft reset of the core. Be careful with this because it + * resets all the internal state machines of the core. + * + * Additionally this will apply force mode as per the hsotg->dr_mode + * parameter. 
+ */ +int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg) +{ + int retval; + retval = dwc2_core_reset(hsotg); + if (retval) + return retval; + + dwc2_force_dr_mode(hsotg); return 0; } @@ -553,16 +657,20 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) */ if (select_phy) { dev_dbg(hsotg->dev, "FS PHY selected\n"); + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg |= GUSBCFG_PHYSEL; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + if (!(usbcfg & GUSBCFG_PHYSEL)) { + usbcfg |= GUSBCFG_PHYSEL; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - /* Reset after a PHY select */ - retval = dwc2_core_reset(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s() Reset failed, aborting", - __func__); - return retval; + /* Reset after a PHY select */ + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + + if (retval) { + dev_err(hsotg->dev, + "%s: Reset failed, aborting", __func__); + return retval; + } } } @@ -597,13 +705,13 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) { - u32 usbcfg; + u32 usbcfg, usbcfg_old; int retval = 0; if (!select_phy) return 0; - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG); /* * HS PHY parameters. These parameters are preserved during soft reset @@ -631,14 +739,16 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) break; } - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + if (usbcfg != usbcfg_old) { + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - /* Reset after setting the PHY parameters */ - retval = dwc2_core_reset(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s() Reset failed, aborting", - __func__); - return retval; + /* Reset after setting the PHY parameters */ + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + if (retval) { + dev_err(hsotg->dev, + "%s: Reset failed, aborting", __func__); + return retval; + } } return retval; @@ -765,11 +875,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) * dwc2_core_init() - Initializes the DWC_otg controller registers and * prepares the core for device mode or host mode operation * - * @hsotg: Programming view of the DWC_otg controller - * @select_phy: If true then also set the Phy type - * @irq: If >= 0, the irq to register + * @hsotg: Programming view of the DWC_otg controller + * @initial_setup: If true then this is the first init for this instance. */ -int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq) +int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) { u32 usbcfg, otgctl; int retval; @@ -791,18 +900,26 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq) dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - /* Reset the Controller */ - retval = dwc2_core_reset(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", - __func__); - return retval; + /* + * Reset the Controller + * + * We only need to reset the controller if this is a re-init. + * For the first init we know for sure that earlier code reset us (it + * needed to in order to properly detect various parameters). 
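A hedged sketch of how callers are expected to use the new initial_setup flag (the wrapper is invented for illustration; only dwc2_core_init()'s behaviour is from this patch):

        static int example_reinit(struct dwc2_hsotg *hsotg, bool first_time)
        {
                /*
                 * At probe time dwc2_get_hwparams() has already reset the core,
                 * so initial_setup = true skips the redundant reset; every later
                 * re-initialisation passes false and goes through
                 * dwc2_core_reset_and_force_dr_mode().
                 */
                return dwc2_core_init(hsotg, first_time);
        }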
+ */ + if (!initial_setup) { + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + if (retval) { + dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", + __func__); + return retval; + } } /* * This needs to happen in FS mode before any other programming occurs */ - retval = dwc2_phy_init(hsotg, select_phy); + retval = dwc2_phy_init(hsotg, initial_setup); if (retval) return retval; @@ -1707,6 +1824,7 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, u32 hcchar; u32 hctsiz = 0; u16 num_packets; + u32 ec_mc; if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "%s()\n", __func__); @@ -1743,6 +1861,13 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & TSIZ_XFERSIZE_MASK; + + /* For split set ec_mc for immediate retries */ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) + ec_mc = 3; + else + ec_mc = 1; } else { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "no split\n"); @@ -1805,6 +1930,9 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & TSIZ_XFERSIZE_MASK; + + /* The ec_mc gets the multi_count for non-split */ + ec_mc = chan->multi_count; } chan->start_pkt_count = num_packets; @@ -1855,8 +1983,7 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); hcchar &= ~HCCHAR_MULTICNT_MASK; - hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & - HCCHAR_MULTICNT_MASK; + hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); if (hcchar & HCCHAR_CHDIS) @@ -1905,7 +2032,6 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) { u32 hcchar; - u32 hc_dma; u32 hctsiz = 0; if (chan->do_ping) @@ -1934,14 +2060,14 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK; + dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr, + chan->desc_list_sz, DMA_TO_DEVICE); + + dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num)); - /* Always start from first descriptor */ - hc_dma &= ~HCDMA_CTD_MASK; - dwc2_writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num)); if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n", - hc_dma, chan->hc_num); + dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", + &chan->desc_list_addr, chan->hc_num); hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); hcchar &= ~HCCHAR_MULTICNT_MASK; @@ -2485,6 +2611,29 @@ void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) hsotg->core_params->dma_desc_enable = val; } +void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val > 0 && (hsotg->core_params->dma_enable <= 0 || + !hsotg->hw_params.dma_desc_enable)) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for dma_desc_fs_enable parameter. 
Check HW configuration.\n", + val); + val = (hsotg->core_params->dma_enable > 0 && + hsotg->hw_params.dma_desc_enable); + } + + hsotg->core_params->dma_desc_fs_enable = val; + dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); +} + void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, int val) { @@ -3016,6 +3165,7 @@ void dwc2_set_parameters(struct dwc2_hsotg *hsotg, dwc2_set_param_otg_cap(hsotg, params->otg_cap); dwc2_set_param_dma_enable(hsotg, params->dma_enable); dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); + dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); dwc2_set_param_host_support_fs_ls_low_power(hsotg, params->host_support_fs_ls_low_power); dwc2_set_param_enable_dynamic_fifo(hsotg, @@ -3052,17 +3202,93 @@ void dwc2_set_parameters(struct dwc2_hsotg *hsotg, dwc2_set_param_hibernation(hsotg, params->hibernation); } +/* + * Forces either host or device mode if the controller is not + * currently in that mode. + * + * Returns true if the mode was forced. + */ +static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) +{ + if (host && dwc2_is_host_mode(hsotg)) + return false; + else if (!host && dwc2_is_device_mode(hsotg)) + return false; + + return dwc2_force_mode(hsotg, host); +} + +/* + * Gets host hardware parameters. Forces host mode if not currently in + * host mode. Should be called immediately after a core soft reset in + * order to get the reset values. + */ +static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + u32 gnptxfsiz; + u32 hptxfsiz; + bool forced; + + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + return; + + forced = dwc2_force_mode_if_needed(hsotg, true); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; + hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + +/* + * Gets device hardware parameters. Forces device mode if not + * currently in device mode. Should be called immediately after a core + * soft reset in order to get the reset values. + */ +static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + bool forced; + u32 gnptxfsiz; + + if (hsotg->dr_mode == USB_DR_MODE_HOST) + return; + + forced = dwc2_force_mode_if_needed(hsotg, false); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + /** * During device initialization, read various hardware configuration * registers and interpret the contents. + * + * This should be called during driver probe. It will perform a core + * soft reset in order to get the reset values of the parameters. */ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) { struct dwc2_hw_params *hw = &hsotg->hw_params; unsigned width; u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; - u32 hptxfsiz, grxfsiz, gnptxfsiz; - u32 gusbcfg; + u32 grxfsiz; + int retval; /* * Attempt to ensure this device is really a DWC_otg Controller. 
@@ -3082,6 +3308,10 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); + retval = dwc2_core_reset(hsotg); + if (retval) + return retval; + hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); @@ -3094,20 +3324,16 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); - /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */ - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg |= GUSBCFG_FORCEHOSTMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - usleep_range(100000, 150000); + /* + * Host specific hardware parameters. Reading these parameters + * requires the controller to be in host mode. The mode will + * be forced, if necessary, to read these values. + */ + dwc2_get_host_hwparams(hsotg); + dwc2_get_dev_hwparams(hsotg); - gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); - hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); - dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - usleep_range(100000, 150000); + /* hwcfg1 */ + hw->dev_ep_dirs = hwcfg1; /* hwcfg2 */ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> @@ -3163,10 +3389,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) /* fifo sizes */ hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> GRXFSIZ_DEPTH_SHIFT; - hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; - hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; dev_dbg(hsotg->dev, "Detected values from hardware:\n"); dev_dbg(hsotg->dev, " op_mode=%d\n", @@ -3275,6 +3497,43 @@ void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); } +/* Returns the controller's GHWCFG2.OTG_MODE. */ +unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg) +{ + u32 ghwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); + + return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >> + GHWCFG2_OP_MODE_SHIFT; +} + +/* Returns true if the controller is capable of DRD. */ +bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg) +{ + unsigned op_mode = dwc2_op_mode(hsotg); + + return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) || + (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) || + (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE); +} + +/* Returns true if the controller is host-only. */ +bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg) +{ + unsigned op_mode = dwc2_op_mode(hsotg); + + return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) || + (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST); +} + +/* Returns true if the controller is device-only. */ +bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg) +{ + unsigned op_mode = dwc2_op_mode(hsotg); + + return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || + (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE); +} + MODULE_DESCRIPTION("DESIGNWARE HS OTG Core"); MODULE_AUTHOR("Synopsys, Inc."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index a66d3cb62b65..7fb6434f4639 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -246,6 +246,13 @@ enum dwc2_ep0_state { * value for this if none is specified. 
* 0 - Address DMA * 1 - Descriptor DMA (default, if available) + * @dma_desc_fs_enable: When DMA mode is enabled, specifies whether to use + * address DMA mode or descriptor DMA mode for accessing + * the data FIFOs in Full Speed mode only. The driver + * will automatically detect the value for this if none is + * specified. + * 0 - Address DMA + * 1 - Descriptor DMA in FS (default, if available) * @speed: Specifies the maximum speed of operation in host and * device mode. The actual speed depends on the speed of * the attached device and the value of phy_type. @@ -375,6 +382,7 @@ struct dwc2_core_params { int otg_ver; int dma_enable; int dma_desc_enable; + int dma_desc_fs_enable; int speed; int enable_dynamic_fifo; int en_multiple_tx_fifo; @@ -451,15 +459,18 @@ struct dwc2_core_params { * 1 - 16 bits * 2 - 8 or 16 bits * @snpsid: Value from SNPSID register + * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) */ struct dwc2_hw_params { unsigned op_mode:3; unsigned arch:2; unsigned dma_desc_enable:1; + unsigned dma_desc_fs_enable:1; unsigned enable_dynamic_fifo:1; unsigned en_multiple_tx_fifo:1; unsigned host_rx_fifo_size:16; unsigned host_nperio_tx_fifo_size:16; + unsigned dev_nperio_tx_fifo_size:16; unsigned host_perio_tx_fifo_size:16; unsigned nperio_tx_q_depth:3; unsigned host_perio_tx_q_depth:3; @@ -476,6 +487,7 @@ struct dwc2_hw_params { unsigned power_optimized:1; unsigned utmi_phy_data_width:2; u32 snpsid; + u32 dev_ep_dirs; }; /* Size of control and EP0 buffers */ @@ -676,6 +688,9 @@ struct dwc2_hregs_backup { * @otg_port: OTG port number * @frame_list: Frame list * @frame_list_dma: Frame list DMA address + * @frame_list_sz: Frame list size + * @desc_gen_cache: Kmem cache for generic descriptors + * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors * * These are for peripheral mode: * @@ -770,6 +785,7 @@ struct dwc2_hsotg { u16 frame_number; u16 periodic_qh_count; bool bus_suspended; + bool new_connection; #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS #define FRAME_NUM_ARRAY_SIZE 1000 @@ -794,6 +810,9 @@ struct dwc2_hsotg { u8 otg_port; u32 *frame_list; dma_addr_t frame_list_dma; + u32 frame_list_sz; + struct kmem_cache *desc_gen_cache; + struct kmem_cache *desc_hsisoc_cache; #ifdef DEBUG u32 frrem_samples; @@ -864,10 +883,14 @@ enum dwc2_halt_status { * The following functions support initialization of the core driver component * and the DWC_otg controller */ +extern int dwc2_core_reset(struct dwc2_hsotg *hsotg); +extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); extern void dwc2_core_host_init(struct dwc2_hsotg *hsotg); extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); +void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); + /* * Host core Functions. 
* The following functions support managing the DWC_otg controller in host @@ -901,7 +924,7 @@ extern void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes); extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num); extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg); -extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq); +extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup); extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd); extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); @@ -942,6 +965,16 @@ extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val); extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val); /* + * When DMA mode is enabled specifies whether to use + * address DMA or DMA Descritor mode with full speed devices + * for accessing the data FIFOs in host mode. + * 0 - address DMA + * 1 - FS DMA Descriptor(default, if available) + */ +extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, + int val); + +/* * Specifies the maximum speed of operation in host and device mode. * The actual speed depends on the speed of the attached device and * the value of phy_type. The actual speed depends on the speed of the @@ -1110,6 +1143,31 @@ extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg); extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg); /* + * The following functions check the controller's OTG operation mode + * capability (GHWCFG2.OTG_MODE). + * + * These functions can be used before the internal hsotg->hw_params + * are read in and cached so they always read directly from the + * GHWCFG2 register. + */ +unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg); +bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg); +bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg); +bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg); + +/* + * Returns the mode of operation, host or device + */ +static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg) +{ + return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0; +} +static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg) +{ + return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0; +} + +/* * Dump core registers and SPRAM */ extern void dwc2_dump_dev_registers(struct dwc2_hsotg *hsotg); @@ -1154,12 +1212,14 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg); -extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg); +extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg); +extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force); extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg); #else static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) { return 0; } -static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) {} +static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {} +static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {} static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {} static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {} static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 27daa42788b1..d85c5c9f96c1 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -86,9 +86,6 @@ 
static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg) hprt0 &= ~HPRT0_ENA; dwc2_writel(hprt0, hsotg->regs + HPRT0); } - - /* Clear interrupt */ - dwc2_writel(GINTSTS_PRTINT, hsotg->regs + GINTSTS); } /** @@ -98,11 +95,11 @@ static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg) */ static void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg) { - dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n", - dwc2_is_host_mode(hsotg) ? "Host" : "Device"); - /* Clear interrupt */ dwc2_writel(GINTSTS_MODEMIS, hsotg->regs + GINTSTS); + + dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n", + dwc2_is_host_mode(hsotg) ? "Host" : "Device"); } /** @@ -239,7 +236,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "a_suspend->a_peripheral (%d)\n", hsotg->op_state); spin_unlock(&hsotg->lock); - dwc2_hcd_disconnect(hsotg); + dwc2_hcd_disconnect(hsotg, false); spin_lock(&hsotg->lock); hsotg->op_state = OTG_STATE_A_PERIPHERAL; } else { @@ -276,9 +273,13 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg) */ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg) { - u32 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + u32 gintmsk; + + /* Clear interrupt */ + dwc2_writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS); /* Need to disable SOF interrupt immediately */ + gintmsk = dwc2_readl(hsotg->regs + GINTMSK); gintmsk &= ~GINTSTS_SOF; dwc2_writel(gintmsk, hsotg->regs + GINTMSK); @@ -295,9 +296,6 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg) queue_work(hsotg->wq_otg, &hsotg->wf_otg); spin_lock(&hsotg->lock); } - - /* Clear interrupt */ - dwc2_writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS); } /** @@ -315,12 +313,12 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) { int ret; - dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n", - hsotg->lx_state); - /* Clear interrupt */ dwc2_writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n", + hsotg->lx_state); + if (dwc2_is_device_mode(hsotg)) { if (hsotg->lx_state == DWC2_L2) { ret = dwc2_exit_hibernation(hsotg, true); @@ -347,6 +345,10 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) { int ret; + + /* Clear interrupt */ + dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n"); dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state); @@ -368,10 +370,9 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) /* Change to L0 state */ hsotg->lx_state = DWC2_L0; } else { - if (hsotg->core_params->hibernation) { - dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS); + if (hsotg->core_params->hibernation) return; - } + if (hsotg->lx_state != DWC2_L1) { u32 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL); @@ -385,9 +386,6 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) hsotg->lx_state = DWC2_L0; } } - - /* Clear interrupt */ - dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS); } /* @@ -396,14 +394,14 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) */ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg) { + dwc2_writel(GINTSTS_DISCONNINT, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "++Disconnect Detected Interrupt++ (%s) %s\n", dwc2_is_host_mode(hsotg) ? 
"Host" : "Device", dwc2_op_state_str(hsotg)); if (hsotg->op_state == OTG_STATE_A_HOST) - dwc2_hcd_disconnect(hsotg); - - dwc2_writel(GINTSTS_DISCONNINT, hsotg->regs + GINTSTS); + dwc2_hcd_disconnect(hsotg, false); } /* @@ -419,6 +417,9 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg) u32 dsts; int ret; + /* Clear interrupt */ + dwc2_writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "USB SUSPEND\n"); if (dwc2_is_device_mode(hsotg)) { @@ -437,7 +438,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg) if (!dwc2_is_device_connected(hsotg)) { dev_dbg(hsotg->dev, "ignore suspend request before enumeration\n"); - goto clear_int; + return; } ret = dwc2_enter_hibernation(hsotg); @@ -476,10 +477,6 @@ skip_power_saving: hsotg->op_state = OTG_STATE_A_HOST; } } - -clear_int: - /* Clear interrupt */ - dwc2_writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS); } #define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \ diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0abf73c91beb..422ab7da4eb5 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2095,7 +2095,7 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) */ /* catch both EnumSpd_FS and EnumSpd_FS48 */ - switch (dsts & DSTS_ENUMSPD_MASK) { + switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) { case DSTS_ENUMSPD_FS: case DSTS_ENUMSPD_FS48: hsotg->gadget.speed = USB_SPEED_FULL; @@ -2244,54 +2244,6 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic) GINTSTS_RXFLVL) /** - * dwc2_hsotg_corereset - issue softreset to the core - * @hsotg: The device state - * - * Issue a soft reset to the core, and await the core finishing it. - */ -static int dwc2_hsotg_corereset(struct dwc2_hsotg *hsotg) -{ - int timeout; - u32 grstctl; - - dev_dbg(hsotg->dev, "resetting core\n"); - - /* issue soft reset */ - dwc2_writel(GRSTCTL_CSFTRST, hsotg->regs + GRSTCTL); - - timeout = 10000; - do { - grstctl = dwc2_readl(hsotg->regs + GRSTCTL); - } while ((grstctl & GRSTCTL_CSFTRST) && timeout-- > 0); - - if (grstctl & GRSTCTL_CSFTRST) { - dev_err(hsotg->dev, "Failed to get CSftRst asserted\n"); - return -EINVAL; - } - - timeout = 10000; - - while (1) { - u32 grstctl = dwc2_readl(hsotg->regs + GRSTCTL); - - if (timeout-- < 0) { - dev_info(hsotg->dev, - "%s: reset failed, GRSTCTL=%08x\n", - __func__, grstctl); - return -ETIMEDOUT; - } - - if (!(grstctl & GRSTCTL_AHBIDLE)) - continue; - - break; /* reset done */ - } - - dev_dbg(hsotg->dev, "reset successful\n"); - return 0; -} - -/** * dwc2_hsotg_core_init - issue softreset to the core * @hsotg: The device state * @@ -2307,7 +2259,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); if (!is_usb_reset) - if (dwc2_hsotg_corereset(hsotg)) + if (dwc2_core_reset(hsotg)) return; /* @@ -2585,7 +2537,7 @@ irq_retry: if (gintsts & GINTSTS_GOUTNAKEFF) { dev_info(hsotg->dev, "GOUTNakEff triggered\n"); - dwc2_writel(DCTL_CGOUTNAK, hsotg->regs + DCTL); + __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK); dwc2_hsotg_dump(hsotg); } @@ -2593,7 +2545,7 @@ irq_retry: if (gintsts & GINTSTS_GINNAKEFF) { dev_info(hsotg->dev, "GINNakEff triggered\n"); - dwc2_writel(DCTL_CGNPINNAK, hsotg->regs + DCTL); + __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK); dwc2_hsotg_dump(hsotg); } @@ -2911,15 +2863,15 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, "%s: timeout DIEPINT.NAKEFF\n", __func__); } else { /* Clear any pending nak 
effect interrupt */ - dwc2_writel(GINTSTS_GINNAKEFF, hsotg->regs + GINTSTS); + dwc2_writel(GINTSTS_GOUTNAKEFF, hsotg->regs + GINTSTS); - __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK); + __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK); /* Wait for global nak to take effect */ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, - GINTSTS_GINNAKEFF, 100)) + GINTSTS_GOUTNAKEFF, 100)) dev_warn(hsotg->dev, - "%s: timeout GINTSTS.GINNAKEFF\n", __func__); + "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__); } /* Disable ep */ @@ -2944,7 +2896,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, /* TODO: Flush shared tx fifo */ } else { /* Remove global NAKs */ - __bic32(hsotg->regs + DCTL, DCTL_SGNPINNAK); + __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK); } } @@ -3403,8 +3355,8 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) /* check hardware configuration */ - cfg = dwc2_readl(hsotg->regs + GHWCFG2); - hsotg->num_of_eps = (cfg >> GHWCFG2_NUM_DEV_EP_SHIFT) & 0xF; + hsotg->num_of_eps = hsotg->hw_params.num_dev_ep; + /* Add ep0 */ hsotg->num_of_eps++; @@ -3415,7 +3367,7 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) /* Same dwc2_hsotg_ep is used in both directions for ep0 */ hsotg->eps_out[0] = hsotg->eps_in[0]; - cfg = dwc2_readl(hsotg->regs + GHWCFG1); + cfg = hsotg->hw_params.dev_ep_dirs; for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) { ep_type = cfg & 3; /* Direction in or both */ @@ -3434,11 +3386,8 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) } } - cfg = dwc2_readl(hsotg->regs + GHWCFG3); - hsotg->fifo_mem = (cfg >> GHWCFG3_DFIFO_DEPTH_SHIFT); - - cfg = dwc2_readl(hsotg->regs + GHWCFG4); - hsotg->dedicated_fifos = (cfg >> GHWCFG4_DED_FIFO_SHIFT) & 1; + hsotg->fifo_mem = hsotg->hw_params.total_fifo_size; + hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo; dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n", hsotg->num_of_eps, @@ -3563,6 +3512,17 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo)); /* Device tree specific probe */ dwc2_hsotg_of_probe(hsotg); + + /* Check against largest possible value. */ + if (hsotg->g_np_g_tx_fifo_sz > + hsotg->hw_params.dev_nperio_tx_fifo_size) { + dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n", + hsotg->g_np_g_tx_fifo_sz, + hsotg->hw_params.dev_nperio_tx_fifo_size); + hsotg->g_np_g_tx_fifo_sz = + hsotg->hw_params.dev_nperio_tx_fifo_size; + } + /* Dump fifo information */ dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", hsotg->g_np_g_tx_fifo_sz); @@ -3579,31 +3539,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) hsotg->op_state = OTG_STATE_B_PERIPHERAL; - /* - * Force Device mode before initialization. - * This allows correctly configuring fifo for device mode. - */ - __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEHOSTMODE); - __orr32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE); - - /* - * According to Synopsys databook, this sleep is needed for the force - * device mode to take effect. 
- */ - msleep(25); - - dwc2_hsotg_corereset(hsotg); ret = dwc2_hsotg_hw_cfg(hsotg); if (ret) { dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret); return ret; } - dwc2_hsotg_init(hsotg); - - /* Switch back to default configuration */ - __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE); - hsotg->ctrl_buff = devm_kzalloc(hsotg->dev, DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); if (!hsotg->ctrl_buff) { diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 571c21727ff9..8847c72e55f6 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -268,15 +268,33 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) } /** + * dwc2_hcd_connect() - Handles connect of the HCD + * + * @hsotg: Pointer to struct dwc2_hsotg + * + * Must be called with interrupt disabled and spinlock held + */ +void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) +{ + if (hsotg->lx_state != DWC2_L0) + usb_hcd_resume_root_hub(hsotg->priv); + + hsotg->flags.b.port_connect_status_change = 1; + hsotg->flags.b.port_connect_status = 1; +} + +/** * dwc2_hcd_disconnect() - Handles disconnect of the HCD * * @hsotg: Pointer to struct dwc2_hsotg + * @force: If true, we won't try to reconnect even if we see device connected. * * Must be called with interrupt disabled and spinlock held */ -void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) +void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) { u32 intr; + u32 hprt0; /* Set status flags for the hub driver */ hsotg->flags.b.port_connect_status_change = 1; @@ -315,6 +333,24 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) dwc2_hcd_cleanup_channels(hsotg); dwc2_host_disconnect(hsotg); + + /* + * Add an extra check here to see if we're actually connected but + * we don't have a detection interrupt pending. This can happen if: + * 1. hardware sees connect + * 2. hardware sees disconnect + * 3. hardware sees connect + * 4. dwc2_port_intr() - clears connect interrupt + * 5. dwc2_handle_common_intr() - calls here + * + * Without the extra check here we will end calling disconnect + * and won't get any future interrupts to handle the connect. 
+ */ + if (!force) { + hprt0 = dwc2_readl(hsotg->regs + HPRT0); + if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS)) + dwc2_hcd_connect(hsotg); + } } /** @@ -881,8 +917,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) */ chan->multi_count = dwc2_hb_mult(qh->maxp); - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->core_params->dma_desc_enable > 0) { chan->desc_list_addr = qh->desc_list_dma; + chan->desc_list_sz = qh->desc_list_sz; + } dwc2_hc_init(hsotg, chan); chan->qh = qh; @@ -1382,7 +1420,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work) dev_err(hsotg->dev, "Connection id status change timed out\n"); hsotg->op_state = OTG_STATE_B_PERIPHERAL; - dwc2_core_init(hsotg, false, -1); + dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); spin_lock_irqsave(&hsotg->lock, flags); dwc2_hsotg_core_init_disconnected(hsotg, false); @@ -1405,7 +1443,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work) hsotg->op_state = OTG_STATE_A_HOST; /* Initialize the Core for Host mode */ - dwc2_core_init(hsotg, false, -1); + dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); dwc2_hcd_start(hsotg); } @@ -1734,6 +1772,28 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, port_status |= USB_PORT_STAT_TEST; /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ + if (hsotg->core_params->dma_desc_fs_enable) { + /* + * Enable descriptor DMA only if a full speed + * device is connected. + */ + if (hsotg->new_connection && + ((port_status & + (USB_PORT_STAT_CONNECTION | + USB_PORT_STAT_HIGH_SPEED | + USB_PORT_STAT_LOW_SPEED)) == + USB_PORT_STAT_CONNECTION)) { + u32 hcfg; + + dev_info(hsotg->dev, "Enabling descriptor DMA mode\n"); + hsotg->core_params->dma_desc_enable = 1; + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg |= HCFG_DESCDMA; + dwc2_writel(hcfg, hsotg->regs + HCFG); + hsotg->new_connection = false; + } + } + dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status); *(__le32 *)buf = cpu_to_le32(port_status); break; @@ -2298,13 +2358,19 @@ static void dwc2_hcd_reset_func(struct work_struct *work) { struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, reset_work.work); + unsigned long flags; u32 hprt0; dev_dbg(hsotg->dev, "USB RESET function called\n"); + + spin_lock_irqsave(&hsotg->lock, flags); + hprt0 = dwc2_read_hprt0(hsotg); hprt0 &= ~HPRT0_RST; dwc2_writel(hprt0, hsotg->regs + HPRT0); hsotg->flags.b.port_reset_change = 1; + + spin_unlock_irqrestore(&hsotg->lock, flags); } /* @@ -2366,7 +2432,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd) spin_lock_irqsave(&hsotg->lock, flags); /* Ensure hcd is disconnected */ - dwc2_hcd_disconnect(hsotg); + dwc2_hcd_disconnect(hsotg, true); dwc2_hcd_stop(hsotg); hsotg->lx_state = DWC2_L3; hcd->state = HC_STATE_HALT; @@ -3054,7 +3120,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) dwc2_disable_global_interrupts(hsotg); /* Initialize the DWC_otg core, and select the Phy type */ - retval = dwc2_core_init(hsotg, true, irq); + retval = dwc2_core_init(hsotg, true); if (retval) goto error2; @@ -3122,6 +3188,47 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) if (!hsotg->status_buf) goto error3; + /* + * Create kmem caches to handle descriptor buffers in descriptor + * DMA mode. + * Alignment must be set to 512 bytes. 
+ */ + if (hsotg->core_params->dma_desc_enable || + hsotg->core_params->dma_desc_fs_enable) { + hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc", + sizeof(struct dwc2_hcd_dma_desc) * + MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA, + NULL); + if (!hsotg->desc_gen_cache) { + dev_err(hsotg->dev, + "unable to create dwc2 generic desc cache\n"); + + /* + * Disable descriptor dma mode since it will not be + * usable. + */ + hsotg->core_params->dma_desc_enable = 0; + hsotg->core_params->dma_desc_fs_enable = 0; + } + + hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc", + sizeof(struct dwc2_hcd_dma_desc) * + MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL); + if (!hsotg->desc_hsisoc_cache) { + dev_err(hsotg->dev, + "unable to create dwc2 hs isoc desc cache\n"); + + kmem_cache_destroy(hsotg->desc_gen_cache); + + /* + * Disable descriptor dma mode since it will not be + * usable. + */ + hsotg->core_params->dma_desc_enable = 0; + hsotg->core_params->dma_desc_fs_enable = 0; + } + } + hsotg->otg_port = 1; hsotg->frame_list = NULL; hsotg->frame_list_dma = 0; @@ -3145,7 +3252,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) */ retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval < 0) - goto error3; + goto error4; device_wakeup_enable(hcd->self.controller); @@ -3155,6 +3262,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) return 0; +error4: + kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->desc_hsisoc_cache); error3: dwc2_hcd_release(hsotg); error2: @@ -3195,6 +3305,10 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) usb_remove_hcd(hcd); hsotg->priv = NULL; + + kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->desc_hsisoc_cache); + dwc2_hcd_release(hsotg); usb_put_hcd(hcd); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index f105bada2fd1..8f0a29cefdf7 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -107,6 +107,7 @@ struct dwc2_qh; * @qh: QH for the transfer being processed by this channel * @hc_list_entry: For linking to list of host channels * @desc_list_addr: Current QH's descriptor list DMA address + * @desc_list_sz: Current QH's descriptor list size * * This structure represents the state of a single host channel when acting in * host mode. It contains the data items needed to transfer packets to an @@ -159,6 +160,7 @@ struct dwc2_host_chan { struct dwc2_qh *qh; struct list_head hc_list_entry; dma_addr_t desc_list_addr; + u32 desc_list_sz; }; struct dwc2_hcd_pipe_info { @@ -251,6 +253,7 @@ enum dwc2_transaction_type { * schedule * @desc_list: List of transfer descriptors * @desc_list_dma: Physical address of desc_list + * @desc_list_sz: Size of descriptors list * @n_bytes: Xfer Bytes array. 
Each element corresponds to a transfer * descriptor and indicates original XferSize value for the * descriptor @@ -284,6 +287,7 @@ struct dwc2_qh { struct list_head qh_list_entry; struct dwc2_hcd_dma_desc *desc_list; dma_addr_t desc_list_dma; + u32 desc_list_sz; u32 *n_bytes; unsigned tt_buffer_dirty:1; }; @@ -340,6 +344,8 @@ struct dwc2_qtd { u8 isoc_split_pos; u16 isoc_frame_index; u16 isoc_split_offset; + u16 isoc_td_last; + u16 isoc_td_first; u32 ssplit_out_xfer_count; u8 error_count; u8 n_desc; @@ -378,18 +384,6 @@ static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr) } /* - * Returns the mode of operation, host or device - */ -static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg) -{ - return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0; -} -static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg) -{ - return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0; -} - -/* * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they * are read as 1, they won't clear when written back. */ @@ -535,6 +529,19 @@ static inline bool dbg_perio(void) { return false; } #define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff) /* + * Returns true if frame1 index is greater than frame2 index. The comparison + * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the + * frame number when the max index frame number is reached. + */ +static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2) +{ + u16 diff = fr_idx1 - fr_idx2; + u16 sign = diff & (FRLISTEN_64_SIZE >> 1); + + return diff && !sign; +} + +/* * Returns true if frame1 is less than or equal to frame2. The comparison is * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the * frame number when the max frame number is reached. 
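
A note on the wrap-around comparison in dwc2_frame_idx_num_gt() introduced just above: frame-list indices live in [0, FRLISTEN_64_SIZE), so the subtraction is interpreted modulo the list size and the "sign" is taken from the half-window bit, i.e. fr_idx1 counts as greater when it is less than half a list ahead of fr_idx2, even across a rollover. A minimal standalone sketch (not part of the patch; FRLISTEN_64_SIZE is assumed to be 64, matching the dwc2 frame-list size):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FRLISTEN_64_SIZE 64	/* assumed 64-entry periodic frame list */

/* Same logic as the new dwc2_frame_idx_num_gt(): fr_idx1 is "ahead of"
 * fr_idx2 when the modulo-64 distance is non-zero and falls in the lower
 * half of the window (bit 5 of the difference clear).
 */
static bool frame_idx_num_gt(uint16_t fr_idx1, uint16_t fr_idx2)
{
	uint16_t diff = fr_idx1 - fr_idx2;
	uint16_t sign = diff & (FRLISTEN_64_SIZE >> 1);

	return diff && !sign;
}

int main(void)
{
	printf("%d\n", frame_idx_num_gt(2, 60));	/* 1: index wrapped around */
	printf("%d\n", frame_idx_num_gt(60, 2));	/* 0: 60 is behind 2 mod 64 */
	printf("%d\n", frame_idx_num_gt(10, 10));	/* 0: equal indices */
	return 0;
}

This is the property the isochronous descriptor-DMA code relies on when it checks whether the current frame number has overtaken the last scheduled descriptor and needs to resynchronize qh->td_last.
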
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c index 78993aba9335..36606fc33c0d 100644 --- a/drivers/usb/dwc2/hcd_ddma.c +++ b/drivers/usb/dwc2/hcd_ddma.c @@ -87,22 +87,31 @@ static u16 dwc2_frame_incr_val(struct dwc2_qh *qh) static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, gfp_t flags) { - qh->desc_list = dma_alloc_coherent(hsotg->dev, - sizeof(struct dwc2_hcd_dma_desc) * - dwc2_max_desc_num(qh), &qh->desc_list_dma, - flags); + struct kmem_cache *desc_cache; + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC + && qh->dev_speed == USB_SPEED_HIGH) + desc_cache = hsotg->desc_hsisoc_cache; + else + desc_cache = hsotg->desc_gen_cache; + + qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) * + dwc2_max_desc_num(qh); + + qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA); if (!qh->desc_list) return -ENOMEM; - memset(qh->desc_list, 0, - sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh)); + qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list, + qh->desc_list_sz, + DMA_TO_DEVICE); qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags); if (!qh->n_bytes) { - dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc) - * dwc2_max_desc_num(qh), qh->desc_list, - qh->desc_list_dma); + dma_unmap_single(hsotg->dev, qh->desc_list_dma, + qh->desc_list_sz, + DMA_FROM_DEVICE); + kfree(qh->desc_list); qh->desc_list = NULL; return -ENOMEM; } @@ -112,10 +121,18 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { + struct kmem_cache *desc_cache; + + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC + && qh->dev_speed == USB_SPEED_HIGH) + desc_cache = hsotg->desc_hsisoc_cache; + else + desc_cache = hsotg->desc_gen_cache; + if (qh->desc_list) { - dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc) - * dwc2_max_desc_num(qh), qh->desc_list, - qh->desc_list_dma); + dma_unmap_single(hsotg->dev, qh->desc_list_dma, + qh->desc_list_sz, DMA_FROM_DEVICE); + kmem_cache_free(desc_cache, qh->desc_list); qh->desc_list = NULL; } @@ -128,21 +145,20 @@ static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags) if (hsotg->frame_list) return 0; - hsotg->frame_list = dma_alloc_coherent(hsotg->dev, - 4 * FRLISTEN_64_SIZE, - &hsotg->frame_list_dma, - mem_flags); + hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE; + hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA); if (!hsotg->frame_list) return -ENOMEM; - memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE); + hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list, + hsotg->frame_list_sz, + DMA_TO_DEVICE); + return 0; } static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg) { - u32 *frame_list; - dma_addr_t frame_list_dma; unsigned long flags; spin_lock_irqsave(&hsotg->lock, flags); @@ -152,14 +168,14 @@ static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg) return; } - frame_list = hsotg->frame_list; - frame_list_dma = hsotg->frame_list_dma; + dma_unmap_single(hsotg->dev, hsotg->frame_list_dma, + hsotg->frame_list_sz, DMA_FROM_DEVICE); + + kfree(hsotg->frame_list); hsotg->frame_list = NULL; spin_unlock_irqrestore(&hsotg->lock, flags); - dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list, - frame_list_dma); } static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en) @@ -249,6 +265,15 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, j = (j + inc) & (FRLISTEN_64_SIZE - 
1); } while (j != i); + /* + * Sync frame list since controller will access it if periodic + * channel is currently enabled. + */ + dma_sync_single_for_device(hsotg->dev, + hsotg->frame_list_dma, + hsotg->frame_list_sz, + DMA_TO_DEVICE); + if (!enable) return; @@ -278,6 +303,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg, hsotg->non_periodic_channels--; } else { dwc2_update_frame_list(hsotg, qh, 0); + hsotg->available_host_channels++; } /* @@ -360,6 +386,8 @@ err0: */ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { + unsigned long flags; + dwc2_desc_list_free(hsotg, qh); /* @@ -369,8 +397,10 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) * when it comes here from endpoint disable routine * channel remains assigned. */ + spin_lock_irqsave(&hsotg->lock, flags); if (qh->channel) dwc2_release_channel_ddma(hsotg, qh); + spin_unlock_irqrestore(&hsotg->lock, flags); if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC || qh->ep_type == USB_ENDPOINT_XFER_INT) && @@ -524,14 +554,23 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT & HOST_DMA_ISOC_NBYTES_MASK; + /* Set active bit */ + dma_desc->status |= HOST_DMA_A; + + qh->ntd++; + qtd->isoc_frame_index_last++; + #ifdef ISOC_URB_GIVEBACK_ASAP /* Set IOC for each descriptor corresponding to last frame of URB */ if (qtd->isoc_frame_index_last == qtd->urb->packet_count) dma_desc->status |= HOST_DMA_IOC; #endif - qh->ntd++; - qtd->isoc_frame_index_last++; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + (idx * sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, @@ -539,11 +578,32 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, { struct dwc2_qtd *qtd; u32 max_xfer_size; - u16 idx, inc, n_desc, ntd_max = 0; + u16 idx, inc, n_desc = 0, ntd_max = 0; + u16 cur_idx; + u16 next_idx; idx = qh->td_last; inc = qh->interval; - n_desc = 0; + hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); + cur_idx = dwc2_frame_list_idx(hsotg->frame_number); + next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed); + + /* + * Ensure current frame number didn't overstep last scheduled + * descriptor. If it happens, the only way to recover is to move + * qh->td_last to current frame number + 1. + * So that next isoc descriptor will be scheduled on frame number + 1 + * and not on a past frame. 
+ */ + if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) { + if (inc < 32) { + dev_vdbg(hsotg->dev, + "current frame number overstep last descriptor\n"); + qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc, + qh->dev_speed); + idx = qh->td_last; + } + } if (qh->interval) { ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) / @@ -556,15 +616,20 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS; list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) { + if (qtd->in_process && + qtd->isoc_frame_index_last == + qtd->urb->packet_count) + continue; + + qtd->isoc_td_first = idx; while (qh->ntd < ntd_max && qtd->isoc_frame_index_last < qtd->urb->packet_count) { - if (n_desc > 1) - qh->desc_list[n_desc - 1].status |= HOST_DMA_A; dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh, max_xfer_size, idx); idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed); n_desc++; } + qtd->isoc_td_last = idx; qtd->in_process = 1; } @@ -575,6 +640,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, if (qh->ntd == ntd_max) { idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed); qh->desc_list[idx].status |= HOST_DMA_IOC; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + (idx * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } #else /* @@ -604,13 +674,12 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed); qh->desc_list[idx].status |= HOST_DMA_IOC; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + (idx * sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); #endif - - if (n_desc) { - qh->desc_list[n_desc - 1].status |= HOST_DMA_A; - if (n_desc > 1) - qh->desc_list[0].status |= HOST_DMA_A; - } } static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, @@ -647,6 +716,12 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, dma_desc->buf = (u32)chan->xfer_dma; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + (n_desc * sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); + /* * Last (or only) descriptor of IN transfer with actual size less * than MaxPacket @@ -697,6 +772,12 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, "set A bit in desc %d (%p)\n", n_desc - 1, &qh->desc_list[n_desc - 1]); + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + ((n_desc - 1) * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc); dev_vdbg(hsotg->dev, @@ -722,10 +803,19 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A; dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n", n_desc - 1, &qh->desc_list[n_desc - 1]); + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + (n_desc - 1) * + sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); if (n_desc > 1) { qh->desc_list[0].status |= HOST_DMA_A; dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n", &qh->desc_list[0]); + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma, + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } chan->ntd = n_desc; } @@ -800,7 +890,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, struct dwc2_qh *qh, u16 idx) { - struct dwc2_hcd_dma_desc 
*dma_desc = &qh->desc_list[idx]; + struct dwc2_hcd_dma_desc *dma_desc; struct dwc2_hcd_iso_packet_desc *frame_desc; u16 remain = 0; int rc = 0; @@ -808,6 +898,13 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, if (!qtd->urb) return -EINVAL; + dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_FROM_DEVICE); + + dma_desc = &qh->desc_list[idx]; + frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last]; dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset); if (chan->ep_is_in) @@ -911,17 +1008,51 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg, list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) { if (!qtd->in_process) break; + + /* + * Ensure idx corresponds to descriptor where first urb of this + * qtd was added. In fact, during isoc desc init, dwc2 may skip + * an index if current frame number is already over this index. + */ + if (idx != qtd->isoc_td_first) { + dev_vdbg(hsotg->dev, + "try to complete %d instead of %d\n", + idx, qtd->isoc_td_first); + idx = qtd->isoc_td_first; + } + do { + struct dwc2_qtd *qtd_next; + u16 cur_idx; + rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh, idx); if (rc < 0) return; idx = dwc2_desclist_idx_inc(idx, qh->interval, chan->speed); - if (rc == DWC2_CMPL_STOP) - goto stop_scan; + if (!rc) + continue; + if (rc == DWC2_CMPL_DONE) break; + + /* rc == DWC2_CMPL_STOP */ + + if (qh->interval >= 32) + goto stop_scan; + + qh->td_first = idx; + cur_idx = dwc2_frame_list_idx(hsotg->frame_number); + qtd_next = list_first_entry(&qh->qtd_list, + struct dwc2_qtd, + qtd_list_entry); + if (dwc2_frame_idx_num_gt(cur_idx, + qtd_next->isoc_td_last)) + break; + + goto stop_scan; + } while (idx != qh->td_first); } @@ -1029,6 +1160,12 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, if (!urb) return -EINVAL; + dma_sync_single_for_cpu(hsotg->dev, + qh->desc_list_dma + (desc_num * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_FROM_DEVICE); + dma_desc = &qh->desc_list[desc_num]; n_bytes = qh->n_bytes[desc_num]; dev_vdbg(hsotg->dev, @@ -1037,7 +1174,10 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc, halt_status, n_bytes, xfer_done); - if (failed || (*xfer_done && urb->status != -EINPROGRESS)) { + if (*xfer_done && urb->status != -EINPROGRESS) + failed = 1; + + if (failed) { dwc2_host_complete(hsotg, qtd, urb->status); dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n", @@ -1165,6 +1305,21 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg, /* Release the channel if halted or session completed */ if (halt_status != DWC2_HC_XFER_COMPLETE || list_empty(&qh->qtd_list)) { + struct dwc2_qtd *qtd, *qtd_tmp; + + /* + * Kill all remainings QTDs since channel has been + * halted. 
+ */ + list_for_each_entry_safe(qtd, qtd_tmp, + &qh->qtd_list, + qtd_list_entry) { + dwc2_host_complete(hsotg, qtd, + -ECONNRESET); + dwc2_hcd_qtd_unlink_and_free(hsotg, + qtd, qh); + } + /* Halt the channel if session completed */ if (halt_status == DWC2_HC_XFER_COMPLETE) dwc2_hc_halt(hsotg, chan, halt_status); @@ -1174,7 +1329,12 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg, /* Keep in assigned schedule to continue transfer */ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned); - continue_isoc_xfer = 1; + /* + * If channel has been halted during giveback of urb + * then prevent any new scheduling. + */ + if (!chan->halt_status) + continue_isoc_xfer = 1; } /* * Todo: Consider the case when period exceeds FrameList size. diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index bda0b21b850f..f8253803a050 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -122,6 +122,9 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg) struct dwc2_qh *qh; enum dwc2_transaction_type tr_type; + /* Clear interrupt */ + dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS); + #ifdef DEBUG_SOF dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n"); #endif @@ -146,9 +149,6 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg) tr_type = dwc2_hcd_select_transactions(hsotg); if (tr_type != DWC2_TRANSACTION_NONE) dwc2_hcd_queue_transactions(hsotg, tr_type); - - /* Clear interrupt */ - dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS); } /* @@ -312,6 +312,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0, if (do_reset) { *hprt0_modify |= HPRT0_RST; + dwc2_writel(*hprt0_modify, hsotg->regs + HPRT0); queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work, msecs_to_jiffies(60)); } else { @@ -347,15 +348,12 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg) * Set flag and clear if detected */ if (hprt0 & HPRT0_CONNDET) { + dwc2_writel(hprt0_modify | HPRT0_CONNDET, hsotg->regs + HPRT0); + dev_vdbg(hsotg->dev, "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n", hprt0); - if (hsotg->lx_state != DWC2_L0) - usb_hcd_resume_root_hub(hsotg->priv); - - hsotg->flags.b.port_connect_status_change = 1; - hsotg->flags.b.port_connect_status = 1; - hprt0_modify |= HPRT0_CONNDET; + dwc2_hcd_connect(hsotg); /* * The Hub driver asserts a reset when it sees port connect @@ -368,27 +366,36 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg) * Clear if detected - Set internal flag if disabled */ if (hprt0 & HPRT0_ENACHG) { + dwc2_writel(hprt0_modify | HPRT0_ENACHG, hsotg->regs + HPRT0); dev_vdbg(hsotg->dev, " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n", hprt0, !!(hprt0 & HPRT0_ENA)); - hprt0_modify |= HPRT0_ENACHG; - if (hprt0 & HPRT0_ENA) + if (hprt0 & HPRT0_ENA) { + hsotg->new_connection = true; dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify); - else + } else { hsotg->flags.b.port_enable_change = 1; + if (hsotg->core_params->dma_desc_fs_enable) { + u32 hcfg; + + hsotg->core_params->dma_desc_enable = 0; + hsotg->new_connection = false; + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg &= ~HCFG_DESCDMA; + dwc2_writel(hcfg, hsotg->regs + HCFG); + } + } } /* Overcurrent Change Interrupt */ if (hprt0 & HPRT0_OVRCURRCHG) { + dwc2_writel(hprt0_modify | HPRT0_OVRCURRCHG, + hsotg->regs + HPRT0); dev_vdbg(hsotg->dev, " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n", hprt0); hsotg->flags.b.port_over_current_change = 1; - hprt0_modify |= HPRT0_OVRCURRCHG; } - - /* Clear Port Interrupts */ - dwc2_writel(hprt0_modify, 
hsotg->regs + HPRT0); } /* diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 7d8d06cfe3c1..27d402f680a3 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -232,7 +232,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, */ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { - if (hsotg->core_params->dma_desc_enable > 0) { + if (qh->desc_list) { dwc2_hcd_qh_free_ddma(hsotg, qh); } else { /* kfree(NULL) is safe */ diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h index 553f24606c43..281b57b36ab4 100644 --- a/drivers/usb/dwc2/hw.h +++ b/drivers/usb/dwc2/hw.h @@ -769,10 +769,6 @@ #define TSIZ_XFERSIZE_SHIFT 0 #define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch)) -#define HCDMA_DMA_ADDR_MASK (0x1fffff << 11) -#define HCDMA_DMA_ADDR_SHIFT 11 -#define HCDMA_CTD_MASK (0xff << 3) -#define HCDMA_CTD_SHIFT 3 #define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch)) diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 39c1cbf0e75d..510f787434b3 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -54,11 +54,44 @@ static const char dwc2_driver_name[] = "dwc2"; +static const struct dwc2_core_params params_hi6220 = { + .otg_cap = 2, /* No HNP/SRP capable */ + .otg_ver = 0, /* 1.3 */ + .dma_enable = 1, + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = 0, /* High Speed */ + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = 1, + .host_rx_fifo_size = 512, + .host_nperio_tx_fifo_size = 512, + .host_perio_tx_fifo_size = 512, + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = 16, + .phy_type = 1, /* UTMI */ + .phy_utmi_width = 8, + .phy_ulpi_ddr = 0, /* Single */ + .phy_ulpi_ext_vbus = 0, + .i2c_enable = 0, + .ulpi_fs_ls = 0, + .host_support_fs_ls_low_power = 0, + .host_ls_low_power_phy_clk = 0, /* 48 MHz */ + .ts_dline = 0, + .reload_ctl = 0, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + static const struct dwc2_core_params params_bcm2835 = { .otg_cap = 0, /* HNP/SRP capable */ .otg_ver = 0, /* 1.3 */ .dma_enable = 1, .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, .speed = 0, /* High Speed */ .enable_dynamic_fifo = 1, .en_multiple_tx_fifo = 1, @@ -89,6 +122,7 @@ static const struct dwc2_core_params params_rk3066 = { .otg_ver = -1, .dma_enable = -1, .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, .speed = -1, .enable_dynamic_fifo = 1, .en_multiple_tx_fifo = -1, @@ -115,6 +149,71 @@ static const struct dwc2_core_params params_rk3066 = { .hibernation = -1, }; +/* + * Check the dr_mode against the module configuration and hardware + * capabilities. + * + * The hardware, module, and dr_mode, can each be set to host, device, + * or otg. Check that all these values are compatible and adjust the + * value of dr_mode if possible. 
+ * + * actual + * HW MOD dr_mode dr_mode + * ------------------------------ + * HST HST any : HST + * HST DEV any : --- + * HST OTG any : HST + * + * DEV HST any : --- + * DEV DEV any : DEV + * DEV OTG any : DEV + * + * OTG HST any : HST + * OTG DEV any : DEV + * OTG OTG any : dr_mode + */ +static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg) +{ + enum usb_dr_mode mode; + + hsotg->dr_mode = usb_get_dr_mode(hsotg->dev); + if (hsotg->dr_mode == USB_DR_MODE_UNKNOWN) + hsotg->dr_mode = USB_DR_MODE_OTG; + + mode = hsotg->dr_mode; + + if (dwc2_hw_is_device(hsotg)) { + if (IS_ENABLED(CONFIG_USB_DWC2_HOST)) { + dev_err(hsotg->dev, + "Controller does not support host mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_PERIPHERAL; + } else if (dwc2_hw_is_host(hsotg)) { + if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL)) { + dev_err(hsotg->dev, + "Controller does not support device mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_HOST; + } else { + if (IS_ENABLED(CONFIG_USB_DWC2_HOST)) + mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL)) + mode = USB_DR_MODE_PERIPHERAL; + } + + if (mode != hsotg->dr_mode) { + dev_warn(hsotg->dev, + "Configuration mismatch. dr_mode forced to %s\n", + mode == USB_DR_MODE_HOST ? "host" : "device"); + + hsotg->dr_mode = mode; + } + + return 0; +} + static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) { struct platform_device *pdev = to_platform_device(hsotg->dev); @@ -306,8 +405,28 @@ static int dwc2_driver_remove(struct platform_device *dev) return 0; } +/** + * dwc2_driver_shutdown() - Called on device shutdown + * + * @dev: Platform device + * + * In specific conditions (involving usb hubs) dwc2 devices can create a + * lot of interrupts, even to the point of overwhelming devices running + * at low frequencies. Some devices need to do special clock handling + * at shutdown-time which may bring the system clock below the threshold + * of being able to handle the dwc2 interrupts. Disabling dwc2-irqs + * prevents reboots/poweroffs from getting stuck in such cases. + */ +static void dwc2_driver_shutdown(struct platform_device *dev) +{ + struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); + + disable_irq(hsotg->irq); +} + static const struct of_device_id dwc2_of_match_table[] = { { .compatible = "brcm,bcm2835-usb", .data = ¶ms_bcm2835 }, + { .compatible = "hisilicon,hi6220-usb", .data = ¶ms_hi6220 }, { .compatible = "rockchip,rk3066-usb", .data = ¶ms_rk3066 }, { .compatible = "snps,dwc2", .data = NULL }, { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, @@ -335,7 +454,6 @@ static int dwc2_driver_probe(struct platform_device *dev) struct dwc2_hsotg *hsotg; struct resource *res; int retval; - int irq; match = of_match_device(dwc2_of_match_table, &dev->dev); if (match && match->data) { @@ -348,8 +466,10 @@ static int dwc2_driver_probe(struct platform_device *dev) /* * Disable descriptor dma mode by default as the HW can support * it, but does not support it for SPLIT transactions. + * Disable it for FS devices as well. */ defparams.dma_desc_enable = 0; + defparams.dma_desc_fs_enable = 0; } hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL); @@ -375,19 +495,6 @@ static int dwc2_driver_probe(struct platform_device *dev) dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", (unsigned long)res->start, hsotg->regs); - hsotg->dr_mode = usb_get_dr_mode(&dev->dev); - if (IS_ENABLED(CONFIG_USB_DWC2_HOST) && - hsotg->dr_mode != USB_DR_MODE_HOST) { - hsotg->dr_mode = USB_DR_MODE_HOST; - dev_warn(hsotg->dev, - "Configuration mismatch. 
Forcing host mode\n"); - } else if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) && - hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) { - hsotg->dr_mode = USB_DR_MODE_PERIPHERAL; - dev_warn(hsotg->dev, - "Configuration mismatch. Forcing peripheral mode\n"); - } - retval = dwc2_lowlevel_hw_init(hsotg); if (retval) return retval; @@ -401,15 +508,15 @@ static int dwc2_driver_probe(struct platform_device *dev) dwc2_set_all_params(hsotg->core_params, -1); - irq = platform_get_irq(dev, 0); - if (irq < 0) { + hsotg->irq = platform_get_irq(dev, 0); + if (hsotg->irq < 0) { dev_err(&dev->dev, "missing IRQ resource\n"); - return irq; + return hsotg->irq; } dev_dbg(hsotg->dev, "registering common handler for irq%d\n", - irq); - retval = devm_request_irq(hsotg->dev, irq, + hsotg->irq); + retval = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_handle_common_intr, IRQF_SHARED, dev_name(hsotg->dev), hsotg); if (retval) @@ -419,7 +526,11 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) return retval; - /* Detect config values from hardware */ + retval = dwc2_get_dr_mode(hsotg); + if (retval) + return retval; + + /* Reset the controller and detect hardware config values */ retval = dwc2_get_hwparams(hsotg); if (retval) goto error; @@ -427,15 +538,17 @@ static int dwc2_driver_probe(struct platform_device *dev) /* Validate parameter values */ dwc2_set_parameters(hsotg, params); + dwc2_force_dr_mode(hsotg); + if (hsotg->dr_mode != USB_DR_MODE_HOST) { - retval = dwc2_gadget_init(hsotg, irq); + retval = dwc2_gadget_init(hsotg, hsotg->irq); if (retval) goto error; hsotg->gadget_enabled = 1; } if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) { - retval = dwc2_hcd_init(hsotg, irq); + retval = dwc2_hcd_init(hsotg, hsotg->irq); if (retval) { if (hsotg->gadget_enabled) dwc2_hsotg_remove(hsotg); @@ -502,6 +615,7 @@ static struct platform_driver dwc2_platform_driver = { }, .probe = dwc2_driver_probe, .remove = dwc2_driver_remove, + .shutdown = dwc2_driver_shutdown, }; module_platform_driver(dwc2_platform_driver); diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 5a42c4590402..a64ce1c94d6d 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -87,6 +87,15 @@ config USB_DWC3_KEYSTONE Support of USB2/3 functionality in TI Keystone2 platforms. Say 'Y' or 'M' here if you have one such device +config USB_DWC3_OF_SIMPLE + tristate "Generic OF Simple Glue Layer" + depends on OF && COMMON_CLK + default USB_DWC3 + help + Support USB2/3 functionality in simple SoC integrations. + Currently supports Xilinx and Qualcomm DWC USB3 IP. + Say 'Y' or 'M' if you have one such device. + config USB_DWC3_ST tristate "STMicroelectronics Platforms" depends on ARCH_STI && OF @@ -96,12 +105,4 @@ config USB_DWC3_ST inside (i.e. STiH407). Say 'Y' or 'M' if you have one such device. -config USB_DWC3_QCOM - tristate "Qualcomm Platforms" - depends on ARCH_QCOM || COMPILE_TEST - default USB_DWC3 - help - Recent Qualcomm SoCs ship with one DesignWare Core USB3 IP inside, - say 'Y' or 'M' if you have one such device. 
- endif diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index acc951d46c27..22420e17d68b 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -37,5 +37,5 @@ obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o -obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o +obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 22b4797383cd..de5e01f41bc2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -272,7 +272,8 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc) for (n = 0; n < dwc->num_event_buffers; n++) { evt = dwc->ev_buffs[n]; - dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n", + dwc3_trace(trace_dwc3_core, + "Event buf %p dma %08llx length %d\n", evt->buf, (unsigned long long) evt->dma, evt->length); @@ -608,12 +609,13 @@ static int dwc3_core_init(struct dwc3 *dwc) reg |= DWC3_GCTL_GBLHIBERNATIONEN; break; default: - dev_dbg(dwc->dev, "No power optimization available\n"); + dwc3_trace(trace_dwc3_core, "No power optimization available\n"); } /* check if current dwc3 is on simulation board */ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { - dev_dbg(dwc->dev, "it is on FPGA board\n"); + dwc3_trace(trace_dwc3_core, + "running on FPGA platform\n"); dwc->is_fpga = true; } diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 36f1cb74588c..29130682e547 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -37,6 +37,7 @@ #define DWC3_MSG_MAX 500 /* Global constants */ +#define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */ #define DWC3_EP0_BOUNCE_SIZE 512 #define DWC3_ENDPOINTS_NUM 32 #define DWC3_XHCI_RESOURCES_NUM 2 @@ -647,6 +648,7 @@ struct dwc3_scratchpad_array { * @ctrl_req: usb control request which is used for ep0 * @ep0_trb: trb which is used for the ctrl_req * @ep0_bounce: bounce buffer for ep0 + * @zlp_buf: used when request->zero is set * @setup_buf: used while precessing STD USB requests * @ctrl_req_addr: dma address of ctrl_req * @ep0_trb: dma address of ep0_trb @@ -734,6 +736,7 @@ struct dwc3 { struct usb_ctrlrequest *ctrl_req; struct dwc3_trb *ep0_trb; void *ep0_bounce; + void *zlp_buf; void *scratchbuf; u8 *setup_buf; dma_addr_t ctrl_req_addr; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c new file mode 100644 index 000000000000..9c9f74155066 --- /dev/null +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -0,0 +1,180 @@ +/** + * dwc3-of-simple.c - OF glue layer for simple integrations + * + * Copyright (c) 2015 Texas Instruments Incorporated - http://www.ti.com + * + * Author: Felipe Balbi <balbi@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This is a combination of the old dwc3-qcom.c by Ivan T. 
Ivanov + * <iivanov@mm-sol.com> and the original patch adding support for Xilinx' SoC + * by Subbaraya Sundeep Bhatta <subbaraya.sundeep.bhatta@xilinx.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> + +struct dwc3_of_simple { + struct device *dev; + struct clk **clks; + int num_clocks; +}; + +static int dwc3_of_simple_probe(struct platform_device *pdev) +{ + struct dwc3_of_simple *simple; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + int ret; + int i; + + simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL); + if (!simple) + return -ENOMEM; + + ret = of_clk_get_parent_count(np); + if (ret < 0) + return ret; + + simple->num_clocks = ret; + + simple->clks = devm_kcalloc(dev, simple->num_clocks, + sizeof(struct clk *), GFP_KERNEL); + if (!simple->clks) + return -ENOMEM; + + simple->dev = dev; + + for (i = 0; i < simple->num_clocks; i++) { + struct clk *clk; + + clk = of_clk_get(np, i); + if (IS_ERR(clk)) { + while (--i >= 0) + clk_put(simple->clks[i]); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret < 0) { + while (--i >= 0) { + clk_disable_unprepare(simple->clks[i]); + clk_put(simple->clks[i]); + } + clk_put(clk); + + return ret; + } + + simple->clks[i] = clk; + } + + ret = of_platform_populate(np, NULL, NULL, dev); + if (ret) { + for (i = 0; i < simple->num_clocks; i++) { + clk_disable_unprepare(simple->clks[i]); + clk_put(simple->clks[i]); + } + + return ret; + } + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + return 0; +} + +static int dwc3_of_simple_remove(struct platform_device *pdev) +{ + struct dwc3_of_simple *simple = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int i; + + for (i = 0; i < simple->num_clocks; i++) { + clk_unprepare(simple->clks[i]); + clk_put(simple->clks[i]); + } + + of_platform_depopulate(dev); + + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + return 0; +} + +#ifdef CONFIG_PM +static int dwc3_of_simple_runtime_suspend(struct device *dev) +{ + struct dwc3_of_simple *simple = dev_get_drvdata(dev); + int i; + + for (i = 0; i < simple->num_clocks; i++) + clk_disable(simple->clks[i]); + + return 0; +} + +static int dwc3_of_simple_runtime_resume(struct device *dev) +{ + struct dwc3_of_simple *simple = dev_get_drvdata(dev); + int ret; + int i; + + for (i = 0; i < simple->num_clocks; i++) { + ret = clk_enable(simple->clks[i]); + if (ret < 0) { + while (--i >= 0) + clk_disable(simple->clks[i]); + return ret; + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { + SET_RUNTIME_PM_OPS(dwc3_of_simple_runtime_suspend, + dwc3_of_simple_runtime_resume, NULL) +}; + +static const struct of_device_id of_dwc3_simple_match[] = { + { .compatible = "qcom,dwc3" }, + { .compatible = "xlnx,zynqmp-dwc3" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); + +static struct platform_driver dwc3_of_simple_driver = { + .probe = dwc3_of_simple_probe, + .remove = dwc3_of_simple_remove, + .driver = { + .name = "dwc3-of-simple", + .of_match_table = of_dwc3_simple_match, + }, +}; + +module_platform_driver(dwc3_of_simple_driver); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DesignWare USB3 OF Simple Glue Layer"); +MODULE_AUTHOR("Felipe Balbi 
<balbi@ti.com>"); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c deleted file mode 100644 index 088026048f49..000000000000 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <linux/platform_device.h> - -struct dwc3_qcom { - struct device *dev; - - struct clk *core_clk; - struct clk *iface_clk; - struct clk *sleep_clk; -}; - -static int dwc3_qcom_probe(struct platform_device *pdev) -{ - struct device_node *node = pdev->dev.of_node; - struct dwc3_qcom *qdwc; - int ret; - - qdwc = devm_kzalloc(&pdev->dev, sizeof(*qdwc), GFP_KERNEL); - if (!qdwc) - return -ENOMEM; - - platform_set_drvdata(pdev, qdwc); - - qdwc->dev = &pdev->dev; - - qdwc->core_clk = devm_clk_get(qdwc->dev, "core"); - if (IS_ERR(qdwc->core_clk)) { - dev_err(qdwc->dev, "failed to get core clock\n"); - return PTR_ERR(qdwc->core_clk); - } - - qdwc->iface_clk = devm_clk_get(qdwc->dev, "iface"); - if (IS_ERR(qdwc->iface_clk)) { - dev_info(qdwc->dev, "failed to get optional iface clock\n"); - qdwc->iface_clk = NULL; - } - - qdwc->sleep_clk = devm_clk_get(qdwc->dev, "sleep"); - if (IS_ERR(qdwc->sleep_clk)) { - dev_info(qdwc->dev, "failed to get optional sleep clock\n"); - qdwc->sleep_clk = NULL; - } - - ret = clk_prepare_enable(qdwc->core_clk); - if (ret) { - dev_err(qdwc->dev, "failed to enable core clock\n"); - goto err_core; - } - - ret = clk_prepare_enable(qdwc->iface_clk); - if (ret) { - dev_err(qdwc->dev, "failed to enable optional iface clock\n"); - goto err_iface; - } - - ret = clk_prepare_enable(qdwc->sleep_clk); - if (ret) { - dev_err(qdwc->dev, "failed to enable optional sleep clock\n"); - goto err_sleep; - } - - ret = of_platform_populate(node, NULL, NULL, qdwc->dev); - if (ret) { - dev_err(qdwc->dev, "failed to register core - %d\n", ret); - goto err_clks; - } - - return 0; - -err_clks: - clk_disable_unprepare(qdwc->sleep_clk); -err_sleep: - clk_disable_unprepare(qdwc->iface_clk); -err_iface: - clk_disable_unprepare(qdwc->core_clk); -err_core: - return ret; -} - -static int dwc3_qcom_remove(struct platform_device *pdev) -{ - struct dwc3_qcom *qdwc = platform_get_drvdata(pdev); - - of_platform_depopulate(&pdev->dev); - - clk_disable_unprepare(qdwc->sleep_clk); - clk_disable_unprepare(qdwc->iface_clk); - clk_disable_unprepare(qdwc->core_clk); - - return 0; -} - -static const struct of_device_id of_dwc3_match[] = { - { .compatible = "qcom,dwc3" }, - { /* Sentinel */ } -}; -MODULE_DEVICE_TABLE(of, of_dwc3_match); - -static struct platform_driver dwc3_qcom_driver = { - .probe = dwc3_qcom_probe, - .remove = dwc3_qcom_remove, - .driver = { - .name = "qcom-dwc3", - .of_match_table = of_dwc3_match, - }, -}; - -module_platform_driver(dwc3_qcom_driver); - -MODULE_ALIAS("platform:qcom-dwc3"); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("DesignWare USB3 QCOM Glue Layer"); -MODULE_AUTHOR("Ivan T. 
Ivanov <iivanov@mm-sol.com>"); diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 5320e939e090..3a9354abcb68 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -817,6 +817,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { + dwc->setup_packet_pending = true; + dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); if (r) @@ -916,8 +918,10 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc, } status = DWC3_TRB_SIZE_TRBSTS(trb->size); - if (status == DWC3_TRBSTS_SETUP_PENDING) + if (status == DWC3_TRBSTS_SETUP_PENDING) { + dwc->setup_packet_pending = true; dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); + } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); @@ -971,7 +975,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, ret = usb_gadget_map_request(&dwc->gadget, &req->request, dep->number); if (ret) { - dev_dbg(dwc->dev, "failed to map request\n"); + dwc3_trace(trace_dwc3_ep0, "failed to map request\n"); return; } @@ -999,7 +1003,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, ret = usb_gadget_map_request(&dwc->gadget, &req->request, dep->number); if (ret) { - dev_dbg(dwc->dev, "failed to map request\n"); + dwc3_trace(trace_dwc3_ep0, "failed to map request\n"); return; } @@ -1063,8 +1067,6 @@ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) static void dwc3_ep0_xfernotready(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { - dwc->setup_packet_pending = true; - switch (event->status) { case DEPEVT_STATUS_CONTROL_DATA: dwc3_trace(trace_dwc3_ep0, "Control Data"); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a58376fd65fe..af023a81a0b0 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -265,9 +265,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, usb_gadget_unmap_request(&dwc->gadget, &req->request, req->direction); - dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", - req, dep->name, req->request.actual, - req->request.length, status); trace_dwc3_gadget_giveback(req); spin_unlock(&dwc->lock); @@ -664,11 +661,10 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep, dep = to_dwc3_ep(ep); dwc = dep->dwc; - if (dep->flags & DWC3_EP_ENABLED) { - dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", - dep->name); + if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, + "%s is already enabled\n", + dep->name)) return 0; - } spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); @@ -692,11 +688,10 @@ static int dwc3_gadget_ep_disable(struct usb_ep *ep) dep = to_dwc3_ep(ep); dwc = dep->dwc; - if (!(dep->flags & DWC3_EP_ENABLED)) { - dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", - dep->name); + if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), + "%s is already disabled\n", + dep->name)) return 0; - } spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_disable(dep); @@ -985,8 +980,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, cmd |= DWC3_DEPCMD_PARAM(cmd_param); ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); if (ret < 0) { - dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); - /* * FIXME we need to iterate over the list of requests * here and stop, unmap, free and del each of the linked @@ -1044,6 +1037,20 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep 
*dep, struct dwc3_request *req) struct dwc3 *dwc = dep->dwc; int ret; + if (!dep->endpoint.desc) { + dwc3_trace(trace_dwc3_gadget, + "trying to queue request %p to disabled %s\n", + &req->request, dep->endpoint.name); + return -ESHUTDOWN; + } + + if (WARN(req->dep != dep, "request %p belongs to '%s'\n", + &req->request, req->dep->name)) { + dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n", + &req->request, req->dep->name); + return -EINVAL; + } + req->request.actual = 0; req->request.status = -EINPROGRESS; req->direction = dep->direction; @@ -1141,7 +1148,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) out: if (ret && ret != -EBUSY) - dev_dbg(dwc->dev, "%s: failed to kick transfers\n", + dwc3_trace(trace_dwc3_gadget, + "%s: failed to kick transfers\n", dep->name); if (ret == -EBUSY) ret = 0; @@ -1149,6 +1157,32 @@ out: return ret; } +static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep, + struct usb_request *request) +{ + dwc3_gadget_ep_free_request(ep, request); +} + +static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) +{ + struct dwc3_request *req; + struct usb_request *request; + struct usb_ep *ep = &dep->endpoint; + + dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n"); + request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); + if (!request) + return -ENOMEM; + + request->length = 0; + request->buf = dwc->zlp_buf; + request->complete = __dwc3_gadget_ep_zlp_complete; + + req = to_dwc3_request(request); + + return __dwc3_gadget_ep_queue(dep, req); +} + static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags) { @@ -1161,22 +1195,18 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, int ret; spin_lock_irqsave(&dwc->lock, flags); - if (!dep->endpoint.desc) { - dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", - request, ep->name); - ret = -ESHUTDOWN; - goto out; - } - - if (WARN(req->dep != dep, "request %p belongs to '%s'\n", - request, req->dep->name)) { - ret = -EINVAL; - goto out; - } - ret = __dwc3_gadget_ep_queue(dep, req); -out: + /* + * Okay, here's the thing, if gadget driver has requested for a ZLP by + * setting request->zero, instead of doing magic, we will just queue an + * extra usb_request ourselves so that it gets handled the same way as + * any other request. 
+ */ + if (ret == 0 && request->zero && request->length && + (request->length % ep->maxpacket == 0)) + ret = __dwc3_gadget_ep_queue_zlp(dwc, dep); + spin_unlock_irqrestore(&dwc->lock, flags); return ret; @@ -1246,7 +1276,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || (!list_empty(&dep->req_queued) || !list_empty(&dep->request_list)))) { - dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", + dwc3_trace(trace_dwc3_gadget, + "%s: pending request, cannot halt\n", dep->name); return -EAGAIN; } @@ -1373,7 +1404,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g) speed = reg & DWC3_DSTS_CONNECTSPD; if (speed == DWC3_DSTS_SUPERSPEED) { - dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); + dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); ret = -EINVAL; goto out; } @@ -1385,8 +1416,9 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g) case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ break; default: - dev_dbg(dwc->dev, "can't wakeup from link state %d\n", - link_state); + dwc3_trace(trace_dwc3_gadget, + "can't wakeup from '%s'\n", + dwc3_gadget_link_string(link_state)); ret = -EINVAL; goto out; } @@ -1825,7 +1857,8 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, if (count) { trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { - dev_dbg(dwc->dev, "incomplete IN transfer %s\n", + dwc3_trace(trace_dwc3_gadget, + "%s: incomplete IN transfer\n", dep->name); /* * If missed isoc occurred and there is @@ -1887,10 +1920,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, do { req = next_request(&dep->req_queued); - if (!req) { - WARN_ON_ONCE(1); + if (WARN_ON_ONCE(!req)) return 1; - } + i = 0; do { slot = req->start_slot + i; @@ -2004,7 +2036,8 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dep->resource_index = 0; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", + dwc3_trace(trace_dwc3_gadget, + "%s is an Isochronous endpoint\n", dep->name); return; } @@ -2031,7 +2064,8 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, if (!ret || ret == -EBUSY) return; - dev_dbg(dwc->dev, "%s: failed to kick transfers\n", + dwc3_trace(trace_dwc3_gadget, + "%s: failed to kick transfers\n", dep->name); } @@ -2053,11 +2087,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, case DEPEVT_STREAMEVT_NOTFOUND: /* FALLTHROUGH */ default: - dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); + dwc3_trace(trace_dwc3_gadget, + "unable to find suitable stream\n"); } break; case DWC3_DEPEVT_RXTXFIFOEVT: - dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); + dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name); break; case DWC3_DEPEVT_EPCMDCMPLT: dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); @@ -2230,8 +2265,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) * * Our suggested workaround is to follow the Disconnect * Event steps here, instead, based on a setup_packet_pending - * flag. Such flag gets set whenever we have a XferNotReady - * event on EP0 and gets cleared on XferComplete for the + * flag. Such flag gets set whenever we have a SETUP_PENDING + * status for EP0 TRBs and gets cleared on XferComplete for the * same endpoint. 
* * Refers to: @@ -2744,6 +2779,12 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err3; } + dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL); + if (!dwc->zlp_buf) { + ret = -ENOMEM; + goto err4; + } + dwc->gadget.ops = &dwc3_gadget_ops; dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; @@ -2785,16 +2826,19 @@ int dwc3_gadget_init(struct dwc3 *dwc) ret = dwc3_gadget_init_endpoints(dwc); if (ret) - goto err4; + goto err5; ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); if (ret) { dev_err(dwc->dev, "failed to register udc\n"); - goto err4; + goto err5; } return 0; +err5: + kfree(dwc->zlp_buf); + err4: dwc3_gadget_free_endpoints(dwc); dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, @@ -2827,6 +2871,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc) dwc->ep0_bounce, dwc->ep0_bounce_addr); kfree(dwc->setup_buf); + kfree(dwc->zlp_buf); dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), dwc->ep0_trb, dwc->ep0_trb_addr); diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index 9c10669ab91f..3ac7252f4427 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -117,6 +117,9 @@ DECLARE_EVENT_CLASS(dwc3_log_request, __field(unsigned, actual) __field(unsigned, length) __field(int, status) + __field(int, zero) + __field(int, short_not_ok) + __field(int, no_interrupt) ), TP_fast_assign( snprintf(__get_str(name), DWC3_MSG_MAX, "%s", req->dep->name); @@ -124,9 +127,15 @@ DECLARE_EVENT_CLASS(dwc3_log_request, __entry->actual = req->request.actual; __entry->length = req->request.length; __entry->status = req->request.status; + __entry->zero = req->request.zero; + __entry->short_not_ok = req->request.short_not_ok; + __entry->no_interrupt = req->request.no_interrupt; ), - TP_printk("%s: req %p length %u/%u ==> %d", + TP_printk("%s: req %p length %u/%u %s%s%s ==> %d", __get_str(name), __entry->req, __entry->actual, __entry->length, + __entry->zero ? "Z" : "z", + __entry->short_not_ok ? "S" : "s", + __entry->no_interrupt ? "i" : "I", __entry->status ) ); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 33834aa09ed4..be5aab9c13f2 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -127,6 +127,12 @@ config USB_GADGET_STORAGE_NUM_BUFFERS a module parameter as well. If unsure, say 2. +config U_SERIAL_CONSOLE + bool "Serial gadget console support" + depends on USB_G_SERIAL + help + It supports the serial gadget can be used as a console. 
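The new U_SERIAL_CONSOLE option above lets the serial gadget double as a kernel console. Once enabled, it should be selectable at boot in the usual way (something like console=ttyGS0, assuming standard name/index matching, since the code below registers the name "ttyGS"). For orientation, here is a minimal, illustrative sketch of the console registration shape this relies on; the sketch_* names are made up, and the real hookup is the gs_console_* code added to u_serial.c further down in this patch.

#include <linux/console.h>
#include <linux/tty_driver.h>

/* Illustrative sketch only, mirroring the pattern used below, not the patch itself. */
static void sketch_console_write(struct console *co, const char *buf, unsigned count)
{
	/* buffer the bytes; a worker later pushes them out of the gadget's IN endpoint */
}

static struct tty_driver *sketch_console_device(struct console *co, int *index)
{
	*index = co->index;
	return *(struct tty_driver **)co->data;	/* e.g. the gadget tty driver */
}

static struct console sketch_cons = {
	.name	= "ttyGS",
	.write	= sketch_console_write,
	.device	= sketch_console_device,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
};

/* register_console(&sketch_cons) is then called when a serial line is allocated. */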
+ source "drivers/usb/gadget/udc/Kconfig" # diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 163d305e1200..590c44989e5e 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -56,7 +56,6 @@ struct gadget_info { struct list_head string_list; struct list_head available_func; - const char *udc_name; struct usb_composite_driver composite; struct usb_composite_dev cdev; bool use_os_desc; @@ -233,21 +232,23 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item, static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page) { - return sprintf(page, "%s\n", to_gadget_info(item)->udc_name ?: ""); + char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name; + + return sprintf(page, "%s\n", udc_name ?: ""); } static int unregister_gadget(struct gadget_info *gi) { int ret; - if (!gi->udc_name) + if (!gi->composite.gadget_driver.udc_name) return -ENODEV; ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver); if (ret) return ret; - kfree(gi->udc_name); - gi->udc_name = NULL; + kfree(gi->composite.gadget_driver.udc_name); + gi->composite.gadget_driver.udc_name = NULL; return 0; } @@ -271,14 +272,16 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, if (ret) goto err; } else { - if (gi->udc_name) { + if (gi->composite.gadget_driver.udc_name) { ret = -EBUSY; goto err; } - ret = usb_udc_attach_driver(name, &gi->composite.gadget_driver); - if (ret) + gi->composite.gadget_driver.udc_name = name; + ret = usb_gadget_probe_driver(&gi->composite.gadget_driver); + if (ret) { + gi->composite.gadget_driver.udc_name = NULL; goto err; - gi->udc_name = name; + } } mutex_unlock(&gi->lock); return len; @@ -427,9 +430,9 @@ static int config_usb_cfg_unlink( * remove the function. 
*/ mutex_lock(&gi->lock); - if (gi->udc_name) + if (gi->composite.gadget_driver.udc_name) unregister_gadget(gi); - WARN_ON(gi->udc_name); + WARN_ON(gi->composite.gadget_driver.udc_name); list_for_each_entry(f, &cfg->func_list, list) { if (f->fi == fi) { @@ -873,10 +876,10 @@ static int os_desc_unlink(struct config_item *os_desc_ci, struct usb_composite_dev *cdev = &gi->cdev; mutex_lock(&gi->lock); - if (gi->udc_name) + if (gi->composite.gadget_driver.udc_name) unregister_gadget(gi); cdev->os_desc_config = NULL; - WARN_ON(gi->udc_name); + WARN_ON(gi->composite.gadget_driver.udc_name); mutex_unlock(&gi->lock); return 0; } diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 898a570319f1..fb1fe96d3215 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/device.h> +#include <linux/kfifo.h> #include <sound/core.h> #include <sound/initval.h> @@ -75,6 +76,7 @@ struct f_midi { struct usb_ep *in_ep, *out_ep; struct snd_card *card; struct snd_rawmidi *rmidi; + u8 ms_id; struct snd_rawmidi_substream *in_substream[MAX_PORTS]; struct snd_rawmidi_substream *out_substream[MAX_PORTS]; @@ -87,6 +89,9 @@ struct f_midi { int index; char *id; unsigned int buflen, qlen; + /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */ + DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *); + unsigned int in_last_port; }; static inline struct f_midi *func_to_midi(struct usb_function *f) @@ -94,7 +99,7 @@ static inline struct f_midi *func_to_midi(struct usb_function *f) return container_of(f, struct f_midi, func); } -static void f_midi_transmit(struct f_midi *midi, struct usb_request *req); +static void f_midi_transmit(struct f_midi *midi); DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); @@ -201,12 +206,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, return alloc_ep_req(ep, length, length); } -static void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static const uint8_t f_midi_cin_length[] = { 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 }; @@ -258,7 +257,8 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req) } else if (ep == midi->in_ep) { /* Our transmit completed. See if there's more to go. * f_midi_transmit eats req, don't queue it again. */ - f_midi_transmit(midi, req); + req->length = 0; + f_midi_transmit(midi); return; } break; @@ -269,10 +269,12 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req) case -ESHUTDOWN: /* disconnect from host */ VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status, req->actual, req->length); - if (ep == midi->out_ep) + if (ep == midi->out_ep) { f_midi_handle_out_data(ep, req); - - free_ep_req(ep, req); + /* We don't need to free IN requests because it's handled + * by the midi->in_req_fifo. 
*/ + free_ep_req(ep, req); + } return; case -EOVERFLOW: /* buffer overrun on read means that @@ -324,12 +326,11 @@ static int f_midi_start_ep(struct f_midi *midi, static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_midi *midi = func_to_midi(f); - struct usb_composite_dev *cdev = f->config->cdev; unsigned i; int err; - /* For Control Device interface we do nothing */ - if (intf == 0) + /* we only set alt for MIDIStreaming interface */ + if (intf != midi->ms_id) return 0; err = f_midi_start_ep(midi, f, midi->in_ep); @@ -340,23 +341,19 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (err) return err; - usb_ep_disable(midi->out_ep); + /* pre-allocate write usb requests to use on f_midi_transmit. */ + while (kfifo_avail(&midi->in_req_fifo)) { + struct usb_request *req = + midi_alloc_ep_req(midi->in_ep, midi->buflen); - err = config_ep_by_speed(midi->gadget, f, midi->out_ep); - if (err) { - ERROR(cdev, "can't configure %s: %d\n", - midi->out_ep->name, err); - return err; - } + if (req == NULL) + return -ENOMEM; - err = usb_ep_enable(midi->out_ep); - if (err) { - ERROR(cdev, "can't start %s: %d\n", - midi->out_ep->name, err); - return err; - } + req->length = 0; + req->complete = f_midi_complete; - midi->out_ep->driver_data = midi; + kfifo_put(&midi->in_req_fifo, req); + } /* allocate a bunch of read buffers and queue them all at once. */ for (i = 0; i < midi->qlen && err == 0; i++) { @@ -368,9 +365,10 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) req->complete = f_midi_complete; err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC); if (err) { - ERROR(midi, "%s queue req: %d\n", + ERROR(midi, "%s: couldn't enqueue request: %d\n", midi->out_ep->name, err); free_ep_req(midi->out_ep, req); + return err; } } @@ -381,6 +379,7 @@ static void f_midi_disable(struct usb_function *f) { struct f_midi *midi = func_to_midi(f); struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = NULL; DBG(cdev, "disable\n"); @@ -390,6 +389,10 @@ static void f_midi_disable(struct usb_function *f) */ usb_ep_disable(midi->in_ep); usb_ep_disable(midi->out_ep); + + /* release IN requests */ + while (kfifo_get(&midi->in_req_fifo, &req)) + free_ep_req(midi->in_ep, req); } static int f_midi_snd_free(struct snd_device *device) @@ -511,57 +514,113 @@ static void f_midi_transmit_byte(struct usb_request *req, } } -static void f_midi_transmit(struct f_midi *midi, struct usb_request *req) +static void f_midi_drop_out_substreams(struct f_midi *midi) { - struct usb_ep *ep = midi->in_ep; - int i; - - if (!ep) - return; - - if (!req) - req = midi_alloc_ep_req(ep, midi->buflen); - - if (!req) { - ERROR(midi, "%s: alloc_ep_request failed\n", __func__); - return; - } - req->length = 0; - req->complete = f_midi_complete; + unsigned int i; for (i = 0; i < MAX_PORTS; i++) { struct gmidi_in_port *port = midi->in_port[i]; struct snd_rawmidi_substream *substream = midi->in_substream[i]; - if (!port || !port->active || !substream) + if (!port) + break; + + if (!port->active || !substream) continue; - while (req->length + 3 < midi->buflen) { - uint8_t b; - if (snd_rawmidi_transmit(substream, &b, 1) != 1) { - port->active = 0; + snd_rawmidi_drop_output(substream); + } +} + +static void f_midi_transmit(struct f_midi *midi) +{ + struct usb_ep *ep = midi->in_ep; + bool active; + + /* We only care about USB requests if IN endpoint is enabled */ + if (!ep || !ep->enabled) + goto drop_out; + + do { + struct usb_request *req = 
NULL; + unsigned int len, i; + + active = false; + + /* We peek the request in order to reuse it if it fails + * to enqueue on its endpoint */ + len = kfifo_peek(&midi->in_req_fifo, &req); + if (len != 1) { + ERROR(midi, "%s: Couldn't get usb request\n", __func__); + goto drop_out; + } + + /* If buffer overrun, then we ignore this transmission. + * IMPORTANT: This will cause the user-space rawmidi device to block until a) usb + * requests have been completed or b) snd_rawmidi_write() times out. */ + if (req->length > 0) + return; + + for (i = midi->in_last_port; i < MAX_PORTS; i++) { + struct gmidi_in_port *port = midi->in_port[i]; + struct snd_rawmidi_substream *substream = midi->in_substream[i]; + + if (!port) { + /* Reset counter when we reach the last available port */ + midi->in_last_port = 0; + break; + } + + if (!port->active || !substream) + continue; + + while (req->length + 3 < midi->buflen) { + uint8_t b; + + if (snd_rawmidi_transmit(substream, &b, 1) != 1) { + port->active = 0; + break; + } + f_midi_transmit_byte(req, port, b); + } + + active = !!port->active; + /* Check if last port is still active, which means that + * there is still data on that substream but this current + * request run out of space. */ + if (active) { + midi->in_last_port = i; + /* There is no need to re-iterate though midi ports. */ break; } - f_midi_transmit_byte(req, port, b); } - } - if (req->length > 0 && ep->enabled) { - int err; + if (req->length > 0) { + int err; - err = usb_ep_queue(ep, req, GFP_ATOMIC); - if (err < 0) - ERROR(midi, "%s queue req: %d\n", - midi->in_ep->name, err); - } else { - free_ep_req(ep, req); - } + err = usb_ep_queue(ep, req, GFP_ATOMIC); + if (err < 0) { + ERROR(midi, "%s failed to queue req: %d\n", + midi->in_ep->name, err); + req->length = 0; /* Re-use request next time. */ + } else { + /* Upon success, put request at the back of the queue. 
*/ + kfifo_skip(&midi->in_req_fifo); + kfifo_put(&midi->in_req_fifo, req); + } + } + } while (active); + + return; + +drop_out: + f_midi_drop_out_substreams(midi); } static void f_midi_in_tasklet(unsigned long data) { struct f_midi *midi = (struct f_midi *) data; - f_midi_transmit(midi, NULL); + f_midi_transmit(midi); } static int f_midi_in_open(struct snd_rawmidi_substream *substream) @@ -687,6 +746,7 @@ static int f_midi_register_card(struct f_midi *midi) goto fail; } midi->rmidi = rmidi; + midi->in_last_port = 0; strcpy(rmidi->name, card->shortname); rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | @@ -755,6 +815,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) goto fail; ms_interface_desc.bInterfaceNumber = status; ac_header_desc.baInterfaceNr[0] = status; + midi->ms_id = status; status = -ENODEV; @@ -1075,6 +1136,7 @@ static void f_midi_free(struct usb_function *f) mutex_lock(&opts->lock); for (i = opts->in_ports - 1; i >= 0; --i) kfree(midi->in_port[i]); + kfifo_free(&midi->in_req_fifo); kfree(midi); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1148,6 +1210,12 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) midi->index = opts->index; midi->buflen = opts->buflen; midi->qlen = opts->qlen; + midi->in_last_port = 0; + + status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL); + if (status) + goto setup_fail; + ++opts->refcnt; mutex_unlock(&opts->lock); diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9f3ced62d916..242ba5caffe5 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -34,13 +34,6 @@ * plus two that support control-OUT tests. If the optional "autoresume" * mode is enabled, it provides good functional coverage for the "USBCV" * test harness from USB-IF. - * - * Note that because this doesn't queue more than one request at a time, - * some other function must be used to test queueing logic. The network - * link (g_ether) is the best overall option for that, since its TX and RX - * queues are relatively independent, will receive a range of packet sizes, - * and can often be made to run out completely. Those issues are important - * when stress testing peripheral controller drivers. */ struct f_sourcesink { struct usb_function function; @@ -57,6 +50,8 @@ struct f_sourcesink { unsigned isoc_mult; unsigned isoc_maxburst; unsigned buflen; + unsigned bulk_qlen; + unsigned iso_qlen; }; static inline struct f_sourcesink *func_to_ss(struct usb_function *f) @@ -303,12 +298,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) return alloc_ep_req(ep, len, ss->buflen); } -void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) { int value; @@ -595,31 +584,33 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, { struct usb_ep *ep; struct usb_request *req; - int i, size, status; - - for (i = 0; i < 8; i++) { - if (is_iso) { - switch (speed) { - case USB_SPEED_SUPER: - size = ss->isoc_maxpacket * - (ss->isoc_mult + 1) * - (ss->isoc_maxburst + 1); - break; - case USB_SPEED_HIGH: - size = ss->isoc_maxpacket * (ss->isoc_mult + 1); - break; - default: - size = ss->isoc_maxpacket > 1023 ? - 1023 : ss->isoc_maxpacket; - break; - } - ep = is_in ? 
ss->iso_in_ep : ss->iso_out_ep; - req = ss_alloc_ep_req(ep, size); - } else { - ep = is_in ? ss->in_ep : ss->out_ep; - req = ss_alloc_ep_req(ep, 0); + int i, size, qlen, status = 0; + + if (is_iso) { + switch (speed) { + case USB_SPEED_SUPER: + size = ss->isoc_maxpacket * + (ss->isoc_mult + 1) * + (ss->isoc_maxburst + 1); + break; + case USB_SPEED_HIGH: + size = ss->isoc_maxpacket * (ss->isoc_mult + 1); + break; + default: + size = ss->isoc_maxpacket > 1023 ? + 1023 : ss->isoc_maxpacket; + break; } + ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; + qlen = ss->iso_qlen; + } else { + ep = is_in ? ss->in_ep : ss->out_ep; + qlen = ss->bulk_qlen; + size = 0; + } + for (i = 0; i < qlen; i++) { + req = ss_alloc_ep_req(ep, size); if (!req) return -ENOMEM; @@ -638,10 +629,8 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, is_iso ? "ISO-" : "", is_in ? "IN" : "OUT", ep->name, status); free_ep_req(ep, req); + return status; } - - if (!is_iso) - break; } return status; @@ -869,6 +858,8 @@ static struct usb_function *source_sink_alloc_func( ss->isoc_mult = ss_opts->isoc_mult; ss->isoc_maxburst = ss_opts->isoc_maxburst; ss->buflen = ss_opts->bulk_buflen; + ss->bulk_qlen = ss_opts->bulk_qlen; + ss->iso_qlen = ss_opts->iso_qlen; ss->function.name = "source/sink"; ss->function.bind = sourcesink_bind; @@ -1153,6 +1144,82 @@ end: CONFIGFS_ATTR(f_ss_opts_, bulk_buflen); +static ssize_t f_ss_opts_bulk_qlen_show(struct config_item *item, char *page) +{ + struct f_ss_opts *opts = to_f_ss_opts(item); + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u\n", opts->bulk_qlen); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_bulk_qlen_store(struct config_item *item, + const char *page, size_t len) +{ + struct f_ss_opts *opts = to_f_ss_opts(item); + int ret; + u32 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou32(page, 0, &num); + if (ret) + goto end; + + opts->bulk_qlen = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +CONFIGFS_ATTR(f_ss_opts_, bulk_qlen); + +static ssize_t f_ss_opts_iso_qlen_show(struct config_item *item, char *page) +{ + struct f_ss_opts *opts = to_f_ss_opts(item); + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u\n", opts->iso_qlen); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_iso_qlen_store(struct config_item *item, + const char *page, size_t len) +{ + struct f_ss_opts *opts = to_f_ss_opts(item); + int ret; + u32 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou32(page, 0, &num); + if (ret) + goto end; + + opts->iso_qlen = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +CONFIGFS_ATTR(f_ss_opts_, iso_qlen); + static struct configfs_attribute *ss_attrs[] = { &f_ss_opts_attr_pattern, &f_ss_opts_attr_isoc_interval, @@ -1160,6 +1227,8 @@ static struct configfs_attribute *ss_attrs[] = { &f_ss_opts_attr_isoc_mult, &f_ss_opts_attr_isoc_maxburst, &f_ss_opts_attr_bulk_buflen, + &f_ss_opts_attr_bulk_qlen, + &f_ss_opts_attr_iso_qlen, NULL, }; @@ -1189,6 +1258,8 @@ static struct usb_function_instance *source_sink_alloc_inst(void) ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; + ss_opts->bulk_qlen = GZERO_SS_BULK_QLEN; + ss_opts->iso_qlen = GZERO_SS_ISO_QLEN; config_group_init_type_name(&ss_opts->func_inst.group, "", &ss_func_type); 
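The f_sourcesink changes above replace the hard-coded queue depths (one outstanding request for bulk, eight for isochronous) with the configurable bulk_qlen and iso_qlen attributes; the GZERO_SS_BULK_QLEN and GZERO_SS_ISO_QLEN defaults added to g_zero.h below keep the old behaviour. A rough sizing illustration, using example attribute values rather than figures from the patch:

static unsigned int ss_iso_bytes_example(void)
{
	/*
	 * Per the SuperSpeed branch of source_sink_start_ep() above:
	 *   size = isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1)
	 * 1024, 2 and 3 are illustrative values; iso_qlen = 8 is the new
	 * GZERO_SS_ISO_QLEN default.
	 */
	unsigned int size = 1024 * (2 + 1) * (3 + 1);	/* 12288 bytes per request */

	return size * 8;	/* 98304 bytes (~96 KiB) pre-allocated per iso endpoint */
}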
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 15f180904f8a..492924d0d599 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h @@ -10,6 +10,8 @@ #define GZERO_QLEN 32 #define GZERO_ISOC_INTERVAL 4 #define GZERO_ISOC_MAXPACKET 1024 +#define GZERO_SS_BULK_QLEN 1 +#define GZERO_SS_ISO_QLEN 8 struct usb_zero_options { unsigned pattern; @@ -19,6 +21,8 @@ struct usb_zero_options { unsigned isoc_maxburst; unsigned bulk_buflen; unsigned qlen; + unsigned ss_bulk_qlen; + unsigned ss_iso_qlen; }; struct f_ss_opts { @@ -29,6 +33,8 @@ struct f_ss_opts { unsigned isoc_mult; unsigned isoc_maxburst; unsigned bulk_buflen; + unsigned bulk_qlen; + unsigned iso_qlen; /* * Read/write access to configfs attributes is handled by configfs. @@ -59,7 +65,6 @@ void lb_modexit(void); int lb_modinit(void); /* common utilities */ -void free_ep_req(struct usb_ep *ep, struct usb_request *req); void disable_endpoints(struct usb_composite_dev *cdev, struct usb_ep *in, struct usb_ep *out, struct usb_ep *iso_in, struct usb_ep *iso_out); diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 6554322af2c1..637809e3bd0d 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -143,21 +143,11 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult) static int ueth_change_mtu(struct net_device *net, int new_mtu) { - struct eth_dev *dev = netdev_priv(net); - unsigned long flags; - int status = 0; + if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN) + return -ERANGE; + net->mtu = new_mtu; - /* don't change MTU on "live" link (peer won't know) */ - spin_lock_irqsave(&dev->lock, flags); - if (dev->port_usb) - status = -EBUSY; - else if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN) - status = -ERANGE; - else - net->mtu = new_mtu; - spin_unlock_irqrestore(&dev->lock, flags); - - return status; + return 0; } static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index f7771d86ad6c..6af145f2a99d 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -27,6 +27,8 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/module.h> +#include <linux/console.h> +#include <linux/kthread.h> #include "u_serial.h" @@ -79,6 +81,7 @@ */ #define QUEUE_SIZE 16 #define WRITE_BUF_SIZE 8192 /* TX only */ +#define GS_CONSOLE_BUF_SIZE 8192 /* circular buffer */ struct gs_buf { @@ -88,6 +91,17 @@ struct gs_buf { char *buf_put; }; +/* console info */ +struct gscons_info { + struct gs_port *port; + struct task_struct *console_thread; + struct gs_buf con_buf; + /* protect the buf and busy flag */ + spinlock_t con_lock; + int req_busy; + struct usb_request *console_req; +}; + /* * The port structure holds info for each port, one for each minor number * (and thus for each /dev/ node). 
@@ -1023,6 +1037,246 @@ static const struct tty_operations gs_tty_ops = { static struct tty_driver *gs_tty_driver; +#ifdef CONFIG_U_SERIAL_CONSOLE + +static struct gscons_info gscons_info; +static struct console gserial_cons; + +static struct usb_request *gs_request_new(struct usb_ep *ep) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC); + if (!req) + return NULL; + + req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void gs_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (!req) + return; + + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static void gs_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct gscons_info *info = &gscons_info; + + switch (req->status) { + default: + pr_warn("%s: unexpected %s status %d\n", + __func__, ep->name, req->status); + case 0: + /* normal completion */ + spin_lock(&info->con_lock); + info->req_busy = 0; + spin_unlock(&info->con_lock); + + wake_up_process(info->console_thread); + break; + case -ESHUTDOWN: + /* disconnect */ + pr_vdebug("%s: %s shutdown\n", __func__, ep->name); + break; + } +} + +static int gs_console_connect(int port_num) +{ + struct gscons_info *info = &gscons_info; + struct gs_port *port; + struct usb_ep *ep; + + if (port_num != gserial_cons.index) { + pr_err("%s: port num [%d] is not support console\n", + __func__, port_num); + return -ENXIO; + } + + port = ports[port_num].port; + ep = port->port_usb->in; + if (!info->console_req) { + info->console_req = gs_request_new(ep); + if (!info->console_req) + return -ENOMEM; + info->console_req->complete = gs_complete_out; + } + + info->port = port; + spin_lock(&info->con_lock); + info->req_busy = 0; + spin_unlock(&info->con_lock); + pr_vdebug("port[%d] console connect!\n", port_num); + return 0; +} + +static void gs_console_disconnect(struct usb_ep *ep) +{ + struct gscons_info *info = &gscons_info; + struct usb_request *req = info->console_req; + + gs_request_free(req, ep); + info->console_req = NULL; +} + +static int gs_console_thread(void *data) +{ + struct gscons_info *info = &gscons_info; + struct gs_port *port; + struct usb_request *req; + struct usb_ep *ep; + int xfer, ret, count, size; + + do { + port = info->port; + set_current_state(TASK_INTERRUPTIBLE); + if (!port || !port->port_usb + || !port->port_usb->in || !info->console_req) + goto sched; + + req = info->console_req; + ep = port->port_usb->in; + + spin_lock_irq(&info->con_lock); + count = gs_buf_data_avail(&info->con_buf); + size = ep->maxpacket; + + if (count > 0 && !info->req_busy) { + set_current_state(TASK_RUNNING); + if (count < size) + size = count; + + xfer = gs_buf_get(&info->con_buf, req->buf, size); + req->length = xfer; + + spin_unlock(&info->con_lock); + ret = usb_ep_queue(ep, req, GFP_ATOMIC); + spin_lock(&info->con_lock); + if (ret < 0) + info->req_busy = 0; + else + info->req_busy = 1; + + spin_unlock_irq(&info->con_lock); + } else { + spin_unlock_irq(&info->con_lock); +sched: + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + break; + } + schedule(); + } + } while (1); + + return 0; +} + +static int gs_console_setup(struct console *co, char *options) +{ + struct gscons_info *info = &gscons_info; + int status; + + info->port = NULL; + info->console_req = NULL; + info->req_busy = 0; + spin_lock_init(&info->con_lock); + + status = gs_buf_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE); + if (status) { + pr_err("%s: allocate console buffer failed\n", 
__func__); + return status; + } + + info->console_thread = kthread_create(gs_console_thread, + co, "gs_console"); + if (IS_ERR(info->console_thread)) { + pr_err("%s: cannot create console thread\n", __func__); + gs_buf_free(&info->con_buf); + return PTR_ERR(info->console_thread); + } + wake_up_process(info->console_thread); + + return 0; +} + +static void gs_console_write(struct console *co, + const char *buf, unsigned count) +{ + struct gscons_info *info = &gscons_info; + unsigned long flags; + + spin_lock_irqsave(&info->con_lock, flags); + gs_buf_put(&info->con_buf, buf, count); + spin_unlock_irqrestore(&info->con_lock, flags); + + wake_up_process(info->console_thread); +} + +static struct tty_driver *gs_console_device(struct console *co, int *index) +{ + struct tty_driver **p = (struct tty_driver **)co->data; + + if (!*p) + return NULL; + + *index = co->index; + return *p; +} + +static struct console gserial_cons = { + .name = "ttyGS", + .write = gs_console_write, + .device = gs_console_device, + .setup = gs_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &gs_tty_driver, +}; + +static void gserial_console_init(void) +{ + register_console(&gserial_cons); +} + +static void gserial_console_exit(void) +{ + struct gscons_info *info = &gscons_info; + + unregister_console(&gserial_cons); + kthread_stop(info->console_thread); + gs_buf_free(&info->con_buf); +} + +#else + +static int gs_console_connect(int port_num) +{ + return 0; +} + +static void gs_console_disconnect(struct usb_ep *ep) +{ +} + +static void gserial_console_init(void) +{ +} + +static void gserial_console_exit(void) +{ +} + +#endif + static int gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) { @@ -1096,6 +1350,7 @@ void gserial_free_line(unsigned char port_num) gserial_free_port(port); tty_unregister_device(gs_tty_driver, port_num); + gserial_console_exit(); } EXPORT_SYMBOL_GPL(gserial_free_line); @@ -1138,6 +1393,7 @@ int gserial_alloc_line(unsigned char *line_num) goto err; } *line_num = port_num; + gserial_console_init(); err: return ret; } @@ -1219,6 +1475,7 @@ int gserial_connect(struct gserial *gser, u8 port_num) gser->disconnect(gser); } + status = gs_console_connect(port_num); spin_unlock_irqrestore(&port->port_lock, flags); return status; @@ -1277,6 +1534,7 @@ void gserial_disconnect(struct gserial *gser) port->read_allocated = port->read_started = port->write_allocated = port->write_started = 0; + gs_console_disconnect(gser->in); spin_unlock_irqrestore(&port->port_lock, flags); } EXPORT_SYMBOL_GPL(gserial_disconnect); diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c index 51d4a1703af2..912694f3d54e 100644 --- a/drivers/usb/gadget/function/uvc_queue.c +++ b/drivers/usb/gadget/function/uvc_queue.c @@ -41,7 +41,7 @@ * videobuf2 queue operations */ -static int uvc_queue_setup(struct vb2_queue *vq, const void *parg, +static int uvc_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { @@ -329,7 +329,7 @@ struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, buf->buf.field = V4L2_FIELD_NONE; buf->buf.sequence = queue->sequence++; - v4l2_get_timestamp(&buf->buf.timestamp); + buf->buf.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused); vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c index 4b158e2d1e57..c16089efc322 
100644 --- a/drivers/usb/gadget/legacy/acm_ms.c +++ b/drivers/usb/gadget/legacy/acm_ms.c @@ -40,7 +40,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_MISC /* 0xEF */, .bDeviceSubClass = 2, diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c index 685cf3b4b78f..5d7b3c6a422b 100644 --- a/drivers/usb/gadget/legacy/audio.c +++ b/drivers/usb/gadget/legacy/audio.c @@ -123,7 +123,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x200), + /* .bcdUSB = DYNAMIC */ #ifdef CONFIG_GADGET_UAC1 .bDeviceClass = USB_CLASS_PER_INTERFACE, diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index ecd8c8d62f2e..51c08682de84 100644 --- a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -43,7 +43,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c index 31e9160223e9..25a2c2e48592 100644 --- a/drivers/usb/gadget/legacy/ether.c +++ b/drivers/usb/gadget/legacy/ether.c @@ -151,7 +151,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16 (0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index 320a81b2baa6..f85639ef8a8f 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c @@ -69,7 +69,7 @@ static struct usb_device_descriptor gfs_dev_desc = { .bLength = sizeof gfs_dev_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_PER_INTERFACE, .idVendor = cpu_to_le16(GFS_VENDOR_ID), diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c index 8a18348ae86e..fc2ac150f5ff 100644 --- a/drivers/usb/gadget/legacy/gmidi.c +++ b/drivers/usb/gadget/legacy/gmidi.c @@ -21,19 +21,12 @@ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> -#include <linux/slab.h> #include <linux/module.h> -#include <linux/device.h> -#include <sound/core.h> #include <sound/initval.h> -#include <sound/rawmidi.h> -#include <linux/usb/ch9.h> #include <linux/usb/composite.h> #include <linux/usb/gadget.h> -#include <linux/usb/audio.h> -#include <linux/usb/midi.h> #include "u_midi.h" @@ -42,7 +35,6 @@ MODULE_AUTHOR("Ben Williamson"); MODULE_LICENSE("GPL v2"); -static const char shortname[] = "g_midi"; static const char longname[] = "MIDI Gadget"; USB_GADGET_COMPOSITE_OPTIONS(); @@ -61,7 +53,7 @@ MODULE_PARM_DESC(buflen, "MIDI buffer length"); static unsigned int qlen = 32; module_param(qlen, uint, S_IRUGO); -MODULE_PARM_DESC(qlen, "USB read request queue length"); +MODULE_PARM_DESC(qlen, "USB read and write request queue length"); static unsigned int in_ports = 1; module_param(in_ports, uint, S_IRUGO); @@ -86,7 +78,7 @@ MODULE_PARM_DESC(out_ports, "Number of MIDI output ports"); static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = 
cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_PER_INTERFACE, .idVendor = cpu_to_le16(DRIVER_VENDOR_NUM), .idProduct = cpu_to_le16(DRIVER_PRODUCT_NUM), diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c index 7e5d2c48476e..a71a884f79fc 100644 --- a/drivers/usb/gadget/legacy/hid.c +++ b/drivers/usb/gadget/legacy/hid.c @@ -47,7 +47,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ /* .bDeviceClass = USB_CLASS_COMM, */ /* .bDeviceSubClass = 0, */ diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index f454c7af489c..365afd7e14f8 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1137,10 +1137,9 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) dev->gadget->ep0, dev->req, GFP_KERNEL); } + spin_lock_irq(&dev->lock); if (retval < 0) { - spin_lock_irq (&dev->lock); clean_req (dev->gadget->ep0, dev->req); - spin_unlock_irq (&dev->lock); } else retval = len; diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c index bda3c519110f..e61af53c7d2b 100644 --- a/drivers/usb/gadget/legacy/mass_storage.c +++ b/drivers/usb/gadget/legacy/mass_storage.c @@ -55,7 +55,7 @@ static struct usb_device_descriptor msg_device_desc = { .bLength = sizeof msg_device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_PER_INTERFACE, /* Vendor and product id can be overridden by module parameters. */ diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c index 4fe794ddcd49..229d704a620b 100644 --- a/drivers/usb/gadget/legacy/multi.c +++ b/drivers/usb/gadget/legacy/multi.c @@ -67,7 +67,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_MISC /* 0xEF */, .bDeviceSubClass = 2, diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index 2bae4381332d..0aba68253e3d 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -49,7 +49,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16 (0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c index 8b3f6fb1825d..09975046c694 100644 --- a/drivers/usb/gadget/legacy/nokia.c +++ b/drivers/usb/gadget/legacy/nokia.c @@ -89,7 +89,7 @@ static struct usb_gadget_strings *dev_strings[] = { static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_COMM, .idVendor = cpu_to_le16(NOKIA_VENDOR_ID), .idProduct = cpu_to_le16(NOKIA_PRODUCT_ID), diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c index a22d30a4def1..6f969a86175c 100644 --- a/drivers/usb/gadget/legacy/printer.c +++ b/drivers/usb/gadget/legacy/printer.c @@ -71,7 +71,7 @@ static struct usb_function *f_printer; static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - 
.bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_PER_INTERFACE, .bDeviceSubClass = 0, .bDeviceProtocol = 0, diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c index c5d42e0347a9..9d89adce756d 100644 --- a/drivers/usb/gadget/legacy/serial.c +++ b/drivers/usb/gadget/legacy/serial.c @@ -65,7 +65,7 @@ static struct usb_gadget_strings *dev_strings[] = { static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ /* .bDeviceClass = f(use_acm) */ .bDeviceSubClass = 0, .bDeviceProtocol = 0, diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 22e56158d585..7857fa411636 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c @@ -1974,7 +1974,7 @@ static struct usb_descriptor_header *uasp_ss_function_desc[] = { static struct usb_device_descriptor usbg_device_desc = { .bLength = sizeof(usbg_device_desc), .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_PER_INTERFACE, .idVendor = cpu_to_le16(UAS_VENDOR_ID), .idProduct = cpu_to_le16(UAS_PRODUCT_ID), diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c index 72c976bf3530..f9661cd627c8 100644 --- a/drivers/usb/gadget/legacy/webcam.c +++ b/drivers/usb/gadget/legacy/webcam.c @@ -77,7 +77,7 @@ static struct usb_function *f_uvc; static struct usb_device_descriptor webcam_device_descriptor = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_MISC, .bDeviceSubClass = 0x02, .bDeviceProtocol = 0x01, diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index 37a410056fed..d02e2ce73ea5 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c @@ -68,6 +68,8 @@ static struct usb_zero_options gzero_options = { .isoc_maxpacket = GZERO_ISOC_MAXPACKET, .bulk_buflen = GZERO_BULK_BUFLEN, .qlen = GZERO_QLEN, + .ss_bulk_qlen = GZERO_SS_BULK_QLEN, + .ss_iso_qlen = GZERO_SS_ISO_QLEN, }; /*-------------------------------------------------------------------------*/ @@ -113,7 +115,7 @@ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, - .bcdUSB = cpu_to_le16(0x0200), + /* .bcdUSB = DYNAMIC */ .bDeviceClass = USB_CLASS_VENDOR_SPEC, .idVendor = cpu_to_le16(DRIVER_VENDOR_NUM), @@ -255,6 +257,14 @@ static struct usb_function_instance *func_inst_lb; module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(qlen, "depth of loopback queue"); +module_param_named(ss_bulk_qlen, gzero_options.ss_bulk_qlen, uint, + S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(bulk_qlen, "depth of sourcesink queue for bulk transfer"); + +module_param_named(ss_iso_qlen, gzero_options.ss_iso_qlen, uint, + S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(iso_qlen, "depth of sourcesink queue for iso transfer"); + static int zero_bind(struct usb_composite_dev *cdev) { struct f_ss_opts *ss_opts; @@ -285,6 +295,8 @@ static int zero_bind(struct usb_composite_dev *cdev) ss_opts->isoc_mult = gzero_options.isoc_mult; ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; ss_opts->bulk_buflen = gzero_options.bulk_buflen; + ss_opts->bulk_qlen = gzero_options.ss_bulk_qlen; + ss_opts->iso_qlen = 
gzero_options.ss_iso_qlen; func_ss = usb_get_function(func_inst_ss); if (IS_ERR(func_ss)) { diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c index c6276f0268ae..4bc7eea8bfc8 100644 --- a/drivers/usb/gadget/u_f.c +++ b/drivers/usb/gadget/u_f.c @@ -11,7 +11,6 @@ * published by the Free Software Foundation. */ -#include <linux/usb/gadget.h> #include "u_f.h" struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len) diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 1d5f0eb68552..4247cc098a89 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -16,6 +16,8 @@ #ifndef __U_F_H__ #define __U_F_H__ +#include <linux/usb/gadget.h> + /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 #define vla_group_size(groupname) groupname##__next @@ -45,8 +47,12 @@ struct usb_ep; struct usb_request; +/* Requests allocated via alloc_ep_req() must be freed by free_ep_req(). */ struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len); +static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} #endif /* __U_F_H__ */ - - diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index cdbff54e07ac..753c29bd11ad 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -174,6 +174,17 @@ config USB_RENESAS_USBHS_UDC dynamically linked module called "renesas_usbhs" and force all gadget drivers to also be dynamically linked. +config USB_RENESAS_USB3 + tristate 'Renesas USB3.0 Peripheral controller' + depends on ARCH_SHMOBILE || COMPILE_TEST + help + Renesas USB3.0 Peripheral controller is a USB peripheral controller + that supports super, high, and full speed USB 3.0 data transfers. + + Say "y" to link the driver statically, or "m" to build a + dynamically linked module called "renesas_usb3" and force all + gadget drivers to also be dynamically linked. 
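The new renesas_usb3.c driver introduced a little further down describes its registers with GENMASK-based field macros: a shift, a mask, and a helper that shifts a value into place and masks off anything that does not fit. A small worked example of the pattern, modelled on the USB_COM_CON_DEV_ADDR() macro it defines (the EX_* names here are made up):

#include <linux/bitops.h>

/* Device address field of USB_COM_CON lives in bits 14..8, per the defines below. */
#define EX_DEV_ADDR_SHIFT	8
#define EX_DEV_ADDR_MASK	GENMASK(14, EX_DEV_ADDR_SHIFT)	/* 0x00007f00 */
#define EX_DEV_ADDR(n)		(((n) << EX_DEV_ADDR_SHIFT) & EX_DEV_ADDR_MASK)

/*
 * EX_DEV_ADDR(0x35) == 0x3500; values that do not fit the field are silently
 * truncated, e.g. EX_DEV_ADDR(0x80) == 0x0000 (USB device addresses are 7 bits).
 */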
+ config USB_PXA27X tristate "PXA 27x" help diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile index fba2049bf985..dfee53446319 100644 --- a/drivers/usb/gadget/udc/Makefile +++ b/drivers/usb/gadget/udc/Makefile @@ -19,6 +19,7 @@ fsl_usb2_udc-y := fsl_udc_core.o fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o obj-$(CONFIG_USB_M66592) += m66592-udc.o obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o +obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o obj-$(CONFIG_USB_LPC32XX) += lpc32xx_udc.o diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c index 8cbb00325824..f5fccb3e4152 100644 --- a/drivers/usb/gadget/udc/bcm63xx_udc.c +++ b/drivers/usb/gadget/udc/bcm63xx_udc.c @@ -1083,7 +1083,7 @@ static int bcm63xx_ep_disable(struct usb_ep *ep) struct bcm63xx_ep *bep = our_ep(ep); struct bcm63xx_udc *udc = bep->udc; struct iudma_ch *iudma = bep->iudma; - struct list_head *pos, *n; + struct bcm63xx_req *breq, *n; unsigned long flags; if (!ep || !ep->desc) @@ -1099,10 +1099,7 @@ static int bcm63xx_ep_disable(struct usb_ep *ep) iudma_reset_channel(udc, iudma); if (!list_empty(&bep->queue)) { - list_for_each_safe(pos, n, &bep->queue) { - struct bcm63xx_req *breq = - list_entry(pos, struct bcm63xx_req, queue); - + list_for_each_entry_safe(breq, n, &bep->queue, queue) { usb_gadget_unmap_request(&udc->gadget, &breq->req, iudma->is_tx); list_del(&breq->queue); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index b9429bc42511..39b7136d31d9 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -253,13 +253,12 @@ static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags) dma_addr_t paddr; struct gr_dma_desc *dma_desc; - dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr); + dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr); if (!dma_desc) { dev_err(ep->dev->dev, "Could not allocate from DMA pool\n"); return NULL; } - memset(dma_desc, 0, sizeof(*dma_desc)); dma_desc->paddr = paddr; return dma_desc; diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 00b5006baf15..79fe6b77ee44 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -28,42 +28,29 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> +#include <linux/clk.h> #include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/list.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/i2c.h> #include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/proc_fs.h> -#include <linux/clk.h> +#include <linux/slab.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> -#include <linux/i2c.h> -#include <linux/kthread.h> -#include <linux/freezer.h> -#include <linux/dma-mapping.h> -#include <linux/dmapool.h> -#include <linux/workqueue.h> -#include <linux/of.h> #include <linux/usb/isp1301.h> -#include <asm/byteorder.h> -#include <mach/hardware.h> -#include <linux/io.h> -#include <asm/irq.h> - -#include <mach/platform.h> -#include <mach/irqs.h> -#include <mach/board.h> #ifdef CONFIG_USB_GADGET_DEBUG_FILES #include <linux/debugfs.h> #include 
<linux/seq_file.h> #endif +#include <mach/hardware.h> +#include <mach/platform.h> + /* * USB device configuration structure */ diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c new file mode 100644 index 000000000000..93a3bec81df7 --- /dev/null +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -0,0 +1,1975 @@ +/* + * Renesas USB3.0 Peripheral driver (USB gadget) + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> + +/* register definitions */ +#define USB3_AXI_INT_STA 0x008 +#define USB3_AXI_INT_ENA 0x00c +#define USB3_DMA_INT_STA 0x010 +#define USB3_DMA_INT_ENA 0x014 +#define USB3_USB_COM_CON 0x200 +#define USB3_USB20_CON 0x204 +#define USB3_USB30_CON 0x208 +#define USB3_USB_STA 0x210 +#define USB3_DRD_CON 0x218 +#define USB3_USB_INT_STA_1 0x220 +#define USB3_USB_INT_STA_2 0x224 +#define USB3_USB_INT_ENA_1 0x228 +#define USB3_USB_INT_ENA_2 0x22c +#define USB3_STUP_DAT_0 0x230 +#define USB3_STUP_DAT_1 0x234 +#define USB3_P0_MOD 0x280 +#define USB3_P0_CON 0x288 +#define USB3_P0_STA 0x28c +#define USB3_P0_INT_STA 0x290 +#define USB3_P0_INT_ENA 0x294 +#define USB3_P0_LNG 0x2a0 +#define USB3_P0_READ 0x2a4 +#define USB3_P0_WRITE 0x2a8 +#define USB3_PIPE_COM 0x2b0 +#define USB3_PN_MOD 0x2c0 +#define USB3_PN_RAMMAP 0x2c4 +#define USB3_PN_CON 0x2c8 +#define USB3_PN_STA 0x2cc +#define USB3_PN_INT_STA 0x2d0 +#define USB3_PN_INT_ENA 0x2d4 +#define USB3_PN_LNG 0x2e0 +#define USB3_PN_READ 0x2e4 +#define USB3_PN_WRITE 0x2e8 +#define USB3_SSIFCMD 0x340 + +/* AXI_INT_ENA and AXI_INT_STA */ +#define AXI_INT_DMAINT BIT(31) +#define AXI_INT_EPCINT BIT(30) + +/* LCLKSEL */ +#define LCLKSEL_LSEL BIT(18) + +/* USB_COM_CON */ +#define USB_COM_CON_CONF BIT(24) +#define USB_COM_CON_SPD_MODE BIT(17) +#define USB_COM_CON_EP0_EN BIT(16) +#define USB_COM_CON_DEV_ADDR_SHIFT 8 +#define USB_COM_CON_DEV_ADDR_MASK GENMASK(14, USB_COM_CON_DEV_ADDR_SHIFT) +#define USB_COM_CON_DEV_ADDR(n) (((n) << USB_COM_CON_DEV_ADDR_SHIFT) & \ + USB_COM_CON_DEV_ADDR_MASK) +#define USB_COM_CON_RX_DETECTION BIT(1) +#define USB_COM_CON_PIPE_CLR BIT(0) + +/* USB20_CON */ +#define USB20_CON_B2_PUE BIT(31) +#define USB20_CON_B2_SUSPEND BIT(24) +#define USB20_CON_B2_CONNECT BIT(17) +#define USB20_CON_B2_TSTMOD_SHIFT 8 +#define USB20_CON_B2_TSTMOD_MASK GENMASK(10, USB20_CON_B2_TSTMOD_SHIFT) +#define USB20_CON_B2_TSTMOD(n) (((n) << USB20_CON_B2_TSTMOD_SHIFT) & \ + USB20_CON_B2_TSTMOD_MASK) +#define USB20_CON_B2_TSTMOD_EN BIT(0) + +/* USB30_CON */ +#define USB30_CON_POW_SEL_SHIFT 24 +#define USB30_CON_POW_SEL_MASK GENMASK(26, USB30_CON_POW_SEL_SHIFT) +#define USB30_CON_POW_SEL_IN_U3 BIT(26) +#define USB30_CON_POW_SEL_IN_DISCON 0 +#define USB30_CON_POW_SEL_P2_TO_P0 BIT(25) +#define USB30_CON_POW_SEL_P0_TO_P3 BIT(24) +#define USB30_CON_POW_SEL_P0_TO_P2 0 +#define USB30_CON_B3_PLLWAKE BIT(23) +#define USB30_CON_B3_CONNECT BIT(17) +#define USB30_CON_B3_HOTRST_CMP BIT(1) + +/* USB_STA */ +#define USB_STA_SPEED_MASK (BIT(2) | BIT(1)) +#define USB_STA_SPEED_HS BIT(2) 
+#define USB_STA_SPEED_FS BIT(1) +#define USB_STA_SPEED_SS 0 +#define USB_STA_VBUS_STA BIT(0) + +/* DRD_CON */ +#define DRD_CON_PERI_CON BIT(24) + +/* USB_INT_ENA_1 and USB_INT_STA_1 */ +#define USB_INT_1_B3_PLLWKUP BIT(31) +#define USB_INT_1_B3_LUPSUCS BIT(30) +#define USB_INT_1_B3_DISABLE BIT(27) +#define USB_INT_1_B3_WRMRST BIT(21) +#define USB_INT_1_B3_HOTRST BIT(20) +#define USB_INT_1_B2_USBRST BIT(12) +#define USB_INT_1_B2_L1SPND BIT(11) +#define USB_INT_1_B2_SPND BIT(9) +#define USB_INT_1_B2_RSUM BIT(8) +#define USB_INT_1_SPEED BIT(1) +#define USB_INT_1_VBUS_CNG BIT(0) + +/* USB_INT_ENA_2 and USB_INT_STA_2 */ +#define USB_INT_2_PIPE(n) BIT(n) + +/* P0_MOD */ +#define P0_MOD_DIR BIT(6) + +/* P0_CON and PN_CON */ +#define PX_CON_BYTE_EN_MASK (BIT(10) | BIT(9)) +#define PX_CON_BYTE_EN_SHIFT 9 +#define PX_CON_BYTE_EN_BYTES(n) (((n) << PX_CON_BYTE_EN_SHIFT) & \ + PX_CON_BYTE_EN_MASK) +#define PX_CON_SEND BIT(8) + +/* P0_CON */ +#define P0_CON_ST_RES_MASK (BIT(27) | BIT(26)) +#define P0_CON_ST_RES_FORCE_STALL BIT(27) +#define P0_CON_ST_RES_NORMAL BIT(26) +#define P0_CON_ST_RES_FORCE_NRDY 0 +#define P0_CON_OT_RES_MASK (BIT(25) | BIT(24)) +#define P0_CON_OT_RES_FORCE_STALL BIT(25) +#define P0_CON_OT_RES_NORMAL BIT(24) +#define P0_CON_OT_RES_FORCE_NRDY 0 +#define P0_CON_IN_RES_MASK (BIT(17) | BIT(16)) +#define P0_CON_IN_RES_FORCE_STALL BIT(17) +#define P0_CON_IN_RES_NORMAL BIT(16) +#define P0_CON_IN_RES_FORCE_NRDY 0 +#define P0_CON_RES_WEN BIT(7) +#define P0_CON_BCLR BIT(1) + +/* P0_STA and PN_STA */ +#define PX_STA_BUFSTS BIT(0) + +/* P0_INT_ENA and P0_INT_STA */ +#define P0_INT_STSED BIT(18) +#define P0_INT_STSST BIT(17) +#define P0_INT_SETUP BIT(16) +#define P0_INT_RCVNL BIT(8) +#define P0_INT_ERDY BIT(7) +#define P0_INT_FLOW BIT(6) +#define P0_INT_STALL BIT(2) +#define P0_INT_NRDY BIT(1) +#define P0_INT_BFRDY BIT(0) +#define P0_INT_ALL_BITS (P0_INT_STSED | P0_INT_SETUP | P0_INT_BFRDY) + +/* PN_MOD */ +#define PN_MOD_DIR BIT(6) +#define PN_MOD_TYPE_SHIFT 4 +#define PN_MOD_TYPE_MASK GENMASK(5, PN_MOD_TYPE_SHIFT) +#define PN_MOD_TYPE(n) (((n) << PN_MOD_TYPE_SHIFT) & \ + PN_MOD_TYPE_MASK) +#define PN_MOD_EPNUM_MASK GENMASK(3, 0) +#define PN_MOD_EPNUM(n) ((n) & PN_MOD_EPNUM_MASK) + +/* PN_RAMMAP */ +#define PN_RAMMAP_RAMAREA_SHIFT 29 +#define PN_RAMMAP_RAMAREA_MASK GENMASK(31, PN_RAMMAP_RAMAREA_SHIFT) +#define PN_RAMMAP_RAMAREA_16KB BIT(31) +#define PN_RAMMAP_RAMAREA_8KB (BIT(30) | BIT(29)) +#define PN_RAMMAP_RAMAREA_4KB BIT(30) +#define PN_RAMMAP_RAMAREA_2KB BIT(29) +#define PN_RAMMAP_RAMAREA_1KB 0 +#define PN_RAMMAP_MPKT_SHIFT 16 +#define PN_RAMMAP_MPKT_MASK GENMASK(26, PN_RAMMAP_MPKT_SHIFT) +#define PN_RAMMAP_MPKT(n) (((n) << PN_RAMMAP_MPKT_SHIFT) & \ + PN_RAMMAP_MPKT_MASK) +#define PN_RAMMAP_RAMIF_SHIFT 14 +#define PN_RAMMAP_RAMIF_MASK GENMASK(15, PN_RAMMAP_RAMIF_SHIFT) +#define PN_RAMMAP_RAMIF(n) (((n) << PN_RAMMAP_RAMIF_SHIFT) & \ + PN_RAMMAP_RAMIF_MASK) +#define PN_RAMMAP_BASEAD_MASK GENMASK(13, 0) +#define PN_RAMMAP_BASEAD(offs) (((offs) >> 3) & PN_RAMMAP_BASEAD_MASK) +#define PN_RAMMAP_DATA(area, ramif, basead) ((PN_RAMMAP_##area) | \ + (PN_RAMMAP_RAMIF(ramif)) | \ + (PN_RAMMAP_BASEAD(basead))) + +/* PN_CON */ +#define PN_CON_EN BIT(31) +#define PN_CON_DATAIF_EN BIT(30) +#define PN_CON_RES_MASK (BIT(17) | BIT(16)) +#define PN_CON_RES_FORCE_STALL BIT(17) +#define PN_CON_RES_NORMAL BIT(16) +#define PN_CON_RES_FORCE_NRDY 0 +#define PN_CON_LAST BIT(11) +#define PN_CON_RES_WEN BIT(7) +#define PN_CON_CLR BIT(0) + +/* PN_INT_STA and PN_INT_ENA */ +#define PN_INT_LSTTR BIT(4) +#define 
PN_INT_BFRDY BIT(0) + +/* USB3_SSIFCMD */ +#define SSIFCMD_URES_U2 BIT(9) +#define SSIFCMD_URES_U1 BIT(8) +#define SSIFCMD_UDIR_U2 BIT(7) +#define SSIFCMD_UDIR_U1 BIT(6) +#define SSIFCMD_UREQ_U2 BIT(5) +#define SSIFCMD_UREQ_U1 BIT(4) + +#define USB3_EP0_SS_MAX_PACKET_SIZE 512 +#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64 +#define USB3_EP0_BUF_SIZE 8 +#define USB3_MAX_NUM_PIPES 30 +#define USB3_WAIT_US 3 + +struct renesas_usb3; +struct renesas_usb3_request { + struct usb_request req; + struct list_head queue; +}; + +#define USB3_EP_NAME_SIZE 8 +struct renesas_usb3_ep { + struct usb_ep ep; + struct renesas_usb3 *usb3; + int num; + char ep_name[USB3_EP_NAME_SIZE]; + struct list_head queue; + u32 rammap_val; + bool dir_in; + bool halt; + bool wedge; + bool started; +}; + +struct renesas_usb3_priv { + int ramsize_per_ramif; /* unit = bytes */ + int num_ramif; + int ramsize_per_pipe; /* unit = bytes */ + bool workaround_for_vbus; /* if true, don't check vbus signal */ +}; + +struct renesas_usb3 { + void __iomem *reg; + + struct usb_gadget gadget; + struct usb_gadget_driver *driver; + + struct renesas_usb3_ep *usb3_ep; + int num_usb3_eps; + + spinlock_t lock; + int disabled_count; + + struct usb_request *ep0_req; + u16 test_mode; + u8 ep0_buf[USB3_EP0_BUF_SIZE]; + bool softconnect; + bool workaround_for_vbus; +}; + +#define gadget_to_renesas_usb3(_gadget) \ + container_of(_gadget, struct renesas_usb3, gadget) +#define renesas_usb3_to_gadget(renesas_usb3) (&renesas_usb3->gadget) +#define usb3_to_dev(_usb3) (_usb3->gadget.dev.parent) + +#define usb_ep_to_usb3_ep(_ep) container_of(_ep, struct renesas_usb3_ep, ep) +#define usb3_ep_to_usb3(_usb3_ep) (_usb3_ep->usb3) +#define usb_req_to_usb3_req(_req) container_of(_req, \ + struct renesas_usb3_request, req) + +#define usb3_get_ep(usb3, n) ((usb3)->usb3_ep + (n)) +#define usb3_for_each_ep(usb3_ep, usb3, i) \ + for ((i) = 0, usb3_ep = usb3_get_ep(usb3, (i)); \ + (i) < (usb3)->num_usb3_eps; \ + (i)++, usb3_ep = usb3_get_ep(usb3, (i))) + +static const char udc_name[] = "renesas_usb3"; + +static void usb3_write(struct renesas_usb3 *usb3, u32 data, u32 offs) +{ + iowrite32(data, usb3->reg + offs); +} + +static u32 usb3_read(struct renesas_usb3 *usb3, u32 offs) +{ + return ioread32(usb3->reg + offs); +} + +static void usb3_set_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs) +{ + u32 val = usb3_read(usb3, offs); + + val |= bits; + usb3_write(usb3, val, offs); +} + +static void usb3_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs) +{ + u32 val = usb3_read(usb3, offs); + + val &= ~bits; + usb3_write(usb3, val, offs); +} + +static int usb3_wait(struct renesas_usb3 *usb3, u32 reg, u32 mask, + u32 expected) +{ + int i; + + for (i = 0; i < USB3_WAIT_US; i++) { + if ((usb3_read(usb3, reg) & mask) == expected) + return 0; + udelay(1); + } + + dev_dbg(usb3_to_dev(usb3), "%s: timed out (%8x, %08x, %08x)\n", + __func__, reg, mask, expected); + + return -EBUSY; +} + +static void usb3_enable_irq_1(struct renesas_usb3 *usb3, u32 bits) +{ + usb3_set_bit(usb3, bits, USB3_USB_INT_ENA_1); +} + +static void usb3_disable_irq_1(struct renesas_usb3 *usb3, u32 bits) +{ + usb3_clear_bit(usb3, bits, USB3_USB_INT_ENA_1); +} + +static void usb3_enable_pipe_irq(struct renesas_usb3 *usb3, int num) +{ + usb3_set_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2); +} + +static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num) +{ + usb3_clear_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2); +} + +static void usb3_init_axi_bridge(struct renesas_usb3 *usb3) +{ + /* Set 
AXI_INT */ + usb3_write(usb3, ~0, USB3_DMA_INT_STA); + usb3_write(usb3, 0, USB3_DMA_INT_ENA); + usb3_set_bit(usb3, AXI_INT_DMAINT | AXI_INT_EPCINT, USB3_AXI_INT_ENA); +} + +static void usb3_init_epc_registers(struct renesas_usb3 *usb3) +{ + /* FIXME: How to change host / peripheral mode as well? */ + usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON); + + usb3_write(usb3, ~0, USB3_USB_INT_STA_1); + usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); +} + +static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3) +{ + if (!(usb3_read(usb3, USB3_USB20_CON) & USB20_CON_B2_SUSPEND)) + return true; /* already waked it up */ + + usb3_clear_bit(usb3, USB20_CON_B2_SUSPEND, USB3_USB20_CON); + usb3_enable_irq_1(usb3, USB_INT_1_B2_RSUM); + + return false; +} + +static void usb3_usb2_pullup(struct renesas_usb3 *usb3, int pullup) +{ + u32 bits = USB20_CON_B2_PUE | USB20_CON_B2_CONNECT; + + if (usb3->softconnect && pullup) + usb3_set_bit(usb3, bits, USB3_USB20_CON); + else + usb3_clear_bit(usb3, bits, USB3_USB20_CON); +} + +static void usb3_set_test_mode(struct renesas_usb3 *usb3) +{ + u32 val = usb3_read(usb3, USB3_USB20_CON); + + val &= ~USB20_CON_B2_TSTMOD_MASK; + val |= USB20_CON_B2_TSTMOD(usb3->test_mode); + usb3_write(usb3, val | USB20_CON_B2_TSTMOD_EN, USB3_USB20_CON); + if (!usb3->test_mode) + usb3_clear_bit(usb3, USB20_CON_B2_TSTMOD_EN, USB3_USB20_CON); +} + +static void usb3_start_usb2_connection(struct renesas_usb3 *usb3) +{ + usb3->disabled_count++; + usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON); + usb3_set_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON); + usb3_usb2_pullup(usb3, 1); +} + +static int usb3_is_usb3_phy_in_u3(struct renesas_usb3 *usb3) +{ + return usb3_read(usb3, USB3_USB30_CON) & USB30_CON_POW_SEL_IN_U3; +} + +static bool usb3_wakeup_usb3_phy(struct renesas_usb3 *usb3) +{ + if (!usb3_is_usb3_phy_in_u3(usb3)) + return true; /* already waked it up */ + + usb3_set_bit(usb3, USB30_CON_B3_PLLWAKE, USB3_USB30_CON); + usb3_enable_irq_1(usb3, USB_INT_1_B3_PLLWKUP); + + return false; +} + +static u16 usb3_feature_get_un_enabled(struct renesas_usb3 *usb3) +{ + u32 mask_u2 = SSIFCMD_UDIR_U2 | SSIFCMD_UREQ_U2; + u32 mask_u1 = SSIFCMD_UDIR_U1 | SSIFCMD_UREQ_U1; + u32 val = usb3_read(usb3, USB3_SSIFCMD); + u16 ret = 0; + + /* Enables {U2,U1} if the bits of UDIR and UREQ are set to 0 */ + if (!(val & mask_u2)) + ret |= 1 << USB_DEV_STAT_U2_ENABLED; + if (!(val & mask_u1)) + ret |= 1 << USB_DEV_STAT_U1_ENABLED; + + return ret; +} + +static void usb3_feature_u2_enable(struct renesas_usb3 *usb3, bool enable) +{ + u32 bits = SSIFCMD_UDIR_U2 | SSIFCMD_UREQ_U2; + + /* Enables U2 if the bits of UDIR and UREQ are set to 0 */ + if (enable) + usb3_clear_bit(usb3, bits, USB3_SSIFCMD); + else + usb3_set_bit(usb3, bits, USB3_SSIFCMD); +} + +static void usb3_feature_u1_enable(struct renesas_usb3 *usb3, bool enable) +{ + u32 bits = SSIFCMD_UDIR_U1 | SSIFCMD_UREQ_U1; + + /* Enables U1 if the bits of UDIR and UREQ are set to 0 */ + if (enable) + usb3_clear_bit(usb3, bits, USB3_SSIFCMD); + else + usb3_set_bit(usb3, bits, USB3_SSIFCMD); +} + +static void usb3_start_operation_for_usb3(struct renesas_usb3 *usb3) +{ + usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON); + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON); + usb3_set_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON); +} + +static void usb3_start_usb3_connection(struct renesas_usb3 *usb3) +{ + usb3_start_operation_for_usb3(usb3); + usb3_set_bit(usb3, USB_COM_CON_RX_DETECTION, USB3_USB_COM_CON); + + usb3_enable_irq_1(usb3, 
USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE | + USB_INT_1_SPEED); +} + +static void usb3_stop_usb3_connection(struct renesas_usb3 *usb3) +{ + usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON); +} + +static void usb3_transition_to_default_state(struct renesas_usb3 *usb3, + bool is_usb3) +{ + usb3_set_bit(usb3, USB_INT_2_PIPE(0), USB3_USB_INT_ENA_2); + usb3_write(usb3, P0_INT_ALL_BITS, USB3_P0_INT_STA); + usb3_set_bit(usb3, P0_INT_ALL_BITS, USB3_P0_INT_ENA); + + if (is_usb3) + usb3_enable_irq_1(usb3, USB_INT_1_B3_WRMRST | + USB_INT_1_B3_HOTRST); + else + usb3_enable_irq_1(usb3, USB_INT_1_B2_SPND | + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST); +} + +static void usb3_connect(struct renesas_usb3 *usb3) +{ + if (usb3_wakeup_usb3_phy(usb3)) + usb3_start_usb3_connection(usb3); +} + +static void usb3_reset_epc(struct renesas_usb3 *usb3) +{ + usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON); + usb3_clear_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON); + usb3_set_bit(usb3, USB_COM_CON_PIPE_CLR, USB3_USB_COM_CON); + usb3->test_mode = 0; + usb3_set_test_mode(usb3); +} + +static void usb3_disconnect(struct renesas_usb3 *usb3) +{ + usb3->disabled_count = 0; + usb3_usb2_pullup(usb3, 0); + usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON); + usb3_reset_epc(usb3); + + if (usb3->driver) + usb3->driver->disconnect(&usb3->gadget); +} + +static void usb3_check_vbus(struct renesas_usb3 *usb3) +{ + if (usb3->workaround_for_vbus) { + usb3_connect(usb3); + } else { + if (usb3_read(usb3, USB3_USB_STA) & USB_STA_VBUS_STA) + usb3_connect(usb3); + else + usb3_disconnect(usb3); + } +} + +static void renesas_usb3_init_controller(struct renesas_usb3 *usb3) +{ + usb3_init_axi_bridge(usb3); + usb3_init_epc_registers(usb3); + + usb3_check_vbus(usb3); +} + +static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3) +{ + usb3_disconnect(usb3); + usb3_write(usb3, 0, USB3_P0_INT_ENA); + usb3_write(usb3, 0, USB3_PN_INT_ENA); + usb3_write(usb3, 0, USB3_USB_INT_ENA_1); + usb3_write(usb3, 0, USB3_USB_INT_ENA_2); + usb3_write(usb3, 0, USB3_AXI_INT_ENA); +} + +static void usb3_irq_epc_int_1_pll_wakeup(struct renesas_usb3 *usb3) +{ + usb3_disable_irq_1(usb3, USB_INT_1_B3_PLLWKUP); + usb3_clear_bit(usb3, USB30_CON_B3_PLLWAKE, USB3_USB30_CON); + usb3_start_usb3_connection(usb3); +} + +static void usb3_irq_epc_int_1_linkup_success(struct renesas_usb3 *usb3) +{ + usb3_transition_to_default_state(usb3, true); +} + +static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3) +{ + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM); + usb3_start_usb2_connection(usb3); + usb3_transition_to_default_state(usb3, false); +} + +static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3) +{ + usb3_stop_usb3_connection(usb3); + if (usb3_wakeup_usb2_phy(usb3)) + usb3_irq_epc_int_1_resume(usb3); +} + +static void usb3_irq_epc_int_1_bus_reset(struct renesas_usb3 *usb3) +{ + usb3_reset_epc(usb3); + if (usb3->disabled_count < 3) + usb3_start_usb3_connection(usb3); + else + usb3_start_usb2_connection(usb3); +} + +static void usb3_irq_epc_int_1_vbus_change(struct renesas_usb3 *usb3) +{ + usb3_check_vbus(usb3); +} + +static void usb3_irq_epc_int_1_hot_reset(struct renesas_usb3 *usb3) +{ + usb3_reset_epc(usb3); + usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON); + + /* This bit shall be set within 12ms from the start of HotReset */ + usb3_set_bit(usb3, USB30_CON_B3_HOTRST_CMP, USB3_USB30_CON); +} + +static void usb3_irq_epc_int_1_warm_reset(struct renesas_usb3 *usb3) +{ + usb3_reset_epc(usb3); + 
usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON); + + usb3_start_operation_for_usb3(usb3); + usb3_enable_irq_1(usb3, USB_INT_1_SPEED); +} + +static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3) +{ + u32 speed = usb3_read(usb3, USB3_USB_STA) & USB_STA_SPEED_MASK; + + switch (speed) { + case USB_STA_SPEED_SS: + usb3->gadget.speed = USB_SPEED_SUPER; + break; + case USB_STA_SPEED_HS: + usb3->gadget.speed = USB_SPEED_HIGH; + break; + case USB_STA_SPEED_FS: + usb3->gadget.speed = USB_SPEED_FULL; + break; + default: + usb3->gadget.speed = USB_SPEED_UNKNOWN; + break; + } +} + +static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1) +{ + if (int_sta_1 & USB_INT_1_B3_PLLWKUP) + usb3_irq_epc_int_1_pll_wakeup(usb3); + + if (int_sta_1 & USB_INT_1_B3_LUPSUCS) + usb3_irq_epc_int_1_linkup_success(usb3); + + if (int_sta_1 & USB_INT_1_B3_HOTRST) + usb3_irq_epc_int_1_hot_reset(usb3); + + if (int_sta_1 & USB_INT_1_B3_WRMRST) + usb3_irq_epc_int_1_warm_reset(usb3); + + if (int_sta_1 & USB_INT_1_B3_DISABLE) + usb3_irq_epc_int_1_disable(usb3); + + if (int_sta_1 & USB_INT_1_B2_USBRST) + usb3_irq_epc_int_1_bus_reset(usb3); + + if (int_sta_1 & USB_INT_1_B2_RSUM) + usb3_irq_epc_int_1_resume(usb3); + + if (int_sta_1 & USB_INT_1_SPEED) + usb3_irq_epc_int_1_speed(usb3); + + if (int_sta_1 & USB_INT_1_VBUS_CNG) + usb3_irq_epc_int_1_vbus_change(usb3); +} + +static struct renesas_usb3_request *__usb3_get_request(struct renesas_usb3_ep + *usb3_ep) +{ + return list_first_entry_or_null(&usb3_ep->queue, + struct renesas_usb3_request, queue); +} + +static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep + *usb3_ep) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + struct renesas_usb3_request *usb3_req; + unsigned long flags; + + spin_lock_irqsave(&usb3->lock, flags); + usb3_req = __usb3_get_request(usb3_ep); + spin_unlock_irqrestore(&usb3->lock, flags); + + return usb3_req; +} + +static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req, int status) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", + usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, + status); + usb3_req->req.status = status; + spin_lock_irqsave(&usb3->lock, flags); + usb3_ep->started = false; + list_del_init(&usb3_req->queue); + spin_unlock_irqrestore(&usb3->lock, flags); + usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); +} + +static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) +{ + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0); + struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); + + if (usb3_req) + usb3_request_done(usb3_ep, usb3_req, 0); + if (usb3->test_mode) + usb3_set_test_mode(usb3); +} + +static void usb3_get_setup_data(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl) +{ + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0); + u32 *data = (u32 *)ctrl; + + *data++ = usb3_read(usb3, USB3_STUP_DAT_0); + *data = usb3_read(usb3, USB3_STUP_DAT_1); + + /* update this driver's flag */ + usb3_ep->dir_in = !!(ctrl->bRequestType & USB_DIR_IN); +} + +static void usb3_set_p0_con_update_res(struct renesas_usb3 *usb3, u32 res) +{ + u32 val = usb3_read(usb3, USB3_P0_CON); + + val &= ~(P0_CON_ST_RES_MASK | P0_CON_OT_RES_MASK | P0_CON_IN_RES_MASK); + val |= res | P0_CON_RES_WEN; + usb3_write(usb3, val, USB3_P0_CON); +} + +static void usb3_set_p0_con_for_ctrl_read_data(struct renesas_usb3 *usb3) 
+{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY | + P0_CON_OT_RES_FORCE_STALL | + P0_CON_IN_RES_NORMAL); +} + +static void usb3_set_p0_con_for_ctrl_read_status(struct renesas_usb3 *usb3) +{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL | + P0_CON_OT_RES_FORCE_STALL | + P0_CON_IN_RES_NORMAL); +} + +static void usb3_set_p0_con_for_ctrl_write_data(struct renesas_usb3 *usb3) +{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY | + P0_CON_OT_RES_NORMAL | + P0_CON_IN_RES_FORCE_STALL); +} + +static void usb3_set_p0_con_for_ctrl_write_status(struct renesas_usb3 *usb3) +{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL | + P0_CON_OT_RES_NORMAL | + P0_CON_IN_RES_FORCE_STALL); +} + +static void usb3_set_p0_con_for_no_data(struct renesas_usb3 *usb3) +{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL | + P0_CON_OT_RES_FORCE_STALL | + P0_CON_IN_RES_FORCE_STALL); +} + +static void usb3_set_p0_con_stall(struct renesas_usb3 *usb3) +{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_STALL | + P0_CON_OT_RES_FORCE_STALL | + P0_CON_IN_RES_FORCE_STALL); +} + +static void usb3_set_p0_con_stop(struct renesas_usb3 *usb3) +{ + usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY | + P0_CON_OT_RES_FORCE_NRDY | + P0_CON_IN_RES_FORCE_NRDY); +} + +static int usb3_pn_change(struct renesas_usb3 *usb3, int num) +{ + if (num == 0 || num > usb3->num_usb3_eps) + return -ENXIO; + + usb3_write(usb3, num, USB3_PIPE_COM); + + return 0; +} + +static void usb3_set_pn_con_update_res(struct renesas_usb3 *usb3, u32 res) +{ + u32 val = usb3_read(usb3, USB3_PN_CON); + + val &= ~PN_CON_RES_MASK; + val |= res & PN_CON_RES_MASK; + val |= PN_CON_RES_WEN; + usb3_write(usb3, val, USB3_PN_CON); +} + +static void usb3_pn_start(struct renesas_usb3 *usb3) +{ + usb3_set_pn_con_update_res(usb3, PN_CON_RES_NORMAL); +} + +static void usb3_pn_stop(struct renesas_usb3 *usb3) +{ + usb3_set_pn_con_update_res(usb3, PN_CON_RES_FORCE_NRDY); +} + +static void usb3_pn_stall(struct renesas_usb3 *usb3) +{ + usb3_set_pn_con_update_res(usb3, PN_CON_RES_FORCE_STALL); +} + +static int usb3_pn_con_clear(struct renesas_usb3 *usb3) +{ + usb3_set_bit(usb3, PN_CON_CLR, USB3_PN_CON); + + return usb3_wait(usb3, USB3_PN_CON, PN_CON_CLR, 0); +} + +static bool usb3_is_transfer_complete(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req) +{ + struct usb_request *req = &usb3_req->req; + + if ((!req->zero && req->actual == req->length) || + (req->actual % usb3_ep->ep.maxpacket) || (req->length == 0)) + return true; + else + return false; +} + +static int usb3_wait_pipe_status(struct renesas_usb3_ep *usb3_ep, u32 mask) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + u32 sta_reg = usb3_ep->num ? USB3_PN_STA : USB3_P0_STA; + + return usb3_wait(usb3, sta_reg, mask, mask); +} + +static void usb3_set_px_con_send(struct renesas_usb3_ep *usb3_ep, int bytes, + bool last) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + u32 con_reg = usb3_ep->num ? USB3_PN_CON : USB3_P0_CON; + u32 val = usb3_read(usb3, con_reg); + + val |= PX_CON_SEND | PX_CON_BYTE_EN_BYTES(bytes); + val |= (usb3_ep->num && last) ? 
PN_CON_LAST : 0; + usb3_write(usb3, val, con_reg); +} + +static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req, + u32 fifo_reg) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + int i; + int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual, + usb3_ep->ep.maxpacket); + u8 *buf = usb3_req->req.buf + usb3_req->req.actual; + u32 tmp = 0; + bool is_last; + + if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0) + return -EBUSY; + + /* Update gadget driver parameter */ + usb3_req->req.actual += len; + + /* Write data to the register */ + if (len >= 4) { + iowrite32_rep(usb3->reg + fifo_reg, buf, len / 4); + buf += (len / 4) * 4; + len %= 4; /* update len to use usb3_set_pX_con_send() */ + } + + if (len) { + for (i = 0; i < len; i++) + tmp |= buf[i] << (8 * i); + usb3_write(usb3, tmp, fifo_reg); + } + + is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); + /* Send the data */ + usb3_set_px_con_send(usb3_ep, len, is_last); + + return is_last ? 0 : -EAGAIN; +} + +static u32 usb3_get_received_length(struct renesas_usb3_ep *usb3_ep) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + u32 lng_reg = usb3_ep->num ? USB3_PN_LNG : USB3_P0_LNG; + + return usb3_read(usb3, lng_reg); +} + +static int usb3_read_pipe(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req, u32 fifo_reg) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + int i; + int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual, + usb3_get_received_length(usb3_ep)); + u8 *buf = usb3_req->req.buf + usb3_req->req.actual; + u32 tmp = 0; + + if (!len) + return 0; + + /* Update gadget driver parameter */ + usb3_req->req.actual += len; + + /* Read data from the register */ + if (len >= 4) { + ioread32_rep(usb3->reg + fifo_reg, buf, len / 4); + buf += (len / 4) * 4; + len %= 4; + } + + if (len) { + tmp = usb3_read(usb3, fifo_reg); + for (i = 0; i < len; i++) + buf[i] = (tmp >> (8 * i)) & 0xff; + } + + return usb3_is_transfer_complete(usb3_ep, usb3_req) ? 
0 : -EAGAIN; +} + +static void usb3_set_status_stage(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + + if (usb3_ep->dir_in) { + usb3_set_p0_con_for_ctrl_read_status(usb3); + } else { + if (!usb3_req->req.length) + usb3_set_p0_con_for_no_data(usb3); + else + usb3_set_p0_con_for_ctrl_write_status(usb3); + } +} + +static void usb3_p0_xfer(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req) +{ + int ret = -EAGAIN; + + if (usb3_ep->dir_in) + ret = usb3_write_pipe(usb3_ep, usb3_req, USB3_P0_WRITE); + else + ret = usb3_read_pipe(usb3_ep, usb3_req, USB3_P0_READ); + + if (!ret) + usb3_set_status_stage(usb3_ep, usb3_req); +} + +static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + + if (usb3_ep->started) + return; + + usb3_ep->started = true; + + if (usb3_ep->dir_in) { + usb3_set_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); + usb3_set_p0_con_for_ctrl_read_data(usb3); + } else { + usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); + usb3_set_p0_con_for_ctrl_write_data(usb3); + } + + usb3_p0_xfer(usb3_ep, usb3_req); +} + +static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep); + unsigned long flags; + int ret = -EAGAIN; + u32 enable_bits = 0; + + if (usb3_ep->halt || usb3_ep->started) + return; + if (usb3_req != usb3_req_first) + return; + + spin_lock_irqsave(&usb3->lock, flags); + if (usb3_pn_change(usb3, usb3_ep->num) < 0) + goto out; + + usb3_ep->started = true; + usb3_pn_start(usb3); + + if (usb3_ep->dir_in) { + ret = usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE); + enable_bits |= PN_INT_LSTTR; + } + + if (ret < 0) + enable_bits |= PN_INT_BFRDY; + + if (enable_bits) { + usb3_set_bit(usb3, enable_bits, USB3_PN_INT_ENA); + usb3_enable_pipe_irq(usb3, usb3_ep->num); + } +out: + spin_unlock_irqrestore(&usb3->lock, flags); +} + +static int renesas_usb3_ep_queue(struct usb_ep *_ep, struct usb_request *_req, + gfp_t gfp_flags) +{ + struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep); + struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req); + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + dev_dbg(usb3_to_dev(usb3), "ep_queue: ep%2d, %u\n", usb3_ep->num, + _req->length); + + _req->status = -EINPROGRESS; + _req->actual = 0; + spin_lock_irqsave(&usb3->lock, flags); + list_add_tail(&usb3_req->queue, &usb3_ep->queue); + spin_unlock_irqrestore(&usb3->lock, flags); + + if (!usb3_ep->num) + usb3_start_pipe0(usb3_ep, usb3_req); + else + usb3_start_pipen(usb3_ep, usb3_req); + + return 0; +} + +static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr) +{ + /* DEV_ADDR bit field is cleared by WarmReset, HotReset and BusReset */ + usb3_set_bit(usb3, USB_COM_CON_DEV_ADDR(addr), USB3_USB_COM_CON); +} + +static bool usb3_std_req_set_address(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl) +{ + if (ctrl->wValue >= 128) + return true; /* stall */ + + usb3_set_device_address(usb3, ctrl->wValue); + usb3_set_p0_con_for_no_data(usb3); + + return false; +} + +static void usb3_pipe0_internal_xfer(struct renesas_usb3 *usb3, + void *tx_data, size_t len, + void (*complete)(struct usb_ep *ep, + struct usb_request *req)) +{ + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 
0); + + if (tx_data) + memcpy(usb3->ep0_buf, tx_data, + min_t(size_t, len, USB3_EP0_BUF_SIZE)); + + usb3->ep0_req->buf = &usb3->ep0_buf; + usb3->ep0_req->length = len; + usb3->ep0_req->complete = complete; + renesas_usb3_ep_queue(&usb3_ep->ep, usb3->ep0_req, GFP_ATOMIC); +} + +static void usb3_pipe0_get_status_completion(struct usb_ep *ep, + struct usb_request *req) +{ +} + +static bool usb3_std_req_get_status(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl) +{ + bool stall = false; + struct renesas_usb3_ep *usb3_ep; + int num; + u16 status = 0; + + switch (ctrl->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + if (usb3->gadget.is_selfpowered) + status |= 1 << USB_DEVICE_SELF_POWERED; + if (usb3->gadget.speed == USB_SPEED_SUPER) + status |= usb3_feature_get_un_enabled(usb3); + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + num = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; + usb3_ep = usb3_get_ep(usb3, num); + if (usb3_ep->halt) + status |= 1 << USB_ENDPOINT_HALT; + break; + default: + stall = true; + break; + } + + if (!stall) { + status = cpu_to_le16(status); + dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n", + usb_req_to_usb3_req(usb3->ep0_req)); + usb3_pipe0_internal_xfer(usb3, &status, sizeof(status), + usb3_pipe0_get_status_completion); + } + + return stall; +} + +static bool usb3_std_req_feature_device(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl, bool set) +{ + bool stall = true; + u16 w_value = le16_to_cpu(ctrl->wValue); + + switch (w_value) { + case USB_DEVICE_TEST_MODE: + if (!set) + break; + usb3->test_mode = le16_to_cpu(ctrl->wIndex) >> 8; + stall = false; + break; + case USB_DEVICE_U1_ENABLE: + case USB_DEVICE_U2_ENABLE: + if (usb3->gadget.speed != USB_SPEED_SUPER) + break; + if (w_value == USB_DEVICE_U1_ENABLE) + usb3_feature_u1_enable(usb3, set); + if (w_value == USB_DEVICE_U2_ENABLE) + usb3_feature_u2_enable(usb3, set); + stall = false; + break; + default: + break; + } + + return stall; +} + +static int usb3_set_halt_p0(struct renesas_usb3_ep *usb3_ep, bool halt) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + + if (unlikely(usb3_ep->num)) + return -EINVAL; + + usb3_ep->halt = halt; + if (halt) + usb3_set_p0_con_stall(usb3); + else + usb3_set_p0_con_stop(usb3); + + return 0; +} + +static int usb3_set_halt_pn(struct renesas_usb3_ep *usb3_ep, bool halt, + bool is_clear_feature) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + spin_lock_irqsave(&usb3->lock, flags); + if (!usb3_pn_change(usb3, usb3_ep->num)) { + usb3_ep->halt = halt; + if (halt) { + usb3_pn_stall(usb3); + } else if (!is_clear_feature || !usb3_ep->wedge) { + usb3_pn_con_clear(usb3); + usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON); + usb3_pn_stop(usb3); + } + } + spin_unlock_irqrestore(&usb3->lock, flags); + + return 0; +} + +static int usb3_set_halt(struct renesas_usb3_ep *usb3_ep, bool halt, + bool is_clear_feature) +{ + int ret = 0; + + if (halt && usb3_ep->started) + return -EAGAIN; + + if (usb3_ep->num) + ret = usb3_set_halt_pn(usb3_ep, halt, is_clear_feature); + else + ret = usb3_set_halt_p0(usb3_ep, halt); + + return ret; +} + +static bool usb3_std_req_feature_endpoint(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl, + bool set) +{ + int num = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; + struct renesas_usb3_ep *usb3_ep; + struct renesas_usb3_request *usb3_req; + + if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT) + return true; /* stall */ + + usb3_ep = 
usb3_get_ep(usb3, num); + usb3_set_halt(usb3_ep, set, true); + + /* Restarts a queue if clear feature */ + if (!set) { + usb3_ep->started = false; + usb3_req = usb3_get_request(usb3_ep); + if (usb3_req) + usb3_start_pipen(usb3_ep, usb3_req); + } + + return false; +} + +static bool usb3_std_req_feature(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl, bool set) +{ + bool stall = false; + + switch (ctrl->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + stall = usb3_std_req_feature_device(usb3, ctrl, set); + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + stall = usb3_std_req_feature_endpoint(usb3, ctrl, set); + break; + default: + stall = true; + break; + } + + if (!stall) + usb3_set_p0_con_for_no_data(usb3); + + return stall; +} + +static void usb3_pipe0_set_sel_completion(struct usb_ep *ep, + struct usb_request *req) +{ + /* TODO */ +} + +static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl) +{ + u16 w_length = le16_to_cpu(ctrl->wLength); + + if (w_length != 6) + return true; /* stall */ + + dev_dbg(usb3_to_dev(usb3), "set_sel: req = %p\n", + usb_req_to_usb3_req(usb3->ep0_req)); + usb3_pipe0_internal_xfer(usb3, NULL, 6, usb3_pipe0_set_sel_completion); + + return false; +} + +static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl) +{ + if (ctrl->wValue > 0) + usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON); + else + usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON); + + return false; +} + +/** + * usb3_handle_standard_request - handle some standard requests + * @usb3: the renesas_usb3 pointer + * @ctrl: a pointer of setup data + * + * Returns true if this function handled a standard request + */ +static bool usb3_handle_standard_request(struct renesas_usb3 *usb3, + struct usb_ctrlrequest *ctrl) +{ + bool ret = false; + bool stall = false; + + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + switch (ctrl->bRequest) { + case USB_REQ_SET_ADDRESS: + stall = usb3_std_req_set_address(usb3, ctrl); + ret = true; + break; + case USB_REQ_GET_STATUS: + stall = usb3_std_req_get_status(usb3, ctrl); + ret = true; + break; + case USB_REQ_CLEAR_FEATURE: + stall = usb3_std_req_feature(usb3, ctrl, false); + ret = true; + break; + case USB_REQ_SET_FEATURE: + stall = usb3_std_req_feature(usb3, ctrl, true); + ret = true; + break; + case USB_REQ_SET_SEL: + stall = usb3_std_req_set_sel(usb3, ctrl); + ret = true; + break; + case USB_REQ_SET_ISOCH_DELAY: + /* This hardware doesn't support Isochronous xfer */ + stall = true; + ret = true; + break; + case USB_REQ_SET_CONFIGURATION: + usb3_std_req_set_configuration(usb3, ctrl); + break; + default: + break; + } + } + + if (stall) + usb3_set_p0_con_stall(usb3); + + return ret; +} + +static int usb3_p0_con_clear_buffer(struct renesas_usb3 *usb3) +{ + usb3_set_bit(usb3, P0_CON_BCLR, USB3_P0_CON); + + return usb3_wait(usb3, USB3_P0_CON, P0_CON_BCLR, 0); +} + +static void usb3_irq_epc_pipe0_setup(struct renesas_usb3 *usb3) +{ + struct usb_ctrlrequest ctrl; + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0); + + /* Call giveback function if previous transfer is not completed */ + if (usb3_ep->started) + usb3_request_done(usb3_ep, usb3_get_request(usb3_ep), + -ECONNRESET); + + usb3_p0_con_clear_buffer(usb3); + usb3_get_setup_data(usb3, &ctrl); + if (!usb3_handle_standard_request(usb3, &ctrl)) + if (usb3->driver->setup(&usb3->gadget, &ctrl) < 0) + usb3_set_p0_con_stall(usb3); +} + +static void 
usb3_irq_epc_pipe0_bfrdy(struct renesas_usb3 *usb3) +{ + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0); + struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); + + if (!usb3_req) + return; + + usb3_p0_xfer(usb3_ep, usb3_req); +} + +static void usb3_irq_epc_pipe0(struct renesas_usb3 *usb3) +{ + u32 p0_int_sta = usb3_read(usb3, USB3_P0_INT_STA); + + p0_int_sta &= usb3_read(usb3, USB3_P0_INT_ENA); + usb3_write(usb3, p0_int_sta, USB3_P0_INT_STA); + if (p0_int_sta & P0_INT_STSED) + usb3_irq_epc_pipe0_status_end(usb3); + if (p0_int_sta & P0_INT_SETUP) + usb3_irq_epc_pipe0_setup(usb3); + if (p0_int_sta & P0_INT_BFRDY) + usb3_irq_epc_pipe0_bfrdy(usb3); +} + +static void usb3_request_done_pipen(struct renesas_usb3 *usb3, + struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req, + int status) +{ + usb3_pn_stop(usb3); + usb3_disable_pipe_irq(usb3, usb3_ep->num); + usb3_request_done(usb3_ep, usb3_req, status); + + /* get next usb3_req */ + usb3_req = usb3_get_request(usb3_ep); + if (usb3_req) + usb3_start_pipen(usb3_ep, usb3_req); +} + +static void usb3_irq_epc_pipen_lsttr(struct renesas_usb3 *usb3, int num) +{ + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); + struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); + + if (!usb3_req) + return; + + if (usb3_ep->dir_in) { + dev_dbg(usb3_to_dev(usb3), "%s: len = %u, actual = %u\n", + __func__, usb3_req->req.length, usb3_req->req.actual); + usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); + } +} + +static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num) +{ + struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); + struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); + + if (!usb3_req) + return; + + if (usb3_ep->dir_in) { + /* Do not stop the IN pipe here to detect LSTTR interrupt */ + if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) + usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); + } else { + if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) + usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); + } +} + +static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) +{ + u32 pn_int_sta; + + if (usb3_pn_change(usb3, num) < 0) + return; + + pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); + pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); + usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); + if (pn_int_sta & PN_INT_LSTTR) + usb3_irq_epc_pipen_lsttr(usb3, num); + if (pn_int_sta & PN_INT_BFRDY) + usb3_irq_epc_pipen_bfrdy(usb3, num); +} + +static void usb3_irq_epc_int_2(struct renesas_usb3 *usb3, u32 int_sta_2) +{ + int i; + + for (i = 0; i < usb3->num_usb3_eps; i++) { + if (int_sta_2 & USB_INT_2_PIPE(i)) { + if (!i) + usb3_irq_epc_pipe0(usb3); + else + usb3_irq_epc_pipen(usb3, i); + } + } +} + +static void usb3_irq_epc(struct renesas_usb3 *usb3) +{ + u32 int_sta_1 = usb3_read(usb3, USB3_USB_INT_STA_1); + u32 int_sta_2 = usb3_read(usb3, USB3_USB_INT_STA_2); + + int_sta_1 &= usb3_read(usb3, USB3_USB_INT_ENA_1); + if (int_sta_1) { + usb3_write(usb3, int_sta_1, USB3_USB_INT_STA_1); + usb3_irq_epc_int_1(usb3, int_sta_1); + } + + int_sta_2 &= usb3_read(usb3, USB3_USB_INT_ENA_2); + if (int_sta_2) + usb3_irq_epc_int_2(usb3, int_sta_2); +} + +static irqreturn_t renesas_usb3_irq(int irq, void *_usb3) +{ + struct renesas_usb3 *usb3 = _usb3; + irqreturn_t ret = IRQ_NONE; + u32 axi_int_sta = usb3_read(usb3, USB3_AXI_INT_STA); + + if (axi_int_sta & AXI_INT_EPCINT) { + usb3_irq_epc(usb3); + ret = IRQ_HANDLED; + } + + return ret; +} + +static void 
usb3_write_pn_mod(struct renesas_usb3_ep *usb3_ep, + const struct usb_endpoint_descriptor *desc) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + u32 val = 0; + + val |= usb3_ep->dir_in ? PN_MOD_DIR : 0; + val |= PN_MOD_TYPE(usb_endpoint_type(desc)); + val |= PN_MOD_EPNUM(usb_endpoint_num(desc)); + usb3_write(usb3, val, USB3_PN_MOD); +} + +static u32 usb3_calc_ramarea(int ram_size) +{ + WARN_ON(ram_size > SZ_16K); + + if (ram_size <= SZ_1K) + return PN_RAMMAP_RAMAREA_1KB; + else if (ram_size <= SZ_2K) + return PN_RAMMAP_RAMAREA_2KB; + else if (ram_size <= SZ_4K) + return PN_RAMMAP_RAMAREA_4KB; + else if (ram_size <= SZ_8K) + return PN_RAMMAP_RAMAREA_8KB; + else + return PN_RAMMAP_RAMAREA_16KB; +} + +static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep, + const struct usb_endpoint_descriptor *desc) +{ + return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc)); +} + +static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep, + const struct usb_endpoint_descriptor *desc) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + usb3_ep->dir_in = usb_endpoint_dir_in(desc); + + spin_lock_irqsave(&usb3->lock, flags); + if (!usb3_pn_change(usb3, usb3_ep->num)) { + usb3_write_pn_mod(usb3_ep, desc); + usb3_write(usb3, usb3_calc_rammap_val(usb3_ep, desc), + USB3_PN_RAMMAP); + usb3_pn_con_clear(usb3); + usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON); + } + spin_unlock_irqrestore(&usb3->lock, flags); + + return 0; +} + +static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + usb3_ep->halt = false; + + spin_lock_irqsave(&usb3->lock, flags); + if (!usb3_pn_change(usb3, usb3_ep->num)) { + usb3_write(usb3, 0, USB3_PN_RAMMAP); + usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); + } + spin_unlock_irqrestore(&usb3->lock, flags); + + return 0; +} + +/*------- usb_ep_ops -----------------------------------------------------*/ +static int renesas_usb3_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) +{ + struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep); + + return usb3_enable_pipe_n(usb3_ep, desc); +} + +static int renesas_usb3_ep_disable(struct usb_ep *_ep) +{ + struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep); + struct renesas_usb3_request *usb3_req; + + do { + usb3_req = usb3_get_request(usb3_ep); + if (!usb3_req) + break; + usb3_request_done(usb3_ep, usb3_req, -ESHUTDOWN); + } while (1); + + return usb3_disable_pipe_n(usb3_ep); +} + +static struct usb_request *__renesas_usb3_ep_alloc_request(gfp_t gfp_flags) +{ + struct renesas_usb3_request *usb3_req; + + usb3_req = kzalloc(sizeof(struct renesas_usb3_request), gfp_flags); + if (!usb3_req) + return NULL; + + INIT_LIST_HEAD(&usb3_req->queue); + + return &usb3_req->req; +} + +static void __renesas_usb3_ep_free_request(struct usb_request *_req) +{ + struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req); + + kfree(usb3_req); +} + +static struct usb_request *renesas_usb3_ep_alloc_request(struct usb_ep *_ep, + gfp_t gfp_flags) +{ + return __renesas_usb3_ep_alloc_request(gfp_flags); +} + +static void renesas_usb3_ep_free_request(struct usb_ep *_ep, + struct usb_request *_req) +{ + __renesas_usb3_ep_free_request(_req); +} + +static int renesas_usb3_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep); + struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req); + struct renesas_usb3 *usb3 
= usb3_ep_to_usb3(usb3_ep); + + dev_dbg(usb3_to_dev(usb3), "ep_dequeue: ep%2d, %u\n", usb3_ep->num, + _req->length); + + usb3_request_done_pipen(usb3, usb3_ep, usb3_req, -ECONNRESET); + + return 0; +} + +static int renesas_usb3_ep_set_halt(struct usb_ep *_ep, int value) +{ + return usb3_set_halt(usb_ep_to_usb3_ep(_ep), !!value, false); +} + +static int renesas_usb3_ep_set_wedge(struct usb_ep *_ep) +{ + struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep); + + usb3_ep->wedge = true; + return usb3_set_halt(usb3_ep, true, false); +} + +static void renesas_usb3_ep_fifo_flush(struct usb_ep *_ep) +{ + struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep); + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + if (usb3_ep->num) { + spin_lock_irqsave(&usb3->lock, flags); + if (!usb3_pn_change(usb3, usb3_ep->num)) { + usb3_pn_con_clear(usb3); + usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON); + } + spin_unlock_irqrestore(&usb3->lock, flags); + } else { + usb3_p0_con_clear_buffer(usb3); + } +} + +static struct usb_ep_ops renesas_usb3_ep_ops = { + .enable = renesas_usb3_ep_enable, + .disable = renesas_usb3_ep_disable, + + .alloc_request = renesas_usb3_ep_alloc_request, + .free_request = renesas_usb3_ep_free_request, + + .queue = renesas_usb3_ep_queue, + .dequeue = renesas_usb3_ep_dequeue, + + .set_halt = renesas_usb3_ep_set_halt, + .set_wedge = renesas_usb3_ep_set_wedge, + .fifo_flush = renesas_usb3_ep_fifo_flush, +}; + +/*------- usb_gadget_ops -------------------------------------------------*/ +static int renesas_usb3_start(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + struct renesas_usb3 *usb3; + + if (!driver || driver->max_speed < USB_SPEED_FULL || + !driver->setup) + return -EINVAL; + + usb3 = gadget_to_renesas_usb3(gadget); + + /* hook up the driver */ + usb3->driver = driver; + + renesas_usb3_init_controller(usb3); + + return 0; +} + +static int renesas_usb3_stop(struct usb_gadget *gadget) +{ + struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); + unsigned long flags; + + spin_lock_irqsave(&usb3->lock, flags); + usb3->softconnect = false; + usb3->gadget.speed = USB_SPEED_UNKNOWN; + usb3->driver = NULL; + renesas_usb3_stop_controller(usb3); + spin_unlock_irqrestore(&usb3->lock, flags); + + return 0; +} + +static int renesas_usb3_get_frame(struct usb_gadget *_gadget) +{ + return -EOPNOTSUPP; +} + +static int renesas_usb3_pullup(struct usb_gadget *gadget, int is_on) +{ + struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); + + usb3->softconnect = !!is_on; + + return 0; +} + +static int renesas_usb3_set_selfpowered(struct usb_gadget *gadget, int is_self) +{ + gadget->is_selfpowered = !!is_self; + + return 0; +} + +static const struct usb_gadget_ops renesas_usb3_gadget_ops = { + .get_frame = renesas_usb3_get_frame, + .udc_start = renesas_usb3_start, + .udc_stop = renesas_usb3_stop, + .pullup = renesas_usb3_pullup, + .set_selfpowered = renesas_usb3_set_selfpowered, +}; + +/*------- platform_driver ------------------------------------------------*/ +static int renesas_usb3_remove(struct platform_device *pdev) +{ + struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + usb_del_gadget_udc(&usb3->gadget); + + __renesas_usb3_ep_free_request(usb3->ep0_req); + + return 0; +} + +static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev, + const struct renesas_usb3_priv *priv) +{ + struct renesas_usb3_ep *usb3_ep; + int i; + + /* calculate 
num_usb3_eps from renesas_usb3_priv */ + usb3->num_usb3_eps = priv->ramsize_per_ramif * priv->num_ramif * 2 / + priv->ramsize_per_pipe + 1; + + if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES) + usb3->num_usb3_eps = USB3_MAX_NUM_PIPES; + + usb3->usb3_ep = devm_kzalloc(dev, sizeof(*usb3_ep) * usb3->num_usb3_eps, + GFP_KERNEL); + if (!usb3->usb3_ep) + return -ENOMEM; + + dev_dbg(dev, "%s: num_usb3_eps = %d\n", __func__, usb3->num_usb3_eps); + /* + * This driver prepares pipes as the followings: + * - odd pipes = IN pipe + * - even pipes = OUT pipe (except pipe 0) + */ + usb3_for_each_ep(usb3_ep, usb3, i) { + snprintf(usb3_ep->ep_name, sizeof(usb3_ep->ep_name), "ep%d", i); + usb3_ep->usb3 = usb3; + usb3_ep->num = i; + usb3_ep->ep.name = usb3_ep->ep_name; + usb3_ep->ep.ops = &renesas_usb3_ep_ops; + INIT_LIST_HEAD(&usb3_ep->queue); + INIT_LIST_HEAD(&usb3_ep->ep.ep_list); + if (!i) { + /* for control pipe */ + usb3->gadget.ep0 = &usb3_ep->ep; + usb_ep_set_maxpacket_limit(&usb3_ep->ep, + USB3_EP0_HSFS_MAX_PACKET_SIZE); + usb3_ep->ep.caps.type_control = true; + usb3_ep->ep.caps.dir_in = true; + usb3_ep->ep.caps.dir_out = true; + continue; + } + + /* for bulk or interrupt pipe */ + usb_ep_set_maxpacket_limit(&usb3_ep->ep, ~0); + list_add_tail(&usb3_ep->ep.ep_list, &usb3->gadget.ep_list); + usb3_ep->ep.caps.type_bulk = true; + usb3_ep->ep.caps.type_int = true; + if (i & 1) + usb3_ep->ep.caps.dir_in = true; + else + usb3_ep->ep.caps.dir_out = true; + } + + return 0; +} + +static void renesas_usb3_init_ram(struct renesas_usb3 *usb3, struct device *dev, + const struct renesas_usb3_priv *priv) +{ + struct renesas_usb3_ep *usb3_ep; + int i; + u32 ramif[2], basead[2]; /* index 0 = for IN pipes */ + u32 *cur_ramif, *cur_basead; + u32 val; + + memset(ramif, 0, sizeof(ramif)); + memset(basead, 0, sizeof(basead)); + + /* + * This driver prepares pipes as the followings: + * - all pipes = the same size as "ramsize_per_pipe" + * Please refer to the "Method of Specifying RAM Mapping" + */ + usb3_for_each_ep(usb3_ep, usb3, i) { + if (!i) + continue; /* out of scope if ep num = 0 */ + if (usb3_ep->ep.caps.dir_in) { + cur_ramif = &ramif[0]; + cur_basead = &basead[0]; + } else { + cur_ramif = &ramif[1]; + cur_basead = &basead[1]; + } + + if (*cur_basead > priv->ramsize_per_ramif) + continue; /* out of memory for IN or OUT pipe */ + + /* calculate rammap_val */ + val = PN_RAMMAP_RAMIF(*cur_ramif); + val |= usb3_calc_ramarea(priv->ramsize_per_pipe); + val |= PN_RAMMAP_BASEAD(*cur_basead); + usb3_ep->rammap_val = val; + + dev_dbg(dev, "ep%2d: val = %08x, ramif = %d, base = %x\n", + i, val, *cur_ramif, *cur_basead); + + /* update current ramif */ + if (*cur_ramif + 1 == priv->num_ramif) { + *cur_ramif = 0; + *cur_basead += priv->ramsize_per_pipe; + } else { + (*cur_ramif)++; + } + } +} + +static const struct renesas_usb3_priv renesas_usb3_priv_r8a7795 = { + .ramsize_per_ramif = SZ_16K, + .num_ramif = 2, + .ramsize_per_pipe = SZ_4K, + .workaround_for_vbus = true, +}; + +static const struct of_device_id usb3_of_match[] = { + { + .compatible = "renesas,r8a7795-usb3-peri", + .data = &renesas_usb3_priv_r8a7795, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, usb3_of_match); + +static int renesas_usb3_probe(struct platform_device *pdev) +{ + struct renesas_usb3 *usb3; + struct resource *res; + const struct of_device_id *match; + int irq, ret; + const struct renesas_usb3_priv *priv; + + match = of_match_node(usb3_of_match, pdev->dev.of_node); + if (!match) + return -ENODEV; + priv = match->data; + + irq = platform_get_irq(pdev, 0); + if 
(irq < 0) + return -ENODEV; + + usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL); + if (!usb3) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + usb3->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(usb3->reg)) + return PTR_ERR(usb3->reg); + + platform_set_drvdata(pdev, usb3); + spin_lock_init(&usb3->lock); + + usb3->gadget.ops = &renesas_usb3_gadget_ops; + usb3->gadget.name = udc_name; + usb3->gadget.max_speed = USB_SPEED_SUPER; + INIT_LIST_HEAD(&usb3->gadget.ep_list); + ret = renesas_usb3_init_ep(usb3, &pdev->dev, priv); + if (ret < 0) + return ret; + renesas_usb3_init_ram(usb3, &pdev->dev, priv); + + ret = devm_request_irq(&pdev->dev, irq, renesas_usb3_irq, 0, + dev_name(&pdev->dev), usb3); + if (ret < 0) + return ret; + + /* for ep0 handling */ + usb3->ep0_req = __renesas_usb3_ep_alloc_request(GFP_KERNEL); + if (!usb3->ep0_req) + return -ENOMEM; + + ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget); + if (ret < 0) + goto err_add_udc; + + usb3->workaround_for_vbus = priv->workaround_for_vbus; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + dev_info(&pdev->dev, "probed\n"); + + return 0; + +err_add_udc: + __renesas_usb3_ep_free_request(usb3->ep0_req); + + return ret; +} + +static struct platform_driver renesas_usb3_driver = { + .probe = renesas_usb3_probe, + .remove = renesas_usb3_remove, + .driver = { + .name = (char *)udc_name, + .of_match_table = of_match_ptr(usb3_of_match), + }, +}; +module_platform_driver(renesas_usb3_driver); + +MODULE_DESCRIPTION("Renesas USB3.0 Peripheral driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>"); +MODULE_ALIAS("platform:renesas_usb3"); diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c index e9def42ce50d..82a9e2a3bedc 100644 --- a/drivers/usb/gadget/udc/s3c-hsudc.c +++ b/drivers/usb/gadget/udc/s3c-hsudc.c @@ -569,7 +569,7 @@ static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc, hsep = &hsudc->ep[ep_num]; switch (le16_to_cpu(ctrl->wValue)) { case USB_ENDPOINT_HALT: - if (set || (!set && !hsep->wedge)) + if (set || !hsep->wedge) s3c_hsudc_set_halt(&hsep->ep, set); return 0; } diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index f660afba715d..fd73a3ea07c2 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -51,8 +51,12 @@ struct usb_udc { static struct class *udc_class; static LIST_HEAD(udc_list); +static LIST_HEAD(gadget_driver_pending_list); static DEFINE_MUTEX(udc_lock); +static int udc_bind_to_driver(struct usb_udc *udc, + struct usb_gadget_driver *driver); + /* ------------------------------------------------------------------------- */ #ifdef CONFIG_HAS_DMA @@ -356,6 +360,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)) { struct usb_udc *udc; + struct usb_gadget_driver *driver; int ret = -ENOMEM; udc = kzalloc(sizeof(*udc), GFP_KERNEL); @@ -403,6 +408,18 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); udc->vbus = true; + /* pick up one of pending gadget drivers */ + list_for_each_entry(driver, &gadget_driver_pending_list, pending) { + if (!driver->udc_name || strcmp(driver->udc_name, + dev_name(&udc->dev)) == 0) { + ret = udc_bind_to_driver(udc, driver); + if (ret) + goto err4; + list_del(&driver->pending); + break; + } + } + 
mutex_unlock(&udc_lock); return 0; @@ -473,10 +490,14 @@ void usb_del_gadget_udc(struct usb_gadget *gadget) mutex_lock(&udc_lock); list_del(&udc->list); - mutex_unlock(&udc_lock); - if (udc->driver) + if (udc->driver) { + struct usb_gadget_driver *driver = udc->driver; + usb_gadget_remove_driver(udc); + list_add(&driver->pending, &gadget_driver_pending_list); + } + mutex_unlock(&udc_lock); kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE); flush_work(&gadget->work); @@ -520,50 +541,36 @@ err1: return ret; } -int usb_udc_attach_driver(const char *name, struct usb_gadget_driver *driver) -{ - struct usb_udc *udc = NULL; - int ret = -ENODEV; - - mutex_lock(&udc_lock); - list_for_each_entry(udc, &udc_list, list) { - ret = strcmp(name, dev_name(&udc->dev)); - if (!ret) - break; - } - if (ret) { - ret = -ENODEV; - goto out; - } - if (udc->driver) { - ret = -EBUSY; - goto out; - } - ret = udc_bind_to_driver(udc, driver); -out: - mutex_unlock(&udc_lock); - return ret; -} -EXPORT_SYMBOL_GPL(usb_udc_attach_driver); - int usb_gadget_probe_driver(struct usb_gadget_driver *driver) { struct usb_udc *udc = NULL; - int ret; + int ret = -ENODEV; if (!driver || !driver->bind || !driver->setup) return -EINVAL; mutex_lock(&udc_lock); - list_for_each_entry(udc, &udc_list, list) { - /* For now we take the first one */ - if (!udc->driver) + if (driver->udc_name) { + list_for_each_entry(udc, &udc_list, list) { + ret = strcmp(driver->udc_name, dev_name(&udc->dev)); + if (!ret) + break; + } + if (!ret && !udc->driver) goto found; + } else { + list_for_each_entry(udc, &udc_list, list) { + /* For now we take the first one */ + if (!udc->driver) + goto found; + } } - pr_debug("couldn't find an available UDC\n"); + list_add_tail(&driver->pending, &gadget_driver_pending_list); + pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", + driver->function); mutex_unlock(&udc_lock); - return -ENODEV; + return 0; found: ret = udc_bind_to_driver(udc, driver); mutex_unlock(&udc_lock); @@ -589,6 +596,10 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) break; } + if (ret) { + list_del(&driver->pending); + ret = 0; + } mutex_unlock(&udc_lock); return ret; } diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 3bb08870148f..daa563ff1fa0 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -41,6 +41,15 @@ config USB_XHCI_PLATFORM If unsure, say N. +config USB_XHCI_MTK + tristate "xHCI support for Mediatek MT65xx" + select MFD_SYSCON + depends on ARCH_MEDIATEK || COMPILE_TEST + ---help--- + Say 'Y' to enable the support for the xHCI host controller + found in Mediatek MT65xx SoCs. + If unsure, say N. 
+ config USB_XHCI_MVEBU tristate "xHCI support for Marvell Armada 375/38x" select USB_XHCI_PLATFORM diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index e7558abc994d..65a06b4382bf 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -13,6 +13,9 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o xhci-hcd-y := xhci.o xhci-mem.o xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o xhci-hcd-y += xhci-trace.o +ifneq ($(CONFIG_USB_XHCI_MTK), ) + xhci-hcd-y += xhci-mtk-sch.o +endif xhci-plat-hcd-y := xhci-plat.o ifneq ($(CONFIG_USB_XHCI_MVEBU), ) @@ -64,6 +67,7 @@ obj-$(CONFIG_USB_FHCI_HCD) += fhci.o obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o +obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk.o obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index 5398e3d42822..291aaa2baed8 100644 --- a/drivers/usb/host/bcma-hcd.c +++ b/drivers/usb/host/bcma-hcd.c @@ -21,6 +21,7 @@ */ #include <linux/bcma/bcma.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/slab.h> @@ -36,6 +37,7 @@ MODULE_LICENSE("GPL"); struct bcma_hcd_device { struct platform_device *ehci_dev; struct platform_device *ohci_dev; + struct gpio_desc *gpio_desc; }; /* Wait for bitmask in a register to get set or cleared. @@ -228,19 +230,12 @@ static void bcma_hcd_init_chip_arm(struct bcma_device *dev) static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val) { - int gpio; + struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev); - gpio = of_get_named_gpio(dev->dev.of_node, "vcc-gpio", 0); - if (!gpio_is_valid(gpio)) + if (IS_ERR_OR_NULL(usb_dev->gpio_desc)) return; - if (val) { - gpio_request(gpio, "bcma-hcd-gpio"); - gpio_set_value(gpio, 1); - } else { - gpio_set_value(gpio, 0); - gpio_free(gpio); - } + gpiod_set_value(usb_dev->gpio_desc, val); } static const struct usb_ehci_pdata ehci_pdata = { @@ -314,7 +309,11 @@ static int bcma_hcd_probe(struct bcma_device *dev) if (!usb_dev) return -ENOMEM; - bcma_hci_platform_power_gpio(dev, true); + if (dev->dev.of_node) + usb_dev->gpio_desc = devm_get_gpiod_from_child(&dev->dev, "vcc", + &dev->dev.of_node->fwnode); + if (!IS_ERR_OR_NULL(usb_dev->gpio_desc)) + gpiod_direction_output(usb_dev->gpio_desc, 1); switch (dev->id.id) { case BCMA_CORE_NS_USB20: diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index b26b96e25a13..b7d623f1523c 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -436,7 +436,8 @@ static void qh_lines ( scratch = hc32_to_cpup(ehci, &hw->hw_info1); hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0; temp = scnprintf (next, size, - "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", + "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)" + " [cur %08x next %08x buf[0] %08x]", qh, scratch & 0x007f, speed_char (scratch), (scratch >> 8) & 0x000f, @@ -444,7 +445,10 @@ static void qh_lines ( hc32_to_cpup(ehci, &hw->hw_token), mark, (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token) ? 
"data1" : "data0", - (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f); + (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f, + hc32_to_cpup(ehci, &hw->hw_current), + hc32_to_cpup(ehci, &hw->hw_qtd_next), + hc32_to_cpup(ehci, &hw->hw_buf[0])); size -= temp; next += temp; @@ -464,7 +468,8 @@ static void qh_lines ( mark = '/'; } temp = snprintf (next, size, - "\n\t%p%c%s len=%d %08x urb %p", + "\n\t%p%c%s len=%d %08x urb %p" + " [td %08x buf[0] %08x]", td, mark, ({ char *tmp; switch ((scratch>>8)&0x03) { case 0: tmp = "out"; break; @@ -474,7 +479,9 @@ static void qh_lines ( } tmp;}), (scratch >> 16) & 0x7fff, scratch, - td->urb); + td->urb, + (u32) td->qtd_dma, + hc32_to_cpup(ehci, &td->hw_buf[0])); if (size < temp) temp = size; size -= temp; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 48c92bf78bd0..14178bbf0694 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -98,7 +98,7 @@ module_param (park, uint, S_IRUGO); MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets"); /* for flakey hardware, ignore overcurrent indicators */ -static bool ignore_oc = 0; +static bool ignore_oc; module_param (ignore_oc, bool, S_IRUGO); MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications"); diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index b6205fac3567..4de43011df23 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -128,21 +128,13 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci) ehci->dummy = NULL; /* DMA consistent memory and pools */ - if (ehci->qtd_pool) - dma_pool_destroy (ehci->qtd_pool); + dma_pool_destroy(ehci->qtd_pool); ehci->qtd_pool = NULL; - - if (ehci->qh_pool) { - dma_pool_destroy (ehci->qh_pool); - ehci->qh_pool = NULL; - } - - if (ehci->itd_pool) - dma_pool_destroy (ehci->itd_pool); + dma_pool_destroy(ehci->qh_pool); + ehci->qh_pool = NULL; + dma_pool_destroy(ehci->itd_pool); ehci->itd_pool = NULL; - - if (ehci->sitd_pool) - dma_pool_destroy (ehci->sitd_pool); + dma_pool_destroy(ehci->sitd_pool); ehci->sitd_pool = NULL; if (ehci->periodic) diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c index c4f84c81de01..c23e2858c815 100644 --- a/drivers/usb/host/ehci-msm.c +++ b/drivers/usb/host/ehci-msm.c @@ -57,8 +57,8 @@ static int ehci_msm_reset(struct usb_hcd *hcd) /* bursts of unspecified length. */ writel(0, USB_AHBBURST); - /* Use the AHB transactor */ - writel(0, USB_AHBMODE); + /* Use the AHB transactor, allow posted data writes */ + writel(0x8, USB_AHBMODE); /* Disable streaming mode and select host mode */ writel(0x13, USB_USBMODE); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 54f5332f814d..aad0777240d3 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -132,10 +132,14 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) * qtd is updated in qh_completions(). Update the QH * overlay here. 
*/ - if (qh->hw->hw_token & ACTIVE_BIT(ehci)) + if (qh->hw->hw_token & ACTIVE_BIT(ehci)) { qh->hw->hw_qtd_next = qtd->hw_next; - else + if (qh->should_be_inactive) + ehci_warn(ehci, "qh %p should be inactive!\n", qh); + } else { qh_update(ehci, qh, qtd); + } + qh->should_be_inactive = 0; } /*-------------------------------------------------------------------------*/ @@ -438,6 +442,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) (hw->hw_token & ACTIVE_BIT(ehci))) { token = hc32_to_cpu(ehci, hw->hw_token); hw->hw_token &= ~ACTIVE_BIT(ehci); + qh->should_be_inactive = 1; /* An unlink may leave an incomplete * async transaction in the TT buffer. diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 46f62e41bcde..ec61aedb0067 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -439,6 +439,7 @@ struct ehci_qh { unsigned dequeue_during_giveback:1; unsigned exception:1; /* got a fault, or an unlink was requested */ + unsigned should_be_inactive:1; }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index 1498061f0aea..f82ad5df1b0d 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c @@ -85,7 +85,7 @@ static struct usb_td __iomem *next_bd(struct usb_td __iomem *base, void fhci_push_dummy_bd(struct endpoint *ep) { - if (ep->already_pushed_dummy_bd == false) { + if (!ep->already_pushed_dummy_bd) { u16 td_status = in_be16(&ep->empty_td->status); out_be32(&ep->empty_td->buf_ptr, DUMMY_BD_BUFFER); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 760cb57e954e..04dcedfdebf8 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -99,13 +99,13 @@ static void io_watchdog_func(unsigned long _ohci); /* Some boards misreport power switching/overcurrent */ -static bool distrust_firmware = 1; +static bool distrust_firmware = true; module_param (distrust_firmware, bool, 0); MODULE_PARM_DESC (distrust_firmware, "true to distrust firmware power/overcurrent setup"); /* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */ -static bool no_handshake = 0; +static bool no_handshake; module_param (no_handshake, bool, 0); MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake"); diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index ba1bec7db026..e8c006e7a960 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -365,19 +365,19 @@ static int ohci_pxa_of_init(struct platform_device *pdev) if (!pdata) return -ENOMEM; - if (of_get_property(np, "marvell,enable-port1", NULL)) + if (of_property_read_bool(np, "marvell,enable-port1")) pdata->flags |= ENABLE_PORT1; - if (of_get_property(np, "marvell,enable-port2", NULL)) + if (of_property_read_bool(np, "marvell,enable-port2")) pdata->flags |= ENABLE_PORT2; - if (of_get_property(np, "marvell,enable-port3", NULL)) + if (of_property_read_bool(np, "marvell,enable-port3")) pdata->flags |= ENABLE_PORT3; - if (of_get_property(np, "marvell,port-sense-low", NULL)) + if (of_property_read_bool(np, "marvell,port-sense-low")) pdata->flags |= POWER_SENSE_LOW; - if (of_get_property(np, "marvell,power-control-low", NULL)) + if (of_property_read_bool(np, "marvell,power-control-low")) pdata->flags |= POWER_CONTROL_LOW; - if (of_get_property(np, "marvell,no-oc-protection", NULL)) + if (of_property_read_bool(np, "marvell,no-oc-protection")) pdata->flags |= NO_OC_PROTECTION; - if 
(of_get_property(np, "marvell,oc-mode-perport", NULL)) + if (of_property_read_bool(np, "marvell,oc-mode-perport")) pdata->flags |= OC_MODE_PERPORT; if (!of_property_read_u32(np, "marvell,power-on-delay", &tmp)) pdata->power_on_delay = tmp; diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 1f139d82cee0..bc74aca8a54c 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -394,8 +394,7 @@ static void ehci_quiesce(struct oxu_hcd *oxu) u32 temp; #ifdef DEBUG - if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) - BUG(); + BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)); #endif /* wait for any schedule enables/disables to take effect */ @@ -1709,9 +1708,8 @@ static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) #ifdef DEBUG assert_spin_locked(&oxu->lock); - if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED - && qh->qh_state != QH_STATE_UNLINK_WAIT)) - BUG(); + BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED + && qh->qh_state != QH_STATE_UNLINK_WAIT)); #endif /* stop async schedule right now? */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index f9400564cb72..26cb8c861e6e 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -984,24 +984,17 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) * Find the Legacy Support Capability register - * this is optional for xHCI host controllers. */ - ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET); - do { - if ((ext_cap_offset + sizeof(val)) > len) { - /* We're reading garbage from the controller */ - dev_warn(&pdev->dev, - "xHCI controller failing to respond"); - return; - } + ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY); - if (!ext_cap_offset) - /* We've reached the end of the extended capabilities */ - goto hc_init; + if (!ext_cap_offset) + goto hc_init; - val = readl(base + ext_cap_offset); - if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY) - break; - ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset); - } while (1); + if ((ext_cap_offset + sizeof(val)) > len) { + /* We're reading garbage from the controller */ + dev_warn(&pdev->dev, "xHCI controller failing to respond"); + return; + } + val = readl(base + ext_cap_offset); /* If the BIOS owns the HC, signal that the OS wants it, and wait */ if (val & XHCI_HC_BIOS_OWNED) { diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 692ccc69345e..05c85c7baf84 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -73,7 +73,7 @@ MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444) INT_MODULE_PARM(testing, 0); /* Some boards misreport power switching/overcurrent*/ -static bool distrust_firmware = 1; +static bool distrust_firmware = true; module_param(distrust_firmware, bool, 0); MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren" "t setup"); diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c index 1b28a000d5c6..9c6635d43db0 100644 --- a/drivers/usb/host/uhci-debug.c +++ b/drivers/usb/host/uhci-debug.c @@ -584,27 +584,8 @@ static int uhci_debug_open(struct inode *inode, struct file *file) static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence) { - struct uhci_debug *up; - loff_t new = -1; - - up = file->private_data; - - /* - * XXX: atomic 64bit seek access, but that needs to be fixed in the VFS - */ - switch (whence) { - case 0: - new = off; 
- break; - case 1: - new = file->f_pos + off; - break; - } - - if (new < 0 || new > up->size) - return -EINVAL; - - return (file->f_pos = new); + struct uhci_debug *up = file->private_data; + return no_seek_end_llseek_size(file, off, whence, up->size); } static ssize_t uhci_debug_read(struct file *file, char __user *buf, diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index da6f56d996ce..c17ea1589b83 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -248,11 +248,10 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, dma_addr_t dma_handle; struct uhci_qh *qh; - qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); + qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); if (!qh) return NULL; - memset(qh, 0, sizeof(*qh)); qh->dma_handle = dma_handle; qh->element = UHCI_PTR_TERM(uhci); diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 9f1c0538b211..1a8e960d073b 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -30,10 +30,9 @@ struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) struct whc_qset *qset; dma_addr_t dma; - qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma); + qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma); if (qset == NULL) return NULL; - memset(qset, 0, sizeof(struct whc_qset)); qset->qset_dma = dma; qset->whc = whc; @@ -400,7 +399,7 @@ static void urb_dequeue_work(struct work_struct *work) struct whc *whc = qset->whc; unsigned long flags; - if (wurb->is_async == true) + if (wurb->is_async) asl_update(whc, WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB | WUSBCMD_ASYNC_QSET_RM); diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index 9fe3225e6c61..04ce6b156b35 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -91,66 +91,39 @@ #include <linux/io.h> /** - * Return the next extended capability pointer register. - * - * @base PCI register base address. - * - * @ext_offset Offset of the 32-bit register that contains the extended - * capabilites pointer. If searching for the first extended capability, pass - * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability, - * pass in the offset of the current extended capability register. - * - * Returns 0 if there is no next extended capability register or returns the register offset - * from the PCI registers base address. - */ -static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset) -{ - u32 next; - - next = readl(base + ext_offset); - - if (ext_offset == XHCI_HCC_PARAMS_OFFSET) { - /* Find the first extended capability */ - next = XHCI_HCC_EXT_CAPS(next); - ext_offset = 0; - } else { - /* Find the next extended capability */ - next = XHCI_EXT_CAPS_NEXT(next); - } - - if (!next) - return 0; - /* - * Address calculation from offset of extended capabilities - * (or HCCPARAMS) register - see section 5.3.6 and section 7. - */ - return ext_offset + (next << 2); -} - -/** * Find the offset of the extended capabilities with capability ID id. * - * @base PCI MMIO registers base address. - * @ext_offset Offset from base of the first extended capability to look at, - * or the address of HCCPARAMS. - * @id Extended capability ID to search for. + * @base PCI MMIO registers base address. + * @start address at which to start looking, (0 or HCC_PARAMS to start at + * beginning of list) + * @id Extended capability ID to search for. 
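 *
 * (Editorial usage sketch, not part of this patch: pass 0 as @start to begin
 * at HCCPARAMS, then feed each returned offset back in to walk every
 * instance of a capability, e.g.
 *
 *	offset = 0;
 *	while ((offset = xhci_find_next_ext_cap(base, offset,
 *				XHCI_EXT_CAPS_PROTOCOL)))
 *		cap_count++;
 *
 * which is essentially how xhci_setup_port_arrays() below counts the
 * Supported Protocol capabilities.)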
* - * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities - * to make sure that the list doesn't contain a loop. + * Returns the offset of the next matching extended capability structure. + * Some capabilities can occur several times, e.g., the XHCI_EXT_CAPS_PROTOCOL, + * and this provides a way to find them all. */ -static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id) + +static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) { u32 val; - int limit = XHCI_MAX_EXT_CAPS; - - while (ext_offset && limit > 0) { - val = readl(base + ext_offset); - if (XHCI_EXT_CAPS_ID(val) == id) - break; - ext_offset = xhci_find_next_cap_offset(base, ext_offset); - limit--; - } - if (limit > 0) - return ext_offset; + u32 next; + u32 offset; + + offset = start; + if (!start || start == XHCI_HCC_PARAMS_OFFSET) { + val = readl(base + XHCI_HCC_PARAMS_OFFSET); + offset = XHCI_HCC_EXT_CAPS(val) << 2; + if (!offset) + return 0; + }; + do { + val = readl(base + offset); + if (XHCI_EXT_CAPS_ID(val) == id && offset != start) + return offset; + + next = XHCI_EXT_CAPS_NEXT(val); + offset += next << 2; + } while (next); + return 0; } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index f980c239eded..b30b4ce294d3 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -855,7 +855,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, xhci_hub_report_usb2_link_state(&status, raw_port_status); } if (bus_state->port_c_suspend & (1 << wIndex)) - status |= 1 << USB_PORT_FEAT_C_SUSPEND; + status |= USB_PORT_STAT_C_SUSPEND << 16; return status; } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c48cbe731356..5cd080e0a685 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -47,13 +47,12 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, if (!seg) return NULL; - seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); + seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma); if (!seg->trbs) { kfree(seg); return NULL; } - memset(seg->trbs, 0, TRB_SEGMENT_SIZE); /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++) @@ -517,12 +516,11 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci if (type == XHCI_CTX_TYPE_INPUT) ctx->size += CTX_SIZE(xhci->hcc_params); - ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); + ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma); if (!ctx->bytes) { kfree(ctx); return NULL; } - memset(ctx->bytes, 0, ctx->size); return ctx; } @@ -1245,7 +1243,7 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, interval = fls(desc_interval) - 1; interval = clamp_val(interval, min_exponent, max_exponent); if ((1 << interval) != desc_interval) - dev_warn(&udev->dev, + dev_dbg(&udev->dev, "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", ep->desc.bEndpointAddress, 1 << interval, @@ -2064,17 +2062,19 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) } static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, - __le32 __iomem *addr, u8 major_revision, int max_caps) + __le32 __iomem *addr, int max_caps) { u32 temp, port_offset, port_count; int i; + u8 major_revision; struct xhci_hub *rhub; temp = readl(addr); + major_revision = XHCI_EXT_PORT_MAJOR(temp); - if (XHCI_EXT_PORT_MAJOR(temp) == 0x03) { + 
if (major_revision == 0x03) { rhub = &xhci->usb3_rhub; - } else if (XHCI_EXT_PORT_MAJOR(temp) <= 0x02) { + } else if (major_revision <= 0x02) { rhub = &xhci->usb2_rhub; } else { xhci_warn(xhci, "Ignoring unknown port speed, " @@ -2190,19 +2190,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, */ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) { - __le32 __iomem *addr, *tmp_addr; - u32 offset, tmp_offset; + void __iomem *base; + u32 offset; unsigned int num_ports; int i, j, port_index; int cap_count = 0; - - addr = &xhci->cap_regs->hcc_params; - offset = XHCI_HCC_EXT_CAPS(readl(addr)); - if (offset == 0) { - xhci_err(xhci, "No Extended Capability registers, " - "unable to set up roothub.\n"); - return -ENODEV; - } + u32 cap_start; num_ports = HCS_MAX_PORTS(xhci->hcs_params1); xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); @@ -2220,48 +2213,34 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) for (j = 0; j < XHCI_MAX_INTERVAL; j++) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); } + base = &xhci->cap_regs->hc_capbase; - /* - * For whatever reason, the first capability offset is from the - * capability register base, not from the HCCPARAMS register. - * See section 5.3.6 for offset calculation. - */ - addr = &xhci->cap_regs->hc_capbase + offset; - - tmp_addr = addr; - tmp_offset = offset; + cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL); + if (!cap_start) { + xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n"); + return -ENODEV; + } + offset = cap_start; /* count extended protocol capability entries for later caching */ - do { - u32 cap_id; - cap_id = readl(tmp_addr); - if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) - cap_count++; - tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id); - tmp_addr += tmp_offset; - } while (tmp_offset); + while (offset) { + cap_count++; + offset = xhci_find_next_ext_cap(base, offset, + XHCI_EXT_CAPS_PROTOCOL); + } xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags); if (!xhci->ext_caps) return -ENOMEM; - while (1) { - u32 cap_id; - - cap_id = readl(addr); - if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) - xhci_add_in_port(xhci, num_ports, addr, - (u8) XHCI_EXT_PORT_MAJOR(cap_id), - cap_count); - offset = XHCI_EXT_CAPS_NEXT(cap_id); - if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) - == num_ports) + offset = cap_start; + + while (offset) { + xhci_add_in_port(xhci, num_ports, base + offset, cap_count); + if (xhci->num_usb2_ports + xhci->num_usb3_ports == num_ports) break; - /* - * Once you're into the Extended Capabilities, the offset is - * always relative to the register holding the offset. - */ - addr += offset; + offset = xhci_find_next_ext_cap(base, offset, + XHCI_EXT_CAPS_PROTOCOL); } if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c new file mode 100644 index 000000000000..c30de7c39f44 --- /dev/null +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: + * Zhigang.Wei <zhigang.wei@mediatek.com> + * Chunfeng.Yun <chunfeng.yun@mediatek.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "xhci.h" +#include "xhci-mtk.h" + +#define SS_BW_BOUNDARY 51000 +/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */ +#define HS_BW_BOUNDARY 6144 +/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */ +#define FS_PAYLOAD_MAX 188 + +/* mtk scheduler bitmasks */ +#define EP_BPKTS(p) ((p) & 0x3f) +#define EP_BCSCOUNT(p) (((p) & 0x7) << 8) +#define EP_BBM(p) ((p) << 11) +#define EP_BOFFSET(p) ((p) & 0x3fff) +#define EP_BREPEAT(p) (((p) & 0x7fff) << 16) + +static int is_fs_or_ls(enum usb_device_speed speed) +{ + return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW; +} + +/* +* get the index of bandwidth domains array which @ep belongs to. +* +* the bandwidth domain array is saved to @sch_array of struct xhci_hcd_mtk, +* each HS root port is treated as a single bandwidth domain, +* but each SS root port is treated as two bandwidth domains, one for IN eps, +* one for OUT eps. +* @real_port value is defined as follow according to xHCI spec: +* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc +* so the bandwidth domain array is organized as follow for simplification: +* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY +*/ +static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_virt_device *virt_dev; + int bw_index; + + virt_dev = xhci->devs[udev->slot_id]; + + if (udev->speed == USB_SPEED_SUPER) { + if (usb_endpoint_dir_out(&ep->desc)) + bw_index = (virt_dev->real_port - 1) * 2; + else + bw_index = (virt_dev->real_port - 1) * 2 + 1; + } else { + /* add one more for each SS port */ + bw_index = virt_dev->real_port + xhci->num_usb3_ports - 1; + } + + return bw_index; +} + +static void setup_sch_info(struct usb_device *udev, + struct xhci_ep_ctx *ep_ctx, struct mu3h_sch_ep_info *sch_ep) +{ + u32 ep_type; + u32 ep_interval; + u32 max_packet_size; + u32 max_burst; + u32 mult; + u32 esit_pkts; + + ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); + ep_interval = CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info)); + max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); + max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2)); + mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info)); + + sch_ep->esit = 1 << ep_interval; + sch_ep->offset = 0; + sch_ep->burst_mode = 0; + + if (udev->speed == USB_SPEED_HIGH) { + sch_ep->cs_count = 0; + + /* + * usb_20 spec section5.9 + * a single microframe is enough for HS synchromous endpoints + * in a interval + */ + sch_ep->num_budget_microframes = 1; + sch_ep->repeat = 0; + + /* + * xHCI spec section6.2.3.4 + * @max_burst is the number of additional transactions + * opportunities per microframe + */ + sch_ep->pkts = max_burst + 1; + sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts; + } else if (udev->speed == USB_SPEED_SUPER) { + /* usb3_r1 spec section4.4.7 & 4.4.8 */ + sch_ep->cs_count = 0; + esit_pkts = (mult + 1) * (max_burst + 1); + if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) { + sch_ep->pkts = esit_pkts; + sch_ep->num_budget_microframes = 1; + sch_ep->repeat = 0; + } + + if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) 
{ + if (esit_pkts <= sch_ep->esit) + sch_ep->pkts = 1; + else + sch_ep->pkts = roundup_pow_of_two(esit_pkts) + / sch_ep->esit; + + sch_ep->num_budget_microframes = + DIV_ROUND_UP(esit_pkts, sch_ep->pkts); + + if (sch_ep->num_budget_microframes > 1) + sch_ep->repeat = 1; + else + sch_ep->repeat = 0; + } + sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts; + } else if (is_fs_or_ls(udev->speed)) { + + /* + * usb_20 spec section11.18.4 + * assume worst cases + */ + sch_ep->repeat = 0; + sch_ep->pkts = 1; /* at most one packet for each microframe */ + if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) { + sch_ep->cs_count = 3; /* at most need 3 CS*/ + /* one for SS and one for budgeted transaction */ + sch_ep->num_budget_microframes = sch_ep->cs_count + 2; + sch_ep->bw_cost_per_microframe = max_packet_size; + } + if (ep_type == ISOC_OUT_EP) { + + /* + * the best case FS budget assumes that 188 FS bytes + * occur in each microframe + */ + sch_ep->num_budget_microframes = DIV_ROUND_UP( + max_packet_size, FS_PAYLOAD_MAX); + sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX; + sch_ep->cs_count = sch_ep->num_budget_microframes; + } + if (ep_type == ISOC_IN_EP) { + /* at most need additional two CS. */ + sch_ep->cs_count = DIV_ROUND_UP( + max_packet_size, FS_PAYLOAD_MAX) + 2; + sch_ep->num_budget_microframes = sch_ep->cs_count + 2; + sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX; + } + } +} + +/* Get maximum bandwidth when we schedule at offset slot. */ +static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw, + struct mu3h_sch_ep_info *sch_ep, u32 offset) +{ + u32 num_esit; + u32 max_bw = 0; + int i; + int j; + + num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + for (i = 0; i < num_esit; i++) { + u32 base = offset + i * sch_ep->esit; + + for (j = 0; j < sch_ep->num_budget_microframes; j++) { + if (sch_bw->bus_bw[base + j] > max_bw) + max_bw = sch_bw->bus_bw[base + j]; + } + } + return max_bw; +} + +static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, + struct mu3h_sch_ep_info *sch_ep, int bw_cost) +{ + u32 num_esit; + u32 base; + int i; + int j; + + num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + for (i = 0; i < num_esit; i++) { + base = sch_ep->offset + i * sch_ep->esit; + for (j = 0; j < sch_ep->num_budget_microframes; j++) + sch_bw->bus_bw[base + j] += bw_cost; + } +} + +static int check_sch_bw(struct usb_device *udev, + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) +{ + u32 offset; + u32 esit; + u32 num_budget_microframes; + u32 min_bw; + u32 min_index; + u32 worst_bw; + u32 bw_boundary; + + if (sch_ep->esit > XHCI_MTK_MAX_ESIT) + sch_ep->esit = XHCI_MTK_MAX_ESIT; + + esit = sch_ep->esit; + num_budget_microframes = sch_ep->num_budget_microframes; + + /* + * Search through all possible schedule microframes. + * and find a microframe where its worst bandwidth is minimum. + */ + min_bw = ~0; + min_index = 0; + for (offset = 0; offset < esit; offset++) { + if ((offset + num_budget_microframes) > sch_ep->esit) + break; + + /* + * usb_20 spec section11.18: + * must never schedule Start-Split in Y6 + */ + if (is_fs_or_ls(udev->speed) && (offset % 8 == 6)) + continue; + + worst_bw = get_max_bw(sch_bw, sch_ep, offset); + if (min_bw > worst_bw) { + min_bw = worst_bw; + min_index = offset; + } + if (min_bw == 0) + break; + } + sch_ep->offset = min_index; + + bw_boundary = (udev->speed == USB_SPEED_SUPER) + ? 
SS_BW_BOUNDARY : HS_BW_BOUNDARY; + + /* check bandwidth */ + if (min_bw + sch_ep->bw_cost_per_microframe > bw_boundary) + return -ERANGE; + + /* update bus bandwidth info */ + update_bus_bw(sch_bw, sch_ep, sch_ep->bw_cost_per_microframe); + + return 0; +} + +static bool need_bw_sch(struct usb_host_endpoint *ep, + enum usb_device_speed speed, int has_tt) +{ + /* only for periodic endpoints */ + if (usb_endpoint_xfer_control(&ep->desc) + || usb_endpoint_xfer_bulk(&ep->desc)) + return false; + + /* + * for LS & FS periodic endpoints which its device don't attach + * to TT are also ignored, root-hub will schedule them directly + */ + if (is_fs_or_ls(speed) && !has_tt) + return false; + + return true; +} + +int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) +{ + struct mu3h_sch_bw_info *sch_array; + int num_usb_bus; + int i; + + /* ss IN and OUT are separated */ + num_usb_bus = mtk->num_u3_ports * 2 + mtk->num_u2_ports; + + sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL); + if (sch_array == NULL) + return -ENOMEM; + + for (i = 0; i < num_usb_bus; i++) + INIT_LIST_HEAD(&sch_array[i].bw_ep_list); + + mtk->sch_array = sch_array; + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); + +void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk) +{ + kfree(mtk->sch_array); +} +EXPORT_SYMBOL_GPL(xhci_mtk_sch_exit); + +int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci; + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_virt_device *virt_dev; + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep; + struct mu3h_sch_bw_info *sch_array; + unsigned int ep_index; + int bw_index; + int ret = 0; + + xhci = hcd_to_xhci(hcd); + virt_dev = xhci->devs[udev->slot_id]; + ep_index = xhci_get_endpoint_index(&ep->desc); + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + sch_array = mtk->sch_array; + + xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", + __func__, usb_endpoint_type(&ep->desc), udev->speed, + GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), + usb_endpoint_dir_in(&ep->desc), ep); + + if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) + return 0; + + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &sch_array[bw_index]; + + sch_ep = kzalloc(sizeof(struct mu3h_sch_ep_info), GFP_NOIO); + if (!sch_ep) + return -ENOMEM; + + setup_sch_info(udev, ep_ctx, sch_ep); + + ret = check_sch_bw(udev, sch_bw, sch_ep); + if (ret) { + xhci_err(xhci, "Not enough bandwidth!\n"); + kfree(sch_ep); + return -ENOSPC; + } + + list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); + sch_ep->ep = ep; + + ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) + | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); + ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) + | EP_BREPEAT(sch_ep->repeat)); + + xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", + sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, + sch_ep->offset, sch_ep->repeat); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_mtk_add_ep_quirk); + +void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci; + struct xhci_slot_ctx *slot_ctx; + struct xhci_virt_device *virt_dev; + struct mu3h_sch_bw_info *sch_array; + struct mu3h_sch_bw_info *sch_bw; + 
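	/*
	 * Editorial worked example for the scheduler above, not part of the
	 * patch: a SuperSpeed isochronous endpoint with Interval field 3
	 * (esit = 1 << 3 = 8 microframes), Max Burst 3 and Mult 0 gives
	 * esit_pkts = (0 + 1) * (3 + 1) = 4 in setup_sch_info(), hence
	 * pkts = 1, four budgeted microframes, repeat = 1 and a
	 * per-microframe cost of one max-size packet.  check_sch_bw() then
	 * slides that 4-microframe window across the 8-microframe interval,
	 * keeps the offset whose worst-case bus_bw[] entry is smallest, and
	 * accepts the endpoint only if that worst case plus the new cost
	 * does not exceed SS_BW_BOUNDARY.
	 */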
struct mu3h_sch_ep_info *sch_ep; + int bw_index; + + xhci = hcd_to_xhci(hcd); + virt_dev = xhci->devs[udev->slot_id]; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + sch_array = mtk->sch_array; + + xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n", + __func__, usb_endpoint_type(&ep->desc), udev->speed, + GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), + usb_endpoint_dir_in(&ep->desc), ep); + + if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) + return; + + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &sch_array[bw_index]; + + list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { + if (sch_ep->ep == ep) { + update_bus_bw(sch_bw, sch_ep, + -sch_ep->bw_cost_per_microframe); + list_del(&sch_ep->endpoint); + kfree(sch_ep); + break; + } + } +} +EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c new file mode 100644 index 000000000000..c9ab6a44c34a --- /dev/null +++ b/drivers/usb/host/xhci-mtk.c @@ -0,0 +1,763 @@ +/* + * MediaTek xHCI Host Controller Driver + * + * Copyright (c) 2015 MediaTek Inc. + * Author: + * Chunfeng Yun <chunfeng.yun@mediatek.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include "xhci.h" +#include "xhci-mtk.h" + +/* ip_pw_ctrl0 register */ +#define CTRL0_IP_SW_RST BIT(0) + +/* ip_pw_ctrl1 register */ +#define CTRL1_IP_HOST_PDN BIT(0) + +/* ip_pw_ctrl2 register */ +#define CTRL2_IP_DEV_PDN BIT(0) + +/* ip_pw_sts1 register */ +#define STS1_IP_SLEEP_STS BIT(30) +#define STS1_XHCI_RST BIT(11) +#define STS1_SYS125_RST BIT(10) +#define STS1_REF_RST BIT(8) +#define STS1_SYSPLL_STABLE BIT(0) + +/* ip_xhci_cap register */ +#define CAP_U3_PORT_NUM(p) ((p) & 0xff) +#define CAP_U2_PORT_NUM(p) (((p) >> 8) & 0xff) + +/* u3_ctrl_p register */ +#define CTRL_U3_PORT_HOST_SEL BIT(2) +#define CTRL_U3_PORT_PDN BIT(1) +#define CTRL_U3_PORT_DIS BIT(0) + +/* u2_ctrl_p register */ +#define CTRL_U2_PORT_HOST_SEL BIT(2) +#define CTRL_U2_PORT_PDN BIT(1) +#define CTRL_U2_PORT_DIS BIT(0) + +/* u2_phy_pll register */ +#define CTRL_U2_FORCE_PLL_STB BIT(28) + +#define PERI_WK_CTRL0 0x400 +#define UWK_CTR0_0P_LS_PE BIT(8) /* posedge */ +#define UWK_CTR0_0P_LS_NE BIT(7) /* negedge for 0p linestate*/ +#define UWK_CTL1_1P_LS_C(x) (((x) & 0xf) << 1) +#define UWK_CTL1_1P_LS_E BIT(0) + +#define PERI_WK_CTRL1 0x404 +#define UWK_CTL1_IS_C(x) (((x) & 0xf) << 26) +#define UWK_CTL1_IS_E BIT(25) +#define UWK_CTL1_0P_LS_C(x) (((x) & 0xf) << 21) +#define UWK_CTL1_0P_LS_E BIT(20) +#define UWK_CTL1_IDDIG_C(x) (((x) & 0xf) << 11) /* cycle debounce */ +#define UWK_CTL1_IDDIG_E BIT(10) /* enable debounce */ +#define UWK_CTL1_IDDIG_P BIT(9) /* polarity */ +#define UWK_CTL1_0P_LS_P BIT(7) +#define UWK_CTL1_IS_P BIT(6) /* polarity for ip 
sleep */ + +enum ssusb_wakeup_src { + SSUSB_WK_IP_SLEEP = 1, + SSUSB_WK_LINE_STATE = 2, +}; + +static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk) +{ + struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs; + u32 value, check_val; + int ret; + int i; + + /* power on host ip */ + value = readl(&ippc->ip_pw_ctr1); + value &= ~CTRL1_IP_HOST_PDN; + writel(value, &ippc->ip_pw_ctr1); + + /* power on and enable all u3 ports */ + for (i = 0; i < mtk->num_u3_ports; i++) { + value = readl(&ippc->u3_ctrl_p[i]); + value &= ~(CTRL_U3_PORT_PDN | CTRL_U3_PORT_DIS); + value |= CTRL_U3_PORT_HOST_SEL; + writel(value, &ippc->u3_ctrl_p[i]); + } + + /* power on and enable all u2 ports */ + for (i = 0; i < mtk->num_u2_ports; i++) { + value = readl(&ippc->u2_ctrl_p[i]); + value &= ~(CTRL_U2_PORT_PDN | CTRL_U2_PORT_DIS); + value |= CTRL_U2_PORT_HOST_SEL; + writel(value, &ippc->u2_ctrl_p[i]); + } + + /* + * wait for clocks to be stable, and clock domains reset to + * be inactive after power on and enable ports + */ + check_val = STS1_SYSPLL_STABLE | STS1_REF_RST | + STS1_SYS125_RST | STS1_XHCI_RST; + + ret = readl_poll_timeout(&ippc->ip_pw_sts1, value, + (check_val == (value & check_val)), 100, 20000); + if (ret) { + dev_err(mtk->dev, "clocks are not stable (0x%x)\n", value); + return ret; + } + + return 0; +} + +static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk) +{ + struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs; + u32 value; + int ret; + int i; + + /* power down all u3 ports */ + for (i = 0; i < mtk->num_u3_ports; i++) { + value = readl(&ippc->u3_ctrl_p[i]); + value |= CTRL_U3_PORT_PDN; + writel(value, &ippc->u3_ctrl_p[i]); + } + + /* power down all u2 ports */ + for (i = 0; i < mtk->num_u2_ports; i++) { + value = readl(&ippc->u2_ctrl_p[i]); + value |= CTRL_U2_PORT_PDN; + writel(value, &ippc->u2_ctrl_p[i]); + } + + /* power down host ip */ + value = readl(&ippc->ip_pw_ctr1); + value |= CTRL1_IP_HOST_PDN; + writel(value, &ippc->ip_pw_ctr1); + + /* wait for host ip to sleep */ + ret = readl_poll_timeout(&ippc->ip_pw_sts1, value, + (value & STS1_IP_SLEEP_STS), 100, 100000); + if (ret) { + dev_err(mtk->dev, "ip sleep failed!!!\n"); + return ret; + } + return 0; +} + +static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk) +{ + struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs; + u32 value; + + /* reset whole ip */ + value = readl(&ippc->ip_pw_ctr0); + value |= CTRL0_IP_SW_RST; + writel(value, &ippc->ip_pw_ctr0); + udelay(1); + value = readl(&ippc->ip_pw_ctr0); + value &= ~CTRL0_IP_SW_RST; + writel(value, &ippc->ip_pw_ctr0); + + /* + * device ip is default power-on in fact + * power down device ip, otherwise ip-sleep will fail + */ + value = readl(&ippc->ip_pw_ctr2); + value |= CTRL2_IP_DEV_PDN; + writel(value, &ippc->ip_pw_ctr2); + + value = readl(&ippc->ip_xhci_cap); + mtk->num_u3_ports = CAP_U3_PORT_NUM(value); + mtk->num_u2_ports = CAP_U2_PORT_NUM(value); + dev_dbg(mtk->dev, "%s u2p:%d, u3p:%d\n", __func__, + mtk->num_u2_ports, mtk->num_u3_ports); + + return xhci_mtk_host_enable(mtk); +} + +static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk) +{ + int ret; + + ret = clk_prepare_enable(mtk->sys_clk); + if (ret) { + dev_err(mtk->dev, "failed to enable sys_clk\n"); + goto sys_clk_err; + } + + if (mtk->wakeup_src) { + ret = clk_prepare_enable(mtk->wk_deb_p0); + if (ret) { + dev_err(mtk->dev, "failed to enable wk_deb_p0\n"); + goto usb_p0_err; + } + + ret = clk_prepare_enable(mtk->wk_deb_p1); + if (ret) { + dev_err(mtk->dev, "failed to enable wk_deb_p1\n"); + goto usb_p1_err; + } + } + 
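	/*
	 * (Editorial note: the debounce clocks above are taken only when a
	 * wakeup source is configured; the error labels below release, in
	 * reverse order, whatever was enabled before the failure.)
	 */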
return 0; + +usb_p1_err: + clk_disable_unprepare(mtk->wk_deb_p0); +usb_p0_err: + clk_disable_unprepare(mtk->sys_clk); +sys_clk_err: + return -EINVAL; +} + +static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk) +{ + if (mtk->wakeup_src) { + clk_disable_unprepare(mtk->wk_deb_p1); + clk_disable_unprepare(mtk->wk_deb_p0); + } + clk_disable_unprepare(mtk->sys_clk); +} + +/* only clocks can be turn off for ip-sleep wakeup mode */ +static void usb_wakeup_ip_sleep_en(struct xhci_hcd_mtk *mtk) +{ + u32 tmp; + struct regmap *pericfg = mtk->pericfg; + + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + tmp &= ~UWK_CTL1_IS_P; + tmp &= ~(UWK_CTL1_IS_C(0xf)); + tmp |= UWK_CTL1_IS_C(0x8); + regmap_write(pericfg, PERI_WK_CTRL1, tmp); + regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E); + + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + dev_dbg(mtk->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n", + __func__, tmp); +} + +static void usb_wakeup_ip_sleep_dis(struct xhci_hcd_mtk *mtk) +{ + u32 tmp; + + regmap_read(mtk->pericfg, PERI_WK_CTRL1, &tmp); + tmp &= ~UWK_CTL1_IS_E; + regmap_write(mtk->pericfg, PERI_WK_CTRL1, tmp); +} + +/* +* for line-state wakeup mode, phy's power should not power-down +* and only support cable plug in/out +*/ +static void usb_wakeup_line_state_en(struct xhci_hcd_mtk *mtk) +{ + u32 tmp; + struct regmap *pericfg = mtk->pericfg; + + /* line-state of u2-port0 */ + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + tmp &= ~UWK_CTL1_0P_LS_P; + tmp &= ~(UWK_CTL1_0P_LS_C(0xf)); + tmp |= UWK_CTL1_0P_LS_C(0x8); + regmap_write(pericfg, PERI_WK_CTRL1, tmp); + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_0P_LS_E); + + /* line-state of u2-port1 */ + regmap_read(pericfg, PERI_WK_CTRL0, &tmp); + tmp &= ~(UWK_CTL1_1P_LS_C(0xf)); + tmp |= UWK_CTL1_1P_LS_C(0x8); + regmap_write(pericfg, PERI_WK_CTRL0, tmp); + regmap_write(pericfg, PERI_WK_CTRL0, tmp | UWK_CTL1_1P_LS_E); +} + +static void usb_wakeup_line_state_dis(struct xhci_hcd_mtk *mtk) +{ + u32 tmp; + struct regmap *pericfg = mtk->pericfg; + + /* line-state of u2-port0 */ + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + tmp &= ~UWK_CTL1_0P_LS_E; + regmap_write(pericfg, PERI_WK_CTRL1, tmp); + + /* line-state of u2-port1 */ + regmap_read(pericfg, PERI_WK_CTRL0, &tmp); + tmp &= ~UWK_CTL1_1P_LS_E; + regmap_write(pericfg, PERI_WK_CTRL0, tmp); +} + +static void usb_wakeup_enable(struct xhci_hcd_mtk *mtk) +{ + if (mtk->wakeup_src == SSUSB_WK_IP_SLEEP) + usb_wakeup_ip_sleep_en(mtk); + else if (mtk->wakeup_src == SSUSB_WK_LINE_STATE) + usb_wakeup_line_state_en(mtk); +} + +static void usb_wakeup_disable(struct xhci_hcd_mtk *mtk) +{ + if (mtk->wakeup_src == SSUSB_WK_IP_SLEEP) + usb_wakeup_ip_sleep_dis(mtk); + else if (mtk->wakeup_src == SSUSB_WK_LINE_STATE) + usb_wakeup_line_state_dis(mtk); +} + +static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk, + struct device_node *dn) +{ + struct device *dev = mtk->dev; + + /* + * wakeup function is optional, so it is not an error if this property + * does not exist, and in such case, no need to get relative + * properties anymore. 
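	 *
	 * (Editorial note: the optional pieces read below are the
	 * "mediatek,wakeup-src" value, the "wakeup_deb_p0"/"wakeup_deb_p1"
	 * debounce clocks and the "mediatek,syscon-wakeup" phandle to the
	 * pericfg syscon.)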
+ */ + of_property_read_u32(dn, "mediatek,wakeup-src", &mtk->wakeup_src); + if (!mtk->wakeup_src) + return 0; + + mtk->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0"); + if (IS_ERR(mtk->wk_deb_p0)) { + dev_err(dev, "fail to get wakeup_deb_p0\n"); + return PTR_ERR(mtk->wk_deb_p0); + } + + mtk->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1"); + if (IS_ERR(mtk->wk_deb_p1)) { + dev_err(dev, "fail to get wakeup_deb_p1\n"); + return PTR_ERR(mtk->wk_deb_p1); + } + + mtk->pericfg = syscon_regmap_lookup_by_phandle(dn, + "mediatek,syscon-wakeup"); + if (IS_ERR(mtk->pericfg)) { + dev_err(dev, "fail to get pericfg regs\n"); + return PTR_ERR(mtk->pericfg); + } + + return 0; +} + +static int xhci_mtk_setup(struct usb_hcd *hcd); +static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { + .extra_priv_size = sizeof(struct xhci_hcd), + .reset = xhci_mtk_setup, +}; + +static struct hc_driver __read_mostly xhci_mtk_hc_driver; + +static int xhci_mtk_phy_init(struct xhci_hcd_mtk *mtk) +{ + int i; + int ret; + + for (i = 0; i < mtk->num_phys; i++) { + ret = phy_init(mtk->phys[i]); + if (ret) + goto exit_phy; + } + return 0; + +exit_phy: + for (; i > 0; i--) + phy_exit(mtk->phys[i - 1]); + + return ret; +} + +static int xhci_mtk_phy_exit(struct xhci_hcd_mtk *mtk) +{ + int i; + + for (i = 0; i < mtk->num_phys; i++) + phy_exit(mtk->phys[i]); + + return 0; +} + +static int xhci_mtk_phy_power_on(struct xhci_hcd_mtk *mtk) +{ + int i; + int ret; + + for (i = 0; i < mtk->num_phys; i++) { + ret = phy_power_on(mtk->phys[i]); + if (ret) + goto power_off_phy; + } + return 0; + +power_off_phy: + for (; i > 0; i--) + phy_power_off(mtk->phys[i - 1]); + + return ret; +} + +static void xhci_mtk_phy_power_off(struct xhci_hcd_mtk *mtk) +{ + unsigned int i; + + for (i = 0; i < mtk->num_phys; i++) + phy_power_off(mtk->phys[i]); +} + +static int xhci_mtk_ldos_enable(struct xhci_hcd_mtk *mtk) +{ + int ret; + + ret = regulator_enable(mtk->vbus); + if (ret) { + dev_err(mtk->dev, "failed to enable vbus\n"); + return ret; + } + + ret = regulator_enable(mtk->vusb33); + if (ret) { + dev_err(mtk->dev, "failed to enable vusb33\n"); + regulator_disable(mtk->vbus); + return ret; + } + return 0; +} + +static void xhci_mtk_ldos_disable(struct xhci_hcd_mtk *mtk) +{ + regulator_disable(mtk->vbus); + regulator_disable(mtk->vusb33); +} + +static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + + /* + * As of now platform drivers don't provide MSI support so we ensure + * here that the generic code does not try to make a pci_dev from our + * dev struct in order to setup MSI + */ + xhci->quirks |= XHCI_PLAT; + xhci->quirks |= XHCI_MTK_HOST; + /* + * MTK host controller gives a spurious successful event after a + * short transfer. Ignore it. 
+ */ + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + if (mtk->lpm_support) + xhci->quirks |= XHCI_LPM_SUPPORT; +} + +/* called during probe() after chip reset completes */ +static int xhci_mtk_setup(struct usb_hcd *hcd) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + int ret; + + if (usb_hcd_is_primary_hcd(hcd)) { + ret = xhci_mtk_ssusb_config(mtk); + if (ret) + return ret; + ret = xhci_mtk_sch_init(mtk); + if (ret) + return ret; + } + + return xhci_gen_setup(hcd, xhci_mtk_quirks); +} + +static int xhci_mtk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct xhci_hcd_mtk *mtk; + const struct hc_driver *driver; + struct xhci_hcd *xhci; + struct resource *res; + struct usb_hcd *hcd; + struct phy *phy; + int phy_num; + int ret = -ENODEV; + int irq; + + if (usb_disabled()) + return -ENODEV; + + driver = &xhci_mtk_hc_driver; + mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL); + if (!mtk) + return -ENOMEM; + + mtk->dev = dev; + mtk->vbus = devm_regulator_get(dev, "vbus"); + if (IS_ERR(mtk->vbus)) { + dev_err(dev, "fail to get vbus\n"); + return PTR_ERR(mtk->vbus); + } + + mtk->vusb33 = devm_regulator_get(dev, "vusb33"); + if (IS_ERR(mtk->vusb33)) { + dev_err(dev, "fail to get vusb33\n"); + return PTR_ERR(mtk->vusb33); + } + + mtk->sys_clk = devm_clk_get(dev, "sys_ck"); + if (IS_ERR(mtk->sys_clk)) { + dev_err(dev, "fail to get sys_ck\n"); + return PTR_ERR(mtk->sys_clk); + } + + mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable"); + + ret = usb_wakeup_of_property_parse(mtk, node); + if (ret) + return ret; + + mtk->num_phys = of_count_phandle_with_args(node, + "phys", "#phy-cells"); + if (mtk->num_phys > 0) { + mtk->phys = devm_kcalloc(dev, mtk->num_phys, + sizeof(*mtk->phys), GFP_KERNEL); + if (!mtk->phys) + return -ENOMEM; + } else { + mtk->num_phys = 0; + } + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + device_enable_async_suspend(dev); + + ret = xhci_mtk_ldos_enable(mtk); + if (ret) + goto disable_pm; + + ret = xhci_mtk_clks_enable(mtk); + if (ret) + goto disable_ldos; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + goto disable_clk; + + /* Initialize dma_mask and coherent_dma_mask to 32-bits */ + ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (ret) + goto disable_clk; + + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + else + dma_set_mask(dev, DMA_BIT_MASK(32)); + + hcd = usb_create_hcd(driver, dev, dev_name(dev)); + if (!hcd) { + ret = -ENOMEM; + goto disable_clk; + } + + /* + * USB 2.0 roothub is stored in the platform_device. + * Swap it with mtk HCD. 
+ */ + mtk->hcd = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, mtk); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hcd->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(hcd->regs)) { + ret = PTR_ERR(hcd->regs); + goto put_usb2_hcd; + } + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + mtk->ippc_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(mtk->ippc_regs)) { + ret = PTR_ERR(mtk->ippc_regs); + goto put_usb2_hcd; + } + + for (phy_num = 0; phy_num < mtk->num_phys; phy_num++) { + phy = devm_of_phy_get_by_index(dev, node, phy_num); + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + goto put_usb2_hcd; + } + mtk->phys[phy_num] = phy; + } + + ret = xhci_mtk_phy_init(mtk); + if (ret) + goto put_usb2_hcd; + + ret = xhci_mtk_phy_power_on(mtk); + if (ret) + goto exit_phys; + + device_init_wakeup(dev, true); + + xhci = hcd_to_xhci(hcd); + xhci->main_hcd = hcd; + xhci->shared_hcd = usb_create_shared_hcd(driver, dev, + dev_name(dev), hcd); + if (!xhci->shared_hcd) { + ret = -ENOMEM; + goto power_off_phys; + } + + if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + xhci->shared_hcd->can_do_streams = 1; + + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (ret) + goto put_usb3_hcd; + + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); + if (ret) + goto dealloc_usb2_hcd; + + return 0; + +dealloc_usb2_hcd: + usb_remove_hcd(hcd); + +put_usb3_hcd: + xhci_mtk_sch_exit(mtk); + usb_put_hcd(xhci->shared_hcd); + +power_off_phys: + xhci_mtk_phy_power_off(mtk); + device_init_wakeup(dev, false); + +exit_phys: + xhci_mtk_phy_exit(mtk); + +put_usb2_hcd: + usb_put_hcd(hcd); + +disable_clk: + xhci_mtk_clks_disable(mtk); + +disable_ldos: + xhci_mtk_ldos_disable(mtk); + +disable_pm: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return ret; +} + +static int xhci_mtk_remove(struct platform_device *dev) +{ + struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); + struct usb_hcd *hcd = mtk->hcd; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + usb_remove_hcd(xhci->shared_hcd); + xhci_mtk_phy_power_off(mtk); + xhci_mtk_phy_exit(mtk); + device_init_wakeup(&dev->dev, false); + + usb_remove_hcd(hcd); + usb_put_hcd(xhci->shared_hcd); + usb_put_hcd(hcd); + xhci_mtk_sch_exit(mtk); + xhci_mtk_clks_disable(mtk); + xhci_mtk_ldos_disable(mtk); + pm_runtime_put_sync(&dev->dev); + pm_runtime_disable(&dev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int xhci_mtk_suspend(struct device *dev) +{ + struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); + + xhci_mtk_host_disable(mtk); + xhci_mtk_phy_power_off(mtk); + xhci_mtk_clks_disable(mtk); + usb_wakeup_enable(mtk); + return 0; +} + +static int xhci_mtk_resume(struct device *dev) +{ + struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); + + usb_wakeup_disable(mtk); + xhci_mtk_clks_enable(mtk); + xhci_mtk_phy_power_on(mtk); + xhci_mtk_host_enable(mtk); + return 0; +} + +static const struct dev_pm_ops xhci_mtk_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xhci_mtk_suspend, xhci_mtk_resume) +}; +#define DEV_PM_OPS (&xhci_mtk_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + +#ifdef CONFIG_OF +static const struct of_device_id mtk_xhci_of_match[] = { + { .compatible = "mediatek,mt8173-xhci"}, + { }, +}; +MODULE_DEVICE_TABLE(of, mtk_xhci_of_match); +#endif + +static struct platform_driver mtk_xhci_driver = { + .probe = xhci_mtk_probe, + .remove = xhci_mtk_remove, + .driver = { + .name = "xhci-mtk", + .pm = DEV_PM_OPS, + .of_match_table = of_match_ptr(mtk_xhci_of_match), + }, 
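	/*
	 * (Editorial note on the PM callbacks above: xhci_mtk_suspend()
	 * powers down in host -> PHY -> clock order and then arms the
	 * selected ip-sleep or line-state wakeup source, while
	 * xhci_mtk_resume() disarms the wakeup source first and brings
	 * clocks, PHYs and the host IP back up in reverse order.)
	 */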
+}; +MODULE_ALIAS("platform:xhci-mtk"); + +static int __init xhci_mtk_init(void) +{ + xhci_init_driver(&xhci_mtk_hc_driver, &xhci_mtk_overrides); + return platform_driver_register(&mtk_xhci_driver); +} +module_init(xhci_mtk_init); + +static void __exit xhci_mtk_exit(void) +{ + platform_driver_unregister(&mtk_xhci_driver); +} +module_exit(xhci_mtk_exit); + +MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek xHCI Host Controller Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h new file mode 100644 index 000000000000..7da677c79ea8 --- /dev/null +++ b/drivers/usb/host/xhci-mtk.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: + * Zhigang.Wei <zhigang.wei@mediatek.com> + * Chunfeng.Yun <chunfeng.yun@mediatek.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _XHCI_MTK_H_ +#define _XHCI_MTK_H_ + +#include "xhci.h" + +/** + * To simplify scheduler algorithm, set a upper limit for ESIT, + * if a synchromous ep's ESIT is larger than @XHCI_MTK_MAX_ESIT, + * round down to the limit value, that means allocating more + * bandwidth to it. + */ +#define XHCI_MTK_MAX_ESIT 64 + +/** + * struct mu3h_sch_bw_info: schedule information for bandwidth domain + * + * @bus_bw: array to keep track of bandwidth already used at each uframes + * @bw_ep_list: eps in the bandwidth domain + * + * treat a HS root port as a bandwidth domain, but treat a SS root port as + * two bandwidth domains, one for IN eps and another for OUT eps. + */ +struct mu3h_sch_bw_info { + u32 bus_bw[XHCI_MTK_MAX_ESIT]; + struct list_head bw_ep_list; +}; + +/** + * struct mu3h_sch_ep_info: schedule information for endpoint + * + * @esit: unit is 125us, equal to 2 << Interval field in ep-context + * @num_budget_microframes: number of continuous uframes + * (@repeat==1) scheduled within the interval + * @bw_cost_per_microframe: bandwidth cost per microframe + * @endpoint: linked into bandwidth domain which it belongs to + * @ep: address of usb_host_endpoint struct + * @offset: which uframe of the interval that transfer should be + * scheduled first time within the interval + * @repeat: the time gap between two uframes that transfers are + * scheduled within a interval. in the simple algorithm, only + * assign 0 or 1 to it; 0 means using only one uframe in a + * interval, and 1 means using @num_budget_microframes + * continuous uframes + * @pkts: number of packets to be transferred in the scheduled uframes + * @cs_count: number of CS that host will trigger + * @burst_mode: burst mode for scheduling. 0: normal burst mode, + * distribute the bMaxBurst+1 packets for a single burst + * according to @pkts and @repeat, repeate the burst multiple + * times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets + * according to @pkts and @repeat. 
normal mode is used by + * default + */ +struct mu3h_sch_ep_info { + u32 esit; + u32 num_budget_microframes; + u32 bw_cost_per_microframe; + struct list_head endpoint; + void *ep; + /* + * mtk xHCI scheduling information put into reserved DWs + * in ep context + */ + u32 offset; + u32 repeat; + u32 pkts; + u32 cs_count; + u32 burst_mode; +}; + +#define MU3C_U3_PORT_MAX 4 +#define MU3C_U2_PORT_MAX 5 + +/** + * struct mu3c_ippc_regs: MTK ssusb ip port control registers + * @ip_pw_ctr0~3: ip power and clock control registers + * @ip_pw_sts1~2: ip power and clock status registers + * @ip_xhci_cap: ip xHCI capability register + * @u3_ctrl_p[x]: ip usb3 port x control register, only low 4bytes are used + * @u2_ctrl_p[x]: ip usb2 port x control register, only low 4bytes are used + * @u2_phy_pll: usb2 phy pll control register + */ +struct mu3c_ippc_regs { + __le32 ip_pw_ctr0; + __le32 ip_pw_ctr1; + __le32 ip_pw_ctr2; + __le32 ip_pw_ctr3; + __le32 ip_pw_sts1; + __le32 ip_pw_sts2; + __le32 reserved0[3]; + __le32 ip_xhci_cap; + __le32 reserved1[2]; + __le64 u3_ctrl_p[MU3C_U3_PORT_MAX]; + __le64 u2_ctrl_p[MU3C_U2_PORT_MAX]; + __le32 reserved2; + __le32 u2_phy_pll; + __le32 reserved3[33]; /* 0x80 ~ 0xff */ +}; + +struct xhci_hcd_mtk { + struct device *dev; + struct usb_hcd *hcd; + struct mu3h_sch_bw_info *sch_array; + struct mu3c_ippc_regs __iomem *ippc_regs; + int num_u2_ports; + int num_u3_ports; + struct regulator *vusb33; + struct regulator *vbus; + struct clk *sys_clk; /* sys and mac clock */ + struct clk *wk_deb_p0; /* port0's wakeup debounce clock */ + struct clk *wk_deb_p1; + struct regmap *pericfg; + struct phy **phys; + int num_phys; + int wakeup_src; + bool lpm_support; +}; + +static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd) +{ + return dev_get_drvdata(hcd->self.controller); +} + +#if IS_ENABLED(CONFIG_USB_XHCI_MTK) +int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk); +void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk); +int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); +void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); + +#else +static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, + struct usb_device *udev, struct usb_host_endpoint *ep) +{ + return 0; +} + +static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, + struct usb_device *udev, struct usb_host_endpoint *ep) +{ +} + +#endif + +#endif /* _XHCI_MTK_H_ */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c62109091d12..58c43ed7ff3b 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -53,7 +53,6 @@ static struct hc_driver __read_mostly xhci_pci_hc_driver; static int xhci_pci_setup(struct usb_hcd *hcd); static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { - .extra_priv_size = sizeof(struct xhci_hcd), .reset = xhci_pci_setup, }; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 05647e6753cd..770b6b088797 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -22,6 +22,7 @@ #include <linux/acpi.h> #include "xhci.h" +#include "xhci-plat.h" #include "xhci-mvebu.h" #include "xhci-rcar.h" @@ -31,7 +32,7 @@ static int xhci_plat_setup(struct usb_hcd *hcd); static int xhci_plat_start(struct usb_hcd *hcd); static const struct xhci_driver_overrides xhci_plat_overrides __initconst = { - .extra_priv_size = sizeof(struct xhci_hcd), + .extra_priv_size = sizeof(struct xhci_plat_priv), 
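	/*
	 * (Editorial note: xhci_init_driver() adds extra_priv_size on top of
	 * sizeof(struct xhci_hcd), and hcd_to_xhci_priv() in the new
	 * xhci-plat.h below returns that trailing area, so xhci-pci above
	 * can drop its needless sizeof(struct xhci_hcd) padding while
	 * xhci-plat allocates exactly one struct xhci_plat_priv per HCD.)
	 */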
.reset = xhci_plat_setup, .start = xhci_plat_start, }; @@ -49,11 +50,10 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) /* called during probe() after chip reset completes */ static int xhci_plat_setup(struct usb_hcd *hcd) { - struct device_node *of_node = hcd->self.controller->of_node; int ret; - if (of_device_is_compatible(of_node, "renesas,xhci-r8a7790") || - of_device_is_compatible(of_node, "renesas,xhci-r8a7791")) { + if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) || + xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3)) { ret = xhci_rcar_init_quirk(hcd); if (ret) return ret; @@ -64,19 +64,62 @@ static int xhci_plat_setup(struct usb_hcd *hcd) static int xhci_plat_start(struct usb_hcd *hcd) { - struct device_node *of_node = hcd->self.controller->of_node; - - if (of_device_is_compatible(of_node, "renesas,xhci-r8a7790") || - of_device_is_compatible(of_node, "renesas,xhci-r8a7791")) + if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) || + xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3)) xhci_rcar_start(hcd); return xhci_run(hcd); } +#ifdef CONFIG_OF +static const struct xhci_plat_priv xhci_plat_marvell_armada = { + .type = XHCI_PLAT_TYPE_MARVELL_ARMADA, +}; + +static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = { + .type = XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2, + .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1, +}; + +static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { + .type = XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3, + .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2, +}; + +static const struct of_device_id usb_xhci_of_match[] = { + { + .compatible = "generic-xhci", + }, { + .compatible = "xhci-platform", + }, { + .compatible = "marvell,armada-375-xhci", + .data = &xhci_plat_marvell_armada, + }, { + .compatible = "marvell,armada-380-xhci", + .data = &xhci_plat_marvell_armada, + }, { + .compatible = "renesas,xhci-r8a7790", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,xhci-r8a7791", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,xhci-r8a7793", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,xhci-r8a7795", + .data = &xhci_plat_renesas_rcar_gen3, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, usb_xhci_of_match); +#endif + static int xhci_plat_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct usb_xhci_pdata *pdata = dev_get_platdata(&pdev->dev); + const struct of_device_id *match; const struct hc_driver *driver; struct xhci_hcd *xhci; struct resource *res; @@ -134,10 +177,17 @@ static int xhci_plat_probe(struct platform_device *pdev) goto put_hcd; } - if (of_device_is_compatible(pdev->dev.of_node, - "marvell,armada-375-xhci") || - of_device_is_compatible(pdev->dev.of_node, - "marvell,armada-380-xhci")) { + xhci = hcd_to_xhci(hcd); + match = of_match_node(usb_xhci_of_match, node); + if (match) { + const struct xhci_plat_priv *priv_match = match->data; + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + /* Just copy data for now */ + *priv = *priv_match; + } + + if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) { ret = xhci_mvebu_mbus_init_quirk(pdev); if (ret) goto disable_clk; @@ -145,7 +195,6 @@ static int xhci_plat_probe(struct platform_device *pdev) device_wakeup_enable(hcd->self.controller); - xhci = hcd_to_xhci(hcd); xhci->clk = clk; xhci->main_hcd = hcd; xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev, @@ -256,19 +305,6 @@ static const struct dev_pm_ops xhci_plat_pm_ops = { 
#define DEV_PM_OPS NULL #endif /* CONFIG_PM */ -#ifdef CONFIG_OF -static const struct of_device_id usb_xhci_of_match[] = { - { .compatible = "generic-xhci" }, - { .compatible = "xhci-platform" }, - { .compatible = "marvell,armada-375-xhci"}, - { .compatible = "marvell,armada-380-xhci"}, - { .compatible = "renesas,xhci-r8a7790"}, - { .compatible = "renesas,xhci-r8a7791"}, - { }, -}; -MODULE_DEVICE_TABLE(of, usb_xhci_of_match); -#endif - static const struct acpi_device_id usb_xhci_acpi_match[] = { /* XHCI-compliant USB Controller */ { "PNP0D10", }, diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h new file mode 100644 index 000000000000..5a2e2e3936c4 --- /dev/null +++ b/drivers/usb/host/xhci-plat.h @@ -0,0 +1,39 @@ +/* + * xhci-plat.h - xHCI host controller driver platform Bus Glue. + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#ifndef _XHCI_PLAT_H +#define _XHCI_PLAT_H + +#include "xhci.h" /* for hcd_to_xhci() */ + +enum xhci_plat_type { + XHCI_PLAT_TYPE_MARVELL_ARMADA, + XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2, + XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3, +}; + +struct xhci_plat_priv { + enum xhci_plat_type type; + const char *firmware_name; +}; + +#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv) + +static inline bool xhci_plat_type_is(struct usb_hcd *hcd, + enum xhci_plat_type type) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (priv && priv->type == type) + return true; + else + return false; +} +#endif /* _XHCI_PLAT_H */ diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index ff0d1b44ea58..623100e9385e 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -14,10 +14,17 @@ #include <linux/usb/phy.h> #include "xhci.h" +#include "xhci-plat.h" #include "xhci-rcar.h" -#define FIRMWARE_NAME "r8a779x_usb3_v1.dlmem" -MODULE_FIRMWARE(FIRMWARE_NAME); +/* +* - The V2 firmware is possible to use on R-Car Gen2. However, the V2 causes +* performance degradation. So, this driver continues to use the V1 if R-Car +* Gen2. +* - The V1 firmware is impossible to use on R-Car Gen3. 
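The new xhci-plat.h, together with the of_device_id table that now carries .data pointers, replaces repeated of_device_is_compatible() string checks with a one-time copy of a const template into the private area; quirk sites such as xhci_plat_type_is() then only compare an enum, and R-Car entries also pick their firmware name this way. A rough standalone illustration of that match-data pattern (the table and types here are invented for the example):

#include <stdio.h>
#include <string.h>

enum plat_type { PLAT_GENERIC, PLAT_MARVELL_ARMADA, PLAT_RCAR_GEN2, PLAT_RCAR_GEN3 };

struct plat_priv {
	enum plat_type type;
	const char *firmware_name;
};

struct of_match {
	const char *compatible;
	const struct plat_priv *data;	/* may be NULL, as for "generic-xhci" */
};

static const struct plat_priv rcar_gen2 = { PLAT_RCAR_GEN2, "r8a779x_usb3_v1.dlmem" };
static const struct plat_priv rcar_gen3 = { PLAT_RCAR_GEN3, "r8a779x_usb3_v2.dlmem" };

static const struct of_match table[] = {
	{ "generic-xhci",		NULL },
	{ "renesas,xhci-r8a7791",	&rcar_gen2 },
	{ "renesas,xhci-r8a7795",	&rcar_gen3 },
	{ NULL, NULL },
};

int main(void)
{
	struct plat_priv priv = { PLAT_GENERIC, NULL };
	const char *compatible = "renesas,xhci-r8a7795";

	for (const struct of_match *m = table; m->compatible; m++) {
		if (!strcmp(m->compatible, compatible) && m->data) {
			priv = *m->data;	/* "just copy data for now" */
			break;
		}
	}

	if (priv.type == PLAT_RCAR_GEN3)
		printf("would request firmware %s\n", priv.firmware_name);
	return 0;
}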
+*/ +MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1); +MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2); /*** Register Offset ***/ #define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */ @@ -56,6 +63,19 @@ MODULE_FIRMWARE(FIRMWARE_NAME); #define RCAR_USB3_RX_POL_VAL BIT(21) #define RCAR_USB3_TX_POL_VAL BIT(4) +static void xhci_rcar_start_gen2(struct usb_hcd *hcd) +{ + /* LCLK Select */ + writel(RCAR_USB3_LCLK_ENA_VAL, hcd->regs + RCAR_USB3_LCLK); + /* USB3.0 Configuration */ + writel(RCAR_USB3_CONF1_VAL, hcd->regs + RCAR_USB3_CONF1); + writel(RCAR_USB3_CONF2_VAL, hcd->regs + RCAR_USB3_CONF2); + writel(RCAR_USB3_CONF3_VAL, hcd->regs + RCAR_USB3_CONF3); + /* USB3.0 Polarity */ + writel(RCAR_USB3_RX_POL_VAL, hcd->regs + RCAR_USB3_RX_POL); + writel(RCAR_USB3_TX_POL_VAL, hcd->regs + RCAR_USB3_TX_POL); +} + void xhci_rcar_start(struct usb_hcd *hcd) { u32 temp; @@ -65,27 +85,23 @@ void xhci_rcar_start(struct usb_hcd *hcd) temp = readl(hcd->regs + RCAR_USB3_INT_ENA); temp |= RCAR_USB3_INT_ENA_VAL; writel(temp, hcd->regs + RCAR_USB3_INT_ENA); - /* LCLK Select */ - writel(RCAR_USB3_LCLK_ENA_VAL, hcd->regs + RCAR_USB3_LCLK); - /* USB3.0 Configuration */ - writel(RCAR_USB3_CONF1_VAL, hcd->regs + RCAR_USB3_CONF1); - writel(RCAR_USB3_CONF2_VAL, hcd->regs + RCAR_USB3_CONF2); - writel(RCAR_USB3_CONF3_VAL, hcd->regs + RCAR_USB3_CONF3); - /* USB3.0 Polarity */ - writel(RCAR_USB3_RX_POL_VAL, hcd->regs + RCAR_USB3_RX_POL); - writel(RCAR_USB3_TX_POL_VAL, hcd->regs + RCAR_USB3_TX_POL); + if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2)) + xhci_rcar_start_gen2(hcd); } } -static int xhci_rcar_download_firmware(struct device *dev, void __iomem *regs) +static int xhci_rcar_download_firmware(struct usb_hcd *hcd) { + struct device *dev = hcd->self.controller; + void __iomem *regs = hcd->regs; + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); const struct firmware *fw; int retval, index, j, time; int timeout = 10000; u32 data, val, temp; /* request R-Car USB3.0 firmware */ - retval = request_firmware(&fw, FIRMWARE_NAME, dev); + retval = request_firmware(&fw, priv->firmware_name, dev); if (retval) return retval; @@ -144,5 +160,5 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd) if (!hcd->regs) return 0; - return xhci_rcar_download_firmware(hcd->self.controller, hcd->regs); + return xhci_rcar_download_firmware(hcd); } diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h index 58501256715d..2941a25cfe98 100644 --- a/drivers/usb/host/xhci-rcar.h +++ b/drivers/usb/host/xhci-rcar.h @@ -11,6 +11,9 @@ #ifndef _XHCI_RCAR_H #define _XHCI_RCAR_H +#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem" +#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem" + #if IS_ENABLED(CONFIG_USB_XHCI_RCAR) void xhci_rcar_start(struct usb_hcd *hcd); int xhci_rcar_init_quirk(struct usb_hcd *hcd); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index eeaa6c6bd540..f1c21c40b4a6 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -68,6 +68,7 @@ #include <linux/slab.h> #include "xhci.h" #include "xhci-trace.h" +#include "xhci-mtk.h" /* * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA @@ -3075,17 +3076,22 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, { u32 maxp, total_packet_count; - if (xhci->hci_version < 0x100) + /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ + if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) return ((td_total_len - transferred) >> 10); - maxp = 
GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); - total_packet_count = DIV_ROUND_UP(td_total_len, maxp); - /* One TRB with a zero-length data packet. */ if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) || trb_buff_len == td_total_len) return 0; + /* for MTK xHCI, TD size doesn't include this TRB */ + if (xhci->quirks & XHCI_MTK_HOST) + trb_buff_len = 0; + + maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + total_packet_count = DIV_ROUND_UP(td_total_len, maxp); + /* Queueing functions don't count the current TRB into transferred */ return (total_packet_count - ((transferred + trb_buff_len) / maxp)); } @@ -3473,7 +3479,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= 0x1; /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ - if (xhci->hci_version >= 0x100) { + if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) field |= TRB_TX_TYPE(TRB_DATA_IN); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3f912705dcef..26a44c0e969e 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -31,6 +31,7 @@ #include "xhci.h" #include "xhci-trace.h" +#include "xhci-mtk.h" #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" @@ -634,7 +635,11 @@ int xhci_run(struct usb_hcd *hcd) "// Set the interrupt modulation register"); temp = readl(&xhci->ir_set->irq_control); temp &= ~ER_IRQ_INTERVAL_MASK; - temp |= (u32) 160; + /* + * the increment interval is 8 times as much as that defined + * in xHCI spec on MTK's controller + */ + temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160); writel(temp, &xhci->ir_set->irq_control); /* Set the HCD state before we enable the irqs */ @@ -1698,6 +1703,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); + if (xhci->quirks & XHCI_MTK_HOST) + xhci_mtk_drop_ep_quirk(hcd, udev, ep); + xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, udev->slot_id, @@ -1793,6 +1801,15 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, return -ENOMEM; } + if (xhci->quirks & XHCI_MTK_HOST) { + ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); + if (ret < 0) { + xhci_free_or_cache_endpoint_ring(xhci, + virt_dev, ep_index); + return ret; + } + } + ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); @@ -4960,7 +4977,7 @@ EXPORT_SYMBOL_GPL(xhci_gen_setup); static const struct hc_driver xhci_hc_driver = { .description = "xhci-hcd", .product_desc = "xHCI Host Controller", - .hcd_priv_size = sizeof(struct xhci_hcd *), + .hcd_priv_size = sizeof(struct xhci_hcd), /* * generic hardware linkage @@ -5059,6 +5076,10 @@ static int __init xhci_hcd_init(void) BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); + + if (usb_disabled()) + return -ENODEV; + return 0; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 0b9451250e33..9be7348872ba 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1630,6 +1630,7 @@ struct xhci_hcd { /* For controllers with a broken beyond repair streams implementation */ #define XHCI_BROKEN_STREAMS (1 << 19) #define XHCI_PME_STUCK_QUIRK (1 << 20) +#define XHCI_MTK_HOST (1 << 
21) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1656,6 +1657,9 @@ struct xhci_hcd { u32 port_status_u0; /* Compliance Mode Timer Triggered every 2 seconds */ #define COMP_MODE_RCVRY_MSECS 2000 + + /* platform-specific data -- must come last */ + unsigned long priv[0] __aligned(sizeof(s64)); }; /* Platform specific overrides to generic XHCI hc_driver ops */ diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 306d6852ebc7..8efbabacc84e 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -2825,21 +2825,7 @@ sisusb_lseek(struct file *file, loff_t offset, int orig) return -ENODEV; } - switch (orig) { - case 0: - file->f_pos = offset; - ret = file->f_pos; - /* never negative, no force_successful_syscall needed */ - break; - case 1: - file->f_pos += offset; - ret = file->f_pos; - /* never negative, no force_successful_syscall needed */ - break; - default: - /* seeking relative to "end of file" is not supported */ - ret = -EINVAL; - } + ret = no_seek_end_llseek(file, offset, orig); mutex_unlock(&sisusb->lock); return ret; diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 637f3f7cfce8..92fdb6e9faff 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -22,18 +22,42 @@ static void complicated_callback(struct urb *urb); /*-------------------------------------------------------------------------*/ /* FIXME make these public somewhere; usbdevfs.h? */ -struct usbtest_param { + +/* Parameter for usbtest driver. */ +struct usbtest_param_32 { /* inputs */ - unsigned test_num; /* 0..(TEST_CASES-1) */ - unsigned iterations; - unsigned length; - unsigned vary; - unsigned sglen; + __u32 test_num; /* 0..(TEST_CASES-1) */ + __u32 iterations; + __u32 length; + __u32 vary; + __u32 sglen; /* outputs */ - struct timeval duration; + __s32 duration_sec; + __s32 duration_usec; }; -#define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param) + +/* + * Compat parameter to the usbtest driver. + * This supports older user space binaries compiled with 64 bit compiler. + */ +struct usbtest_param_64 { + /* inputs */ + __u32 test_num; /* 0..(TEST_CASES-1) */ + __u32 iterations; + __u32 length; + __u32 vary; + __u32 sglen; + + /* outputs */ + __s64 duration_sec; + __s64 duration_usec; +}; + +/* IOCTL interface to the driver. */ +#define USBTEST_REQUEST_32 _IOWR('U', 100, struct usbtest_param_32) +/* COMPAT IOCTL interface to the driver. 
*/ +#define USBTEST_REQUEST_64 _IOWR('U', 100, struct usbtest_param_64) /*-------------------------------------------------------------------------*/ @@ -1030,7 +1054,7 @@ struct ctrl_ctx { unsigned pending; int status; struct urb **urb; - struct usbtest_param *param; + struct usbtest_param_32 *param; int last; }; @@ -1155,7 +1179,7 @@ error: } static int -test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param) +test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param) { struct usb_device *udev = testdev_to_usbdev(dev); struct urb **urb; @@ -1849,7 +1873,7 @@ static void complicated_callback(struct urb *urb) goto done; default: dev_err(&ctx->dev->intf->dev, - "iso resubmit err %d\n", + "resubmit err %d\n", status); /* FALLTHROUGH */ case -ENODEV: /* disconnected */ @@ -1863,7 +1887,7 @@ static void complicated_callback(struct urb *urb) if (ctx->pending == 0) { if (ctx->errors) dev_err(&ctx->dev->intf->dev, - "iso test, %lu errors out of %lu\n", + "during the test, %lu errors out of %lu\n", ctx->errors, ctx->packet_count); complete(&ctx->done); } @@ -1930,7 +1954,7 @@ static struct urb *iso_alloc_urb( } static int -test_queue(struct usbtest_dev *dev, struct usbtest_param *param, +test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, int pipe, struct usb_endpoint_descriptor *desc, unsigned offset) { struct transfer_context context; @@ -2049,81 +2073,20 @@ static int test_unaligned_bulk( return retval; } -/*-------------------------------------------------------------------------*/ - -/* We only have this one interface to user space, through usbfs. - * User mode code can scan usbfs to find N different devices (maybe on - * different busses) to use when testing, and allocate one thread per - * test. So discovery is simplified, and we have no device naming issues. - * - * Don't use these only as stress/load tests. Use them along with with - * other USB bus activity: plugging, unplugging, mousing, mp3 playback, - * video capture, and so on. Run different tests at different times, in - * different sequences. Nothing here should interact with other devices, - * except indirectly by consuming USB bandwidth and CPU resources for test - * threads and request completion. But the only way to know that for sure - * is to test when HC queues are in use by many devices. - * - * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(), - * it locks out usbcore in certain code paths. Notably, if you disconnect - * the device-under-test, hub_wq will wait block forever waiting for the - * ioctl to complete ... so that usb_disconnect() can abort the pending - * urbs and then call usbtest_disconnect(). To abort a test, you're best - * off just killing the userspace task and waiting for it to exit. - */ - +/* Run tests. */ static int -usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) +usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param) { struct usbtest_dev *dev = usb_get_intfdata(intf); struct usb_device *udev = testdev_to_usbdev(dev); - struct usbtest_param *param = buf; - int retval = -EOPNOTSUPP; struct urb *urb; struct scatterlist *sg; struct usb_sg_request req; - struct timeval start; unsigned i; - - /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. 
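The two request macros above resolve to different ioctl numbers only because _IOWR() encodes the size of its argument type; that size field is how the driver can tell an old binary, whose struct timeval duration was 64-bit on 64-bit userspace, from one using the new fixed 32-bit layout. A quick userspace check of that encoding (the struct names below are local to the example, not the driver's UAPI):

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

struct param_32 {
	uint32_t test_num, iterations, length, vary, sglen;
	int32_t duration_sec, duration_usec;
};

struct param_64 {
	uint32_t test_num, iterations, length, vary, sglen;
	int64_t duration_sec, duration_usec;
};

#define REQUEST_32 _IOWR('U', 100, struct param_32)
#define REQUEST_64 _IOWR('U', 100, struct param_64)

int main(void)
{
	/* same type 'U' and nr 100, different _IOC_SIZE, hence distinct codes */
	printf("32-bit layout: %#lx (payload %u bytes)\n",
	       (unsigned long)REQUEST_32, (unsigned)_IOC_SIZE(REQUEST_32));
	printf("64-bit layout: %#lx (payload %u bytes)\n",
	       (unsigned long)REQUEST_64, (unsigned)_IOC_SIZE(REQUEST_64));
	return 0;
}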
*/ - - pattern = mod_pattern; - - if (code != USBTEST_REQUEST) - return -EOPNOTSUPP; + int retval = -EOPNOTSUPP; if (param->iterations <= 0) return -EINVAL; - - if (param->sglen > MAX_SGLEN) - return -EINVAL; - - if (mutex_lock_interruptible(&dev->lock)) - return -ERESTARTSYS; - - /* FIXME: What if a system sleep starts while a test is running? */ - - /* some devices, like ez-usb default devices, need a non-default - * altsetting to have any active endpoints. some tests change - * altsettings; force a default so most tests don't need to check. - */ - if (dev->info->alt >= 0) { - int res; - - if (intf->altsetting->desc.bInterfaceNumber) { - mutex_unlock(&dev->lock); - return -ENODEV; - } - res = set_altsetting(dev, dev->info->alt); - if (res) { - dev_err(&intf->dev, - "set altsetting to %d failed, %d\n", - dev->info->alt, res); - mutex_unlock(&dev->lock); - return res; - } - } - /* * Just a bunch of test cases that every HCD is expected to handle. * @@ -2133,7 +2096,6 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) * FIXME add more tests! cancel requests, verify the data, control * queueing, concurrent read+write threads, and so on. */ - do_gettimeofday(&start); switch (param->test_num) { case 0: @@ -2548,13 +2510,116 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) dev->in_pipe, NULL, 0); break; } - do_gettimeofday(¶m->duration); - param->duration.tv_sec -= start.tv_sec; - param->duration.tv_usec -= start.tv_usec; - if (param->duration.tv_usec < 0) { - param->duration.tv_usec += 1000 * 1000; - param->duration.tv_sec -= 1; + return retval; +} + +/*-------------------------------------------------------------------------*/ + +/* We only have this one interface to user space, through usbfs. + * User mode code can scan usbfs to find N different devices (maybe on + * different busses) to use when testing, and allocate one thread per + * test. So discovery is simplified, and we have no device naming issues. + * + * Don't use these only as stress/load tests. Use them along with with + * other USB bus activity: plugging, unplugging, mousing, mp3 playback, + * video capture, and so on. Run different tests at different times, in + * different sequences. Nothing here should interact with other devices, + * except indirectly by consuming USB bandwidth and CPU resources for test + * threads and request completion. But the only way to know that for sure + * is to test when HC queues are in use by many devices. + * + * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(), + * it locks out usbcore in certain code paths. Notably, if you disconnect + * the device-under-test, hub_wq will wait block forever waiting for the + * ioctl to complete ... so that usb_disconnect() can abort the pending + * urbs and then call usbtest_disconnect(). To abort a test, you're best + * off just killing the userspace task and waiting for it to exit. + */ + +static int +usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) +{ + + struct usbtest_dev *dev = usb_get_intfdata(intf); + struct usbtest_param_64 *param_64 = buf; + struct usbtest_param_32 temp; + struct usbtest_param_32 *param_32 = buf; + struct timespec64 start; + struct timespec64 end; + struct timespec64 duration; + int retval = -EOPNOTSUPP; + + /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */ + + pattern = mod_pattern; + + if (mutex_lock_interruptible(&dev->lock)) + return -ERESTARTSYS; + + /* FIXME: What if a system sleep starts while a test is running? 
*/ + + /* some devices, like ez-usb default devices, need a non-default + * altsetting to have any active endpoints. some tests change + * altsettings; force a default so most tests don't need to check. + */ + if (dev->info->alt >= 0) { + if (intf->altsetting->desc.bInterfaceNumber) { + retval = -ENODEV; + goto free_mutex; + } + retval = set_altsetting(dev, dev->info->alt); + if (retval) { + dev_err(&intf->dev, + "set altsetting to %d failed, %d\n", + dev->info->alt, retval); + goto free_mutex; + } + } + + switch (code) { + case USBTEST_REQUEST_64: + temp.test_num = param_64->test_num; + temp.iterations = param_64->iterations; + temp.length = param_64->length; + temp.sglen = param_64->sglen; + temp.vary = param_64->vary; + param_32 = &temp; + break; + + case USBTEST_REQUEST_32: + break; + + default: + retval = -EOPNOTSUPP; + goto free_mutex; + } + + ktime_get_ts64(&start); + + retval = usbtest_do_ioctl(intf, param_32); + if (retval) + goto free_mutex; + + ktime_get_ts64(&end); + + duration = timespec64_sub(end, start); + + temp.duration_sec = duration.tv_sec; + temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC; + + switch (code) { + case USBTEST_REQUEST_32: + param_32->duration_sec = temp.duration_sec; + param_32->duration_usec = temp.duration_usec; + break; + + case USBTEST_REQUEST_64: + param_64->duration_sec = temp.duration_sec; + param_64->duration_usec = temp.duration_usec; + break; } + +free_mutex: mutex_unlock(&dev->lock); return retval; } diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 3598f1a62673..1a874a1f3890 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> +#include <linux/time64.h> #include <asm/uaccess.h> @@ -92,8 +93,8 @@ struct mon_bin_hdr { unsigned short busnum; /* Bus number */ char flag_setup; char flag_data; - s64 ts_sec; /* gettimeofday */ - s32 ts_usec; /* gettimeofday */ + s64 ts_sec; /* getnstimeofday64 */ + s32 ts_usec; /* getnstimeofday64 */ int status; unsigned int len_urb; /* Length of data (submitted or actual) */ unsigned int len_cap; /* Delivered length */ @@ -483,7 +484,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, char ev_type, int status) { const struct usb_endpoint_descriptor *epd = &urb->ep->desc; - struct timeval ts; + struct timespec64 ts; unsigned long flags; unsigned int urb_length; unsigned int offset; @@ -494,7 +495,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, struct mon_bin_hdr *ep; char data_tag = 0; - do_gettimeofday(&ts); + getnstimeofday64(&ts); spin_lock_irqsave(&rp->b_lock, flags); @@ -568,7 +569,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; - ep->ts_usec = ts.tv_usec; + ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; ep->status = status; ep->len_urb = urb_length; ep->len_cap = length + lendesc; @@ -629,12 +630,12 @@ static void mon_bin_complete(void *data, struct urb *urb, int status) static void mon_bin_error(void *data, struct urb *urb, int error) { struct mon_reader_bin *rp = data; - struct timeval ts; + struct timespec64 ts; unsigned long flags; unsigned int offset; struct mon_bin_hdr *ep; - do_gettimeofday(&ts); + getnstimeofday64(&ts); spin_lock_irqsave(&rp->b_lock, flags); @@ -656,7 +657,7 @@ static void mon_bin_error(void *data, struct urb *urb, int error) ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; 
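Both the usbtest duration bookkeeping and the usbmon timestamps above move from struct timeval to timespec64, keeping the exported microsecond fields by dividing tv_nsec down by NSEC_PER_USEC. A userspace analogue of the same arithmetic, using clock_gettime() where the kernel code uses ktime_get_ts64()/getnstimeofday64():

#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000L

/* mirrors timespec64_sub(): normalised a - b */
static struct timespec ts_sub(struct timespec a, struct timespec b)
{
	struct timespec d = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };

	if (d.tv_nsec < 0) {
		d.tv_sec--;
		d.tv_nsec += 1000000000L;
	}
	return d;
}

int main(void)
{
	struct timespec start, end, d;

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* ... the test cases would run here ... */
	clock_gettime(CLOCK_MONOTONIC, &end);

	d = ts_sub(end, start);
	printf("duration: %lld s %ld us\n",
	       (long long)d.tv_sec, d.tv_nsec / NSEC_PER_USEC);
	return 0;
}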
ep->ts_sec = ts.tv_sec; - ep->ts_usec = ts.tv_usec; + ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; ep->status = error; ep->flag_setup = '-'; diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c index f7c292f4891e..fec3f1128fdc 100644 --- a/drivers/usb/mon/mon_main.c +++ b/drivers/usb/mon/mon_main.c @@ -241,7 +241,7 @@ static struct notifier_block mon_nb = { /* * Ops */ -static struct usb_mon_operations mon_ops_0 = { +static const struct usb_mon_operations mon_ops_0 = { .urb_submit = mon_submit, .urb_submit_error = mon_submit_error, .urb_complete = mon_complete, diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index ad408251d955..e59334b09c41 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -9,6 +9,7 @@ #include <linux/usb.h> #include <linux/slab.h> #include <linux/time.h> +#include <linux/ktime.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/debugfs.h> @@ -176,12 +177,12 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, static inline unsigned int mon_get_timestamp(void) { - struct timeval tval; + struct timespec64 now; unsigned int stamp; - do_gettimeofday(&tval); - stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */ - stamp = stamp * 1000000 + tval.tv_usec; + ktime_get_ts64(&now); + stamp = now.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */ + stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC; return stamp; } @@ -386,7 +387,8 @@ static ssize_t mon_text_read_t(struct file *file, char __user *buf, struct mon_event_text *ep; struct mon_text_ptr ptr; - if (IS_ERR(ep = mon_text_read_wait(rp, file))) + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) return PTR_ERR(ep); mutex_lock(&rp->printf_lock); ptr.cnt = 0; @@ -413,7 +415,8 @@ static ssize_t mon_text_read_u(struct file *file, char __user *buf, struct mon_event_text *ep; struct mon_text_ptr ptr; - if (IS_ERR(ep = mon_text_read_wait(rp, file))) + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) return PTR_ERR(ep); mutex_lock(&rp->printf_lock); ptr.cnt = 0; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ee9ff7028b92..c3791a01ab31 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1360,8 +1360,7 @@ static int ep_config_from_table(struct musb *musb) break; } - printk(KERN_DEBUG "%s: setup fifo_mode %d\n", - musb_driver_name, fifo_mode); + pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode); done: @@ -1390,7 +1389,7 @@ done: musb->nr_endpoints = max(epn, musb->nr_endpoints); } - printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", + pr_debug("%s: %d/%d max ep, %d/%d memory\n", musb_driver_name, n + 1, musb->config->num_eps * 2 - 1, offset, (1 << (musb->config->ram_bits + 2))); @@ -1491,8 +1490,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb) if (reg & MUSB_CONFIGDATA_SOFTCONE) strcat(aInfo, ", SoftConn"); - printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", - musb_driver_name, reg, aInfo); + pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo); aDate[0] = 0; if (MUSB_CONTROLLER_MHDRC == musb_type) { @@ -1502,9 +1500,8 @@ static int musb_core_init(u16 musb_type, struct musb *musb) musb->is_multipoint = 0; type = ""; #ifndef CONFIG_USB_OTG_BLACKLIST_HUB - printk(KERN_ERR - "%s: kernel must blacklist external hubs\n", - musb_driver_name); + pr_err("%s: kernel must blacklist external hubs\n", + musb_driver_name); #endif } @@ -1513,8 +1510,8 @@ static int musb_core_init(u16 musb_type, 
struct musb *musb) snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers), MUSB_HWVERS_MINOR(musb->hwvers), (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : ""); - printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", - musb_driver_name, type, aRevision, aDate); + pr_debug("%s: %sHDRC RTL version %s %s\n", + musb_driver_name, type, aRevision, aDate); /* configure ep0 */ musb_configure_ep0(musb); @@ -1705,6 +1702,23 @@ EXPORT_SYMBOL_GPL(musb_dma_completion); #define use_dma 0 #endif +static void (*musb_phy_callback)(enum musb_vbus_id_status status); + +/* + * musb_mailbox - optional phy notifier function + * @status phy state change + * + * Optionally gets called from the USB PHY. Note that the USB PHY must be + * disabled at the point the phy_callback is registered or unregistered. + */ +void musb_mailbox(enum musb_vbus_id_status status) +{ + if (musb_phy_callback) + musb_phy_callback(status); + +}; +EXPORT_SYMBOL_GPL(musb_mailbox); + /*-------------------------------------------------------------------------*/ static ssize_t @@ -2117,8 +2131,15 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->xceiv->io_ops = &musb_ulpi_access; } + if (musb->ops->phy_callback) + musb_phy_callback = musb->ops->phy_callback; + pm_runtime_get_sync(musb->controller); + status = usb_phy_init(musb->xceiv); + if (status < 0) + goto err_usb_phy_init; + if (use_dma && dev->dma_mask) { musb->dma_controller = musb_dma_controller_create(musb, musb->mregs); @@ -2239,7 +2260,11 @@ fail3: cancel_delayed_work_sync(&musb->deassert_reset_work); if (musb->dma_controller) musb_dma_controller_destroy(musb->dma_controller); + fail2_5: + usb_phy_shutdown(musb->xceiv); + +err_usb_phy_init: pm_runtime_put_sync(musb->controller); fail2: @@ -2295,10 +2320,13 @@ static int musb_remove(struct platform_device *pdev) */ musb_exit_debugfs(musb); musb_shutdown(pdev); + musb_phy_callback = NULL; if (musb->dma_controller) musb_dma_controller_destroy(musb->dma_controller); + usb_phy_shutdown(musb->xceiv); + cancel_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 2337d7a7d62d..fd215fb45fd4 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -168,6 +168,7 @@ struct musb_io; * @adjust_channel_params: pre check for standard dma channel_program func * @pre_root_reset_end: called before the root usb port reset flag gets cleared * @post_root_reset_end: called after the root usb port reset flag gets cleared + * @phy_callback: optional callback function for the phy to call */ struct musb_platform_ops { @@ -214,6 +215,7 @@ struct musb_platform_ops { dma_addr_t *dma_addr, u32 *len); void (*pre_root_reset_end)(struct musb *musb); void (*post_root_reset_end)(struct musb *musb); + void (*phy_callback)(enum musb_vbus_id_status status); }; /* diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 67ad630c86c9..87bd578799a8 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -353,9 +353,8 @@ static void txstate(struct musb *musb, struct musb_request *req) * 1 >0 Yes(FS bulk) */ if (!musb_ep->hb_mult || - (musb_ep->hb_mult && - can_bulk_split(musb, - musb_ep->type))) + can_bulk_split(musb, + musb_ep->type)) csr |= MUSB_TXCSR_AUTOSET; } csr &= ~MUSB_TXCSR_P_UNDERRUN; diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 1bd9232ff76f..c84e0322c108 100644 --- 
a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -36,7 +36,7 @@ #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/delay.h> -#include <linux/usb/musb-omap.h> +#include <linux/usb/musb.h> #include <linux/phy/omap_control_phy.h> #include <linux/of_platform.h> @@ -46,7 +46,7 @@ struct omap2430_glue { struct device *dev; struct platform_device *musb; - enum omap_musb_vbus_id_status status; + enum musb_vbus_id_status status; struct work_struct omap_musb_mailbox_work; struct device *control_otghs; }; @@ -234,7 +234,7 @@ static inline void omap2430_low_level_init(struct musb *musb) musb_writel(musb->mregs, OTG_FORCESTDBY, l); } -void omap_musb_mailbox(enum omap_musb_vbus_id_status status) +static void omap2430_musb_mailbox(enum musb_vbus_id_status status) { struct omap2430_glue *glue = _glue; @@ -251,7 +251,6 @@ void omap_musb_mailbox(enum omap_musb_vbus_id_status status) schedule_work(&glue->omap_musb_mailbox_work); } -EXPORT_SYMBOL_GPL(omap_musb_mailbox); static void omap_musb_set_mailbox(struct omap2430_glue *glue) { @@ -262,7 +261,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) struct usb_otg *otg = musb->xceiv->otg; switch (glue->status) { - case OMAP_MUSB_ID_GROUND: + case MUSB_ID_GROUND: dev_dbg(dev, "ID GND\n"); otg->default_a = true; @@ -276,7 +275,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) } break; - case OMAP_MUSB_VBUS_VALID: + case MUSB_VBUS_VALID: dev_dbg(dev, "VBUS Connect\n"); otg->default_a = false; @@ -287,8 +286,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); break; - case OMAP_MUSB_ID_FLOAT: - case OMAP_MUSB_VBUS_OFF: + case MUSB_ID_FLOAT: + case MUSB_VBUS_OFF: dev_dbg(dev, "VBUS Disconnect\n"); musb->xceiv->last_event = USB_EVENT_NONE; @@ -430,7 +429,7 @@ static int omap2430_musb_init(struct musb *musb) setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); - if (glue->status != OMAP_MUSB_UNKNOWN) + if (glue->status != MUSB_UNKNOWN) omap_musb_set_mailbox(glue); phy_init(musb->phy); @@ -455,7 +454,7 @@ static void omap2430_musb_enable(struct musb *musb) switch (glue->status) { - case OMAP_MUSB_ID_GROUND: + case MUSB_ID_GROUND: omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST); if (data->interface_type != MUSB_INTERFACE_UTMI) break; @@ -474,7 +473,7 @@ static void omap2430_musb_enable(struct musb *musb) } break; - case OMAP_MUSB_VBUS_VALID: + case MUSB_VBUS_VALID: omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); break; @@ -488,7 +487,7 @@ static void omap2430_musb_disable(struct musb *musb) struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); - if (glue->status != OMAP_MUSB_UNKNOWN) + if (glue->status != MUSB_UNKNOWN) omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DISCONNECT); } @@ -520,6 +519,8 @@ static const struct musb_platform_ops omap2430_ops = { .enable = omap2430_musb_enable, .disable = omap2430_musb_disable, + + .phy_callback = omap2430_musb_mailbox, }; static u64 omap2430_dmamask = DMA_BIT_MASK(32); @@ -551,7 +552,7 @@ static int omap2430_probe(struct platform_device *pdev) glue->dev = &pdev->dev; glue->musb = musb; - glue->status = OMAP_MUSB_UNKNOWN; + glue->status = MUSB_UNKNOWN; glue->control_otghs = ERR_PTR(-ENODEV); if (np) { @@ -663,8 +664,11 @@ static int omap2430_remove(struct platform_device *pdev) { struct omap2430_glue *glue = platform_get_drvdata(pdev); + pm_runtime_get_sync(glue->dev); 
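The omap2430 hunks above are the consumer side of the new musb_mailbox() hook: instead of the PHY calling an OMAP-only exported symbol, the glue driver hands its handler to the core through musb_platform_ops.phy_callback, and the core forwards PHY events through a single NULL-safe pointer that musb_remove() clears again. A stripped-down sketch of that optional-callback pattern (names are illustrative):

#include <stdio.h>

enum vbus_id_status { VBUS_OFF, VBUS_VALID, ID_GROUND, ID_FLOAT, STATUS_UNKNOWN };

/* core side: one optional hook, safe to call when nobody has registered */
static void (*phy_callback)(enum vbus_id_status status);

static void mailbox(enum vbus_id_status status)
{
	if (phy_callback)
		phy_callback(status);
}

/* glue side: the handler it previously had to export by name */
static void glue_mailbox(enum vbus_id_status status)
{
	printf("glue saw event %d\n", status);
}

int main(void)
{
	mailbox(VBUS_VALID);		/* ignored: no glue registered yet */

	phy_callback = glue_mailbox;	/* done via the glue's platform_ops */
	mailbox(ID_GROUND);		/* now reaches the glue driver */

	phy_callback = NULL;		/* cleared again on driver removal */
	mailbox(VBUS_OFF);
	return 0;
}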
cancel_work_sync(&glue->omap_musb_mailbox_work); platform_device_unregister(glue->musb); + pm_runtime_put_sync(glue->dev); + pm_runtime_disable(glue->dev); return 0; } diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 22e8ecb6bfbd..c6904742e2aa 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -66,6 +66,7 @@ config AM335X_PHY_USB select USB_PHY select AM335X_CONTROL_USB select NOP_USB_XCEIV + select USB_COMMON help This driver provides PHY support for that phy which part for the AM335x SoC. @@ -186,19 +187,6 @@ config USB_MXS_PHY MXS Phy is used by some of the i.MX SoCs, for example imx23/28/6x. -config USB_RCAR_PHY - tristate "Renesas R-Car USB PHY support" - depends on USB || USB_GADGET - depends on ARCH_R8A7778 || ARCH_R8A7779 || COMPILE_TEST - select USB_PHY - help - Say Y here to add support for the Renesas R-Car USB common PHY driver. - This chip is typically used as USB PHY for USB host, gadget. - This driver supports R8A7778 and R8A7779. - - To compile this driver as a module, choose M here: the - module will be called phy-rcar-usb. - config USB_ULPI bool "Generic ULPI Transceiver Driver" depends on ARM || ARM64 diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index 19c0dccbb116..b433e5d89be4 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o -obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c index 7b3035ff9434..42a1afe36a90 100644 --- a/drivers/usb/phy/phy-am335x-control.c +++ b/drivers/usb/phy/phy-am335x-control.c @@ -4,7 +4,8 @@ #include <linux/of.h> #include <linux/io.h> #include <linux/delay.h> -#include "am35x-phy-control.h" +#include <linux/usb/otg.h> +#include "phy-am335x-control.h" struct am335x_control_usb { struct device *dev; @@ -58,7 +59,8 @@ static void am335x_phy_wkup(struct phy_control *phy_ctrl, u32 id, bool on) spin_unlock(&usb_ctrl->lock); } -static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on) +static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, + enum usb_dr_mode dr_mode, bool on) { struct am335x_control_usb *usb_ctrl; u32 val; @@ -80,8 +82,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on) val = readl(usb_ctrl->phy_reg + reg); if (on) { - val &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN); - val |= USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN; + if (dr_mode == USB_DR_MODE_HOST) { + val &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN | + USBPHY_OTGVDET_EN); + val |= USBPHY_OTGSESSEND_EN; + } else { + val &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN); + val |= USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN; + } } else { val |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN; } diff --git a/drivers/usb/phy/am35x-phy-control.h b/drivers/usb/phy/phy-am335x-control.h index b96594d1962c..e86b3165d69d 100644 --- a/drivers/usb/phy/am35x-phy-control.h +++ b/drivers/usb/phy/phy-am335x-control.h @@ -2,13 +2,15 @@ #define _AM335x_PHY_CONTROL_H_ struct phy_control { - void (*phy_power)(struct phy_control *phy_ctrl, u32 id, bool on); + void (*phy_power)(struct phy_control *phy_ctrl, u32 id, + enum usb_dr_mode dr_mode, bool on); void (*phy_wkup)(struct 
phy_control *phy_ctrl, u32 id, bool on); }; -static inline void phy_ctrl_power(struct phy_control *phy_ctrl, u32 id, bool on) +static inline void phy_ctrl_power(struct phy_control *phy_ctrl, u32 id, + enum usb_dr_mode dr_mode, bool on) { - phy_ctrl->phy_power(phy_ctrl, id, on); + phy_ctrl->phy_power(phy_ctrl, id, dr_mode, on); } static inline void phy_ctrl_wkup(struct phy_control *phy_ctrl, u32 id, bool on) diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 90b67a4ca221..39b424f7f629 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -8,21 +8,23 @@ #include <linux/regulator/consumer.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/usb/of.h> -#include "am35x-phy-control.h" +#include "phy-am335x-control.h" #include "phy-generic.h" struct am335x_phy { struct usb_phy_generic usb_phy_gen; struct phy_control *phy_ctrl; int id; + enum usb_dr_mode dr_mode; }; static int am335x_init(struct usb_phy *phy) { struct am335x_phy *am_phy = dev_get_drvdata(phy->dev); - phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, true); + phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, true); return 0; } @@ -30,7 +32,7 @@ static void am335x_shutdown(struct usb_phy *phy) { struct am335x_phy *am_phy = dev_get_drvdata(phy->dev); - phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false); + phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); } static int am335x_phy_probe(struct platform_device *pdev) @@ -46,12 +48,15 @@ static int am335x_phy_probe(struct platform_device *pdev) am_phy->phy_ctrl = am335x_get_phy_control(dev); if (!am_phy->phy_ctrl) return -EPROBE_DEFER; + am_phy->id = of_alias_get_id(pdev->dev.of_node, "phy"); if (am_phy->id < 0) { dev_err(&pdev->dev, "Missing PHY id: %d\n", am_phy->id); return am_phy->id; } + am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node); + ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL); if (ret) return ret; @@ -75,7 +80,7 @@ static int am335x_phy_probe(struct platform_device *pdev) */ device_set_wakeup_enable(dev, false); - phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false); + phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); return 0; } @@ -105,7 +110,7 @@ static int am335x_phy_suspend(struct device *dev) if (device_may_wakeup(dev)) phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, true); - phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false); + phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); return 0; } @@ -115,7 +120,7 @@ static int am335x_phy_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct am335x_phy *am_phy = platform_get_drvdata(pdev); - phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, true); + phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, true); if (device_may_wakeup(dev)) phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, false); diff --git a/drivers/usb/phy/phy-rcar-usb.c b/drivers/usb/phy/phy-rcar-usb.c deleted file mode 100644 index 1e09b8377885..000000000000 --- a/drivers/usb/phy/phy-rcar-usb.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Renesas R-Car USB phy driver - * - * Copyright (C) 2012-2013 Renesas Solutions Corp. - * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * Copyright (C) 2013 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
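The am335x change a little further up threads dr_mode into the PHY power path so that a host-only port is powered up without enabling the OTG VBUS-detect comparator. The decision reduces to a small pure function over the control register; a standalone sketch of that logic follows, where the bit positions are placeholders for the example rather than the SoC's real register layout:

#include <stdio.h>
#include <stdint.h>

/* placeholder bit assignments, for illustration only */
#define CM_PWRDN	(1u << 0)
#define OTG_PWRDN	(1u << 1)
#define OTGVDET_EN	(1u << 4)
#define OTGSESSEND_EN	(1u << 5)

enum dr_mode { DR_MODE_OTG, DR_MODE_HOST, DR_MODE_PERIPHERAL };

static uint32_t phy_power_bits(uint32_t val, enum dr_mode mode, int on)
{
	if (!on)
		return val | CM_PWRDN | OTG_PWRDN;

	if (mode == DR_MODE_HOST) {
		/* host-only: leave the VBUS-detect comparator powered down too */
		val &= ~(CM_PWRDN | OTG_PWRDN | OTGVDET_EN);
		val |= OTGSESSEND_EN;
	} else {
		val &= ~(CM_PWRDN | OTG_PWRDN);
		val |= OTGVDET_EN | OTGSESSEND_EN;
	}
	return val;
}

int main(void)
{
	printf("host on: %#x\n", phy_power_bits(0, DR_MODE_HOST, 1));
	printf("otg on:  %#x\n", phy_power_bits(0, DR_MODE_OTG, 1));
	printf("any off: %#x\n", phy_power_bits(0, DR_MODE_OTG, 0));
	return 0;
}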
- */ - -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/usb/otg.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/platform_data/usb-rcar-phy.h> - -/* REGS block */ -#define USBPCTRL0 0x00 -#define USBPCTRL1 0x04 -#define USBST 0x08 -#define USBEH0 0x0C -#define USBOH0 0x1C -#define USBCTL0 0x58 - -/* High-speed signal quality characteristic control registers (R8A7778 only) */ -#define HSQCTL1 0x24 -#define HSQCTL2 0x28 - -/* USBPCTRL0 */ -#define OVC2 (1 << 10) /* (R8A7779 only) */ - /* Switches the OVC input pin for port 2: */ - /* 1: USB_OVC2, 0: OVC2 */ -#define OVC1_VBUS1 (1 << 9) /* Switches the OVC input pin for port 1: */ - /* 1: USB_OVC1, 0: OVC1/VBUS1 */ - /* Function mode: set to 0 */ -#define OVC0 (1 << 8) /* Switches the OVC input pin for port 0: */ - /* 1: USB_OVC0 pin, 0: OVC0 */ -#define OVC2_ACT (1 << 6) /* (R8A7779 only) */ - /* Host mode: OVC2 polarity: */ - /* 1: active-high, 0: active-low */ -#define PENC (1 << 4) /* Function mode: output level of PENC1 pin: */ - /* 1: high, 0: low */ -#define OVC0_ACT (1 << 3) /* Host mode: OVC0 polarity: */ - /* 1: active-high, 0: active-low */ -#define OVC1_ACT (1 << 1) /* Host mode: OVC1 polarity: */ - /* 1: active-high, 0: active-low */ - /* Function mode: be sure to set to 1 */ -#define PORT1 (1 << 0) /* Selects port 1 mode: */ - /* 1: function, 0: host */ -/* USBPCTRL1 */ -#define PHY_RST (1 << 2) -#define PLL_ENB (1 << 1) -#define PHY_ENB (1 << 0) - -/* USBST */ -#define ST_ACT (1 << 31) -#define ST_PLL (1 << 30) - -struct rcar_usb_phy_priv { - struct usb_phy phy; - spinlock_t lock; - - void __iomem *reg0; - void __iomem *reg1; - int counter; -}; - -#define usb_phy_to_priv(p) container_of(p, struct rcar_usb_phy_priv, phy) - - -/* - * USB initial/install operation. - * - * This function setup USB phy. - * The used value and setting order came from - * [USB :: Initial setting] on datasheet. - */ -static int rcar_usb_phy_init(struct usb_phy *phy) -{ - struct rcar_usb_phy_priv *priv = usb_phy_to_priv(phy); - struct device *dev = phy->dev; - struct rcar_phy_platform_data *pdata = dev_get_platdata(dev); - void __iomem *reg0 = priv->reg0; - void __iomem *reg1 = priv->reg1; - static const u8 ovcn_act[] = { OVC0_ACT, OVC1_ACT, OVC2_ACT }; - int i; - u32 val; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - if (priv->counter++ == 0) { - - /* - * USB phy start-up - */ - - /* (1) USB-PHY standby release */ - iowrite32(PHY_ENB, (reg0 + USBPCTRL1)); - - /* (2) start USB-PHY internal PLL */ - iowrite32(PHY_ENB | PLL_ENB, (reg0 + USBPCTRL1)); - - /* (3) set USB-PHY in accord with the conditions of usage */ - if (reg1) { - u32 hsqctl1 = pdata->ferrite_bead ? 0x41 : 0; - u32 hsqctl2 = pdata->ferrite_bead ? 
0x0d : 7; - - iowrite32(hsqctl1, reg1 + HSQCTL1); - iowrite32(hsqctl2, reg1 + HSQCTL2); - } - - /* (4) USB module status check */ - for (i = 0; i < 1024; i++) { - udelay(10); - val = ioread32(reg0 + USBST); - if (val == (ST_ACT | ST_PLL)) - break; - } - - if (val != (ST_ACT | ST_PLL)) { - dev_err(dev, "USB phy not ready\n"); - goto phy_init_end; - } - - /* (5) USB-PHY reset clear */ - iowrite32(PHY_ENB | PLL_ENB | PHY_RST, (reg0 + USBPCTRL1)); - - /* Board specific port settings */ - val = 0; - if (pdata->port1_func) - val |= PORT1; - if (pdata->penc1) - val |= PENC; - for (i = 0; i < 3; i++) { - /* OVCn bits follow each other in the right order */ - if (pdata->ovc_pin[i].select_3_3v) - val |= OVC0 << i; - /* OVCn_ACT bits are spaced by irregular intervals */ - if (pdata->ovc_pin[i].active_high) - val |= ovcn_act[i]; - } - iowrite32(val, (reg0 + USBPCTRL0)); - - /* - * Bus alignment settings - */ - - /* (1) EHCI bus alignment (little endian) */ - iowrite32(0x00000000, (reg0 + USBEH0)); - - /* (1) OHCI bus alignment (little endian) */ - iowrite32(0x00000000, (reg0 + USBOH0)); - } - -phy_init_end: - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - -static void rcar_usb_phy_shutdown(struct usb_phy *phy) -{ - struct rcar_usb_phy_priv *priv = usb_phy_to_priv(phy); - void __iomem *reg0 = priv->reg0; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - - if (priv->counter-- == 1) /* last user */ - iowrite32(0x00000000, (reg0 + USBPCTRL1)); - - spin_unlock_irqrestore(&priv->lock, flags); -} - -static int rcar_usb_phy_probe(struct platform_device *pdev) -{ - struct rcar_usb_phy_priv *priv; - struct resource *res0, *res1; - struct device *dev = &pdev->dev; - void __iomem *reg0, *reg1 = NULL; - int ret; - - if (!dev_get_platdata(&pdev->dev)) { - dev_err(dev, "No platform data\n"); - return -EINVAL; - } - - res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg0 = devm_ioremap_resource(dev, res0); - if (IS_ERR(reg0)) - return PTR_ERR(reg0); - - res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - reg1 = devm_ioremap_resource(dev, res1); - if (IS_ERR(reg1)) - return PTR_ERR(reg1); - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->reg0 = reg0; - priv->reg1 = reg1; - priv->counter = 0; - priv->phy.dev = dev; - priv->phy.label = dev_name(dev); - priv->phy.init = rcar_usb_phy_init; - priv->phy.shutdown = rcar_usb_phy_shutdown; - spin_lock_init(&priv->lock); - - ret = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2); - if (ret < 0) { - dev_err(dev, "usb phy addition error\n"); - return ret; - } - - platform_set_drvdata(pdev, priv); - - return ret; -} - -static int rcar_usb_phy_remove(struct platform_device *pdev) -{ - struct rcar_usb_phy_priv *priv = platform_get_drvdata(pdev); - - usb_remove_phy(&priv->phy); - - return 0; -} - -static struct platform_driver rcar_usb_phy_driver = { - .driver = { - .name = "rcar_usb_phy", - }, - .probe = rcar_usb_phy_probe, - .remove = rcar_usb_phy_remove, -}; - -module_platform_driver(rcar_usb_phy_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Renesas R-Car USB phy"); -MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 12741856a75c..014dbbd72132 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -25,7 +25,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> -#include <linux/usb/musb-omap.h> +#include 
<linux/usb/musb.h> #include <linux/usb/phy_companion.h> #include <linux/phy/omap_usb.h> #include <linux/i2c/twl.h> @@ -102,7 +102,7 @@ struct twl6030_usb { int irq1; int irq2; - enum omap_musb_vbus_id_status linkstat; + enum musb_vbus_id_status linkstat; u8 asleep; bool vbus_enable; const char *regulator; @@ -189,13 +189,13 @@ static ssize_t twl6030_usb_vbus_show(struct device *dev, spin_lock_irqsave(&twl->lock, flags); switch (twl->linkstat) { - case OMAP_MUSB_VBUS_VALID: + case MUSB_VBUS_VALID: ret = snprintf(buf, PAGE_SIZE, "vbus\n"); break; - case OMAP_MUSB_ID_GROUND: + case MUSB_ID_GROUND: ret = snprintf(buf, PAGE_SIZE, "id\n"); break; - case OMAP_MUSB_VBUS_OFF: + case MUSB_VBUS_OFF: ret = snprintf(buf, PAGE_SIZE, "none\n"); break; default: @@ -210,7 +210,7 @@ static DEVICE_ATTR(vbus, 0444, twl6030_usb_vbus_show, NULL); static irqreturn_t twl6030_usb_irq(int irq, void *_twl) { struct twl6030_usb *twl = _twl; - enum omap_musb_vbus_id_status status = OMAP_MUSB_UNKNOWN; + enum musb_vbus_id_status status = MUSB_UNKNOWN; u8 vbus_state, hw_state; int ret; @@ -225,14 +225,14 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl) dev_err(twl->dev, "Failed to enable usb3v3\n"); twl->asleep = 1; - status = OMAP_MUSB_VBUS_VALID; + status = MUSB_VBUS_VALID; twl->linkstat = status; - omap_musb_mailbox(status); + musb_mailbox(status); } else { - if (twl->linkstat != OMAP_MUSB_UNKNOWN) { - status = OMAP_MUSB_VBUS_OFF; + if (twl->linkstat != MUSB_UNKNOWN) { + status = MUSB_VBUS_OFF; twl->linkstat = status; - omap_musb_mailbox(status); + musb_mailbox(status); if (twl->asleep) { regulator_disable(twl->usb3v3); twl->asleep = 0; @@ -248,7 +248,7 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl) static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) { struct twl6030_usb *twl = _twl; - enum omap_musb_vbus_id_status status = OMAP_MUSB_UNKNOWN; + enum musb_vbus_id_status status = MUSB_UNKNOWN; u8 hw_state; int ret; @@ -262,9 +262,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) twl->asleep = 1; twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR); twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); - status = OMAP_MUSB_ID_GROUND; + status = MUSB_ID_GROUND; twl->linkstat = status; - omap_musb_mailbox(status); + musb_mailbox(status); } else { twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); @@ -334,7 +334,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) twl->dev = &pdev->dev; twl->irq1 = platform_get_irq(pdev, 0); twl->irq2 = platform_get_irq(pdev, 1); - twl->linkstat = OMAP_MUSB_UNKNOWN; + twl->linkstat = MUSB_UNKNOWN; twl->comparator.set_vbus = twl6030_set_vbus; twl->comparator.start_srp = twl6030_start_srp; diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index d82fa36c3465..5af9ca5d54ab 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -302,37 +302,37 @@ static void usbhsc_set_buswait(struct usbhs_priv *priv) */ /* commonly used on old SH-Mobile SoCs */ -static u32 usbhsc_default_pipe_type[] = { - USB_ENDPOINT_XFER_CONTROL, - USB_ENDPOINT_XFER_ISOC, - USB_ENDPOINT_XFER_ISOC, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, +static struct renesas_usbhs_driver_pipe_config usbhsc_default_pipe[] = { + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_CONTROL, 64, 0x00, 
false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x08, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x18, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x28, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x38, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x48, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x04, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x05, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x06, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x07, false), }; /* commonly used on newer SH-Mobile and R-Car SoCs */ -static u32 usbhsc_new_pipe_type[] = { - USB_ENDPOINT_XFER_CONTROL, - USB_ENDPOINT_XFER_ISOC, - USB_ENDPOINT_XFER_ISOC, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, +static struct renesas_usbhs_driver_pipe_config usbhsc_new_pipe[] = { + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_CONTROL, 64, 0x00, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x08, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x28, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x48, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x58, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x68, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x04, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x05, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x06, false), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x78, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x88, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x98, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xa8, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xb8, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xc8, true), + RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xd8, true), }; /* @@ -481,6 +481,15 @@ static const struct of_device_id usbhs_of_match[] = { .compatible = "renesas,usbhs-r8a7795", .data = (void *)USBHS_TYPE_RCAR_GEN2, }, + { + .compatible = "renesas,rcar-gen2-usbhs", + .data = (void *)USBHS_TYPE_RCAR_GEN2, + }, + { + /* Gen3 is compatible with Gen2 */ + .compatible = "renesas,rcar-gen3-usbhs", + .data = (void *)USBHS_TYPE_RCAR_GEN2, + }, { }, }; MODULE_DEVICE_TABLE(of, usbhs_of_match); @@ -564,10 +573,9 @@ static int usbhs_probe(struct platform_device *pdev) switch (priv->dparam.type) { case USBHS_TYPE_RCAR_GEN2: priv->pfunc = usbhs_rcar2_ops; - if (!priv->dparam.pipe_type) { - priv->dparam.pipe_type = usbhsc_new_pipe_type; - priv->dparam.pipe_size = - ARRAY_SIZE(usbhsc_new_pipe_type); + if (!priv->dparam.pipe_configs) { + priv->dparam.pipe_configs = usbhsc_new_pipe; + priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe); } break; default: @@ -586,9 +594,9 @@ static int usbhs_probe(struct platform_device *pdev) dfunc->notify_hotplug = usbhsc_drvcllbck_notify_hotplug; /* set default param if platform doesn't have */ - if (!priv->dparam.pipe_type) { - priv->dparam.pipe_type = usbhsc_default_pipe_type; - priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_default_pipe_type); + if (!priv->dparam.pipe_configs) { + priv->dparam.pipe_configs = usbhsc_default_pipe; + priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_default_pipe); } if 
(!priv->dparam.pio_dma_border) priv->dparam.pio_dma_border = 64; /* 64byte */ diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 8f7a78e70975..657f9672ceba 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -1042,6 +1042,8 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) struct usbhsg_gpriv *gpriv; struct usbhsg_uep *uep; struct device *dev = usbhs_priv_to_dev(priv); + struct renesas_usbhs_driver_pipe_config *pipe_configs = + usbhs_get_dparam(priv, pipe_configs); int pipe_size = usbhs_get_dparam(priv, pipe_size); int i; int ret; @@ -1111,13 +1113,16 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) gpriv->gadget.ep0 = &uep->ep; usb_ep_set_maxpacket_limit(&uep->ep, 64); uep->ep.caps.type_control = true; - } - /* init normal pipe */ - else { - usb_ep_set_maxpacket_limit(&uep->ep, 512); - uep->ep.caps.type_iso = true; - uep->ep.caps.type_bulk = true; - uep->ep.caps.type_int = true; + } else { + /* init normal pipe */ + if (pipe_configs[i].type == USB_ENDPOINT_XFER_ISOC) + uep->ep.caps.type_iso = true; + if (pipe_configs[i].type == USB_ENDPOINT_XFER_BULK) + uep->ep.caps.type_bulk = true; + if (pipe_configs[i].type == USB_ENDPOINT_XFER_INT) + uep->ep.caps.type_int = true; + usb_ep_set_maxpacket_limit(&uep->ep, + pipe_configs[i].bufsize); list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list); } uep->ep.caps.dir_in = true; diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c index bd050359926c..1a8e4c45c4c5 100644 --- a/drivers/usb/renesas_usbhs/mod_host.c +++ b/drivers/usb/renesas_usbhs/mod_host.c @@ -1414,7 +1414,8 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv) { struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); struct usbhs_pipe *pipe; - u32 *pipe_type = usbhs_get_dparam(priv, pipe_type); + struct renesas_usbhs_driver_pipe_config *pipe_configs = + usbhs_get_dparam(priv, pipe_configs); int pipe_size = usbhs_get_dparam(priv, pipe_size); int old_type, dir_in, i; @@ -1442,15 +1443,15 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv) * USB_ENDPOINT_XFER_BULK -> dir in * ... 
*/ - dir_in = (pipe_type[i] == old_type); - old_type = pipe_type[i]; + dir_in = (pipe_configs[i].type == old_type); + old_type = pipe_configs[i].type; - if (USB_ENDPOINT_XFER_CONTROL == pipe_type[i]) { + if (USB_ENDPOINT_XFER_CONTROL == pipe_configs[i].type) { pipe = usbhs_dcp_malloc(priv); usbhsh_hpriv_to_dcp(hpriv) = pipe; } else { pipe = usbhs_pipe_malloc(priv, - pipe_type[i], + pipe_configs[i].type, dir_in); } diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 4f9c3356127a..0e95d2925dc5 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -44,6 +44,15 @@ char *usbhs_pipe_name(struct usbhs_pipe *pipe) return usbhsp_pipe_name[usbhs_pipe_type(pipe)]; } +static struct renesas_usbhs_driver_pipe_config +*usbhsp_get_pipe_config(struct usbhs_priv *priv, int pipe_num) +{ + struct renesas_usbhs_driver_pipe_config *pipe_configs = + usbhs_get_dparam(priv, pipe_configs); + + return &pipe_configs[pipe_num]; +} + /* * DCPCTR/PIPEnCTR functions */ @@ -384,18 +393,6 @@ void usbhs_pipe_set_trans_count_if_bulk(struct usbhs_pipe *pipe, int len) /* * pipe setup */ -static int usbhsp_possible_double_buffer(struct usbhs_pipe *pipe) -{ - /* - * only ISO / BULK pipe can use double buffer - */ - if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK) || - usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) - return 1; - - return 0; -} - static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe, int is_host, int dir_in) @@ -412,7 +409,6 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe, [USB_ENDPOINT_XFER_INT] = TYPE_INT, [USB_ENDPOINT_XFER_ISOC] = TYPE_ISO, }; - int is_double = usbhsp_possible_double_buffer(pipe); if (usbhs_pipe_is_dcp(pipe)) return -EINVAL; @@ -434,10 +430,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe, usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK)) bfre = 0; /* FIXME */ - /* DBLB */ - if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC) || - usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK)) - dblb = (is_double) ? 
DBLB : 0; + /* DBLB: see usbhs_pipe_config_update() */ /* CNTMD */ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK)) @@ -473,13 +466,13 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe, static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe) { struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); - struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv); struct device *dev = usbhs_priv_to_dev(priv); int pipe_num = usbhs_pipe_number(pipe); - int is_double = usbhsp_possible_double_buffer(pipe); u16 buff_size; u16 bufnmb; u16 bufnmb_cnt; + struct renesas_usbhs_driver_pipe_config *pipe_config = + usbhsp_get_pipe_config(priv, pipe_num); /* * PIPEBUF @@ -489,56 +482,13 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe) * - "Features" - "Pipe configuration" * - "Operation" - "FIFO Buffer Memory" * - "Operation" - "Pipe Control" - * - * ex) if pipe6 - pipe9 are USB_ENDPOINT_XFER_INT (SH7724) - * - * BUFNMB: PIPE - * 0: pipe0 (DCP 256byte) - * 1: - - * 2: - - * 3: - - * 4: pipe6 (INT 64byte) - * 5: pipe7 (INT 64byte) - * 6: pipe8 (INT 64byte) - * 7: pipe9 (INT 64byte) - * 8 - xx: free (for BULK, ISOC) */ - - /* - * FIXME - * - * it doesn't have good buffer allocator - * - * DCP : 256 byte - * BULK: 512 byte - * INT : 64 byte - * ISOC: 512 byte - */ - if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_CONTROL)) - buff_size = 256; - else if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT)) - buff_size = 64; - else - buff_size = 512; + buff_size = pipe_config->bufsize; + bufnmb = pipe_config->bufnum; /* change buff_size to register value */ bufnmb_cnt = (buff_size / 64) - 1; - /* BUFNMB has been reserved for INT pipe - * see above */ - if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT)) { - bufnmb = pipe_num - 2; - } else { - bufnmb = info->bufnmb_last; - info->bufnmb_last += bufnmb_cnt + 1; - - /* - * double buffer - */ - if (is_double) - info->bufnmb_last += bufnmb_cnt + 1; - } - dev_dbg(dev, "pipe : %d : buff_size 0x%x: bufnmb 0x%x\n", pipe_num, buff_size, bufnmb); @@ -549,8 +499,13 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe) void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel, u16 epnum, u16 maxp) { + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + int pipe_num = usbhs_pipe_number(pipe); + struct renesas_usbhs_driver_pipe_config *pipe_config = + usbhsp_get_pipe_config(priv, pipe_num); + u16 dblb = pipe_config->double_buf ? DBLB : 0; + if (devsel > 0xA) { - struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct device *dev = usbhs_priv_to_dev(priv); dev_err(dev, "devsel error %d\n", devsel); @@ -568,7 +523,7 @@ void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel, maxp); if (!usbhs_pipe_is_dcp(pipe)) - usbhsp_pipe_cfg_set(pipe, 0x000F, epnum); + usbhsp_pipe_cfg_set(pipe, 0x000F | DBLB, epnum | dblb); } /* @@ -708,23 +663,7 @@ void usbhs_pipe_init(struct usbhs_priv *priv, struct usbhs_pipe *pipe; int i; - /* - * FIXME - * - * driver needs good allocator. 
- * - * find first free buffer area (BULK, ISOC) - * (DCP, INT area is fixed) - * - * buffer number 0 - 3 have been reserved for DCP - * see - * usbhsp_to_bufnmb - */ - info->bufnmb_last = 4; usbhs_for_each_pipe_with_dcp(pipe, priv, i) { - if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT)) - info->bufnmb_last++; - usbhsp_flags_init(pipe); pipe->fifo = NULL; pipe->mod_private = NULL; @@ -851,12 +790,13 @@ int usbhs_pipe_probe(struct usbhs_priv *priv) struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv); struct usbhs_pipe *pipe; struct device *dev = usbhs_priv_to_dev(priv); - u32 *pipe_type = usbhs_get_dparam(priv, pipe_type); + struct renesas_usbhs_driver_pipe_config *pipe_configs = + usbhs_get_dparam(priv, pipe_configs); int pipe_size = usbhs_get_dparam(priv, pipe_size); int i; /* This driver expects 1st pipe is DCP */ - if (pipe_type[0] != USB_ENDPOINT_XFER_CONTROL) { + if (pipe_configs[0].type != USB_ENDPOINT_XFER_CONTROL) { dev_err(dev, "1st PIPE is not DCP\n"); return -EINVAL; } @@ -876,10 +816,10 @@ int usbhs_pipe_probe(struct usbhs_priv *priv) pipe->priv = priv; usbhs_pipe_type(pipe) = - pipe_type[i] & USB_ENDPOINT_XFERTYPE_MASK; + pipe_configs[i].type & USB_ENDPOINT_XFERTYPE_MASK; dev_dbg(dev, "pipe %x\t: %s\n", - i, usbhsp_pipe_name[pipe_type[i]]); + i, usbhsp_pipe_name[pipe_configs[i].type]); } return 0; diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h index b0bc7b603016..3212ab51e844 100644 --- a/drivers/usb/renesas_usbhs/pipe.h +++ b/drivers/usb/renesas_usbhs/pipe.h @@ -46,7 +46,6 @@ struct usbhs_pipe { struct usbhs_pipe_info { struct usbhs_pipe *pipe; int size; /* array size of "pipe" */ - int bufnmb_last; /* FIXME : driver needs good allocator */ int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map); }; diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 56ecb8b5115d..f612dda9c977 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -475,6 +475,22 @@ config USB_SERIAL_MOS7840 To compile this driver as a module, choose M here: the module will be called mos7840. If unsure, choose N. +config USB_SERIAL_MXUPORT11 + tristate "USB Moxa UPORT 11x0 Serial Driver" + ---help--- + Say Y here if you want to use a MOXA UPort 11x0 Serial hub. + + This driver supports: + + - UPort 1110 : 1 port RS-232 USB to Serial Hub. + - UPort 1130 : 1 port RS-422/485 USB to Serial Hub. + - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation. + - UPort 1150 : 1 port RS-232/422/485 USB to Serial Hub. + - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation. + + To compile this driver as a module, choose M here: the + module will be called mxu11x0. 
+ config USB_SERIAL_MXUPORT tristate "USB Moxa UPORT Serial Driver" ---help--- diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index 349d9df0895f..f3fa5e53702d 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o +obj-$(CONFIG_USB_SERIAL_MXUPORT11) += mxu11x0.o obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 7d4f51a32e66..9b90ad747d87 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -38,13 +38,14 @@ static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, struct ktermios *); static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, struct ktermios*); +static bool cp210x_tx_empty(struct usb_serial_port *port); static int cp210x_tiocmget(struct tty_struct *); static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int); static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int, unsigned int); static void cp210x_break_ctl(struct tty_struct *, int); -static int cp210x_startup(struct usb_serial *); -static void cp210x_release(struct usb_serial *); +static int cp210x_port_probe(struct usb_serial_port *); +static int cp210x_port_remove(struct usb_serial_port *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); static const struct usb_device_id id_table[] = { @@ -160,6 +161,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ @@ -196,8 +198,9 @@ static const struct usb_device_id id_table[] = { MODULE_DEVICE_TABLE(usb, id_table); -struct cp210x_serial_private { +struct cp210x_port_private { __u8 bInterfaceNumber; + bool has_swapped_line_ctl; }; static struct usb_serial_driver cp210x_device = { @@ -213,10 +216,11 @@ static struct usb_serial_driver cp210x_device = { .close = cp210x_close, .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, + .tx_empty = cp210x_tx_empty, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, - .attach = cp210x_startup, - .release = cp210x_release, + .port_probe = cp210x_port_probe, + .port_remove = cp210x_port_remove, .dtr_rts = cp210x_dtr_rts }; @@ -299,6 +303,25 @@ static struct usb_serial_driver * const serial_drivers[] = { #define CONTROL_WRITE_DTR 0x0100 #define CONTROL_WRITE_RTS 0x0200 +/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ +struct cp210x_comm_status { + __le32 ulErrors; + __le32 ulHoldReasons; + __le32 ulAmountInInQueue; + __le32 ulAmountInOutQueue; + u8 bEofReceived; + u8 bWaitForImmediate; + u8 bReserved; +} __packed; + +/* + * CP210X_PURGE - 16 bits passed in wValue of USB request. + * SiLabs app note AN571 gives a strange description of the 4 bits: + * bit 0 or bit 2 clears the transmit queue and 1 or 3 receive. 
+ * writing 1 to all, however, purges cp2108 well enough to avoid the hang. + */ +#define PURGE_ALL 0x000f + /* * cp210x_get_config * Reads from the CP210x configuration registers @@ -310,7 +333,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request, unsigned int *data, int size) { struct usb_serial *serial = port->serial; - struct cp210x_serial_private *spriv = usb_get_serial_data(serial); + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); __le32 *buf; int result, i, length; @@ -324,7 +347,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request, /* Issue the request, attempting to read 'size' bytes */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), request, REQTYPE_INTERFACE_TO_HOST, 0x0000, - spriv->bInterfaceNumber, buf, size, + port_priv->bInterfaceNumber, buf, size, USB_CTRL_GET_TIMEOUT); /* Convert data into an array of integers */ @@ -355,7 +378,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, unsigned int *data, int size) { struct usb_serial *serial = port->serial; - struct cp210x_serial_private *spriv = usb_get_serial_data(serial); + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); __le32 *buf; int result, i, length; @@ -374,13 +397,13 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_INTERFACE, 0x0000, - spriv->bInterfaceNumber, buf, size, + port_priv->bInterfaceNumber, buf, size, USB_CTRL_SET_TIMEOUT); } else { result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_INTERFACE, data[0], - spriv->bInterfaceNumber, NULL, 0, + port_priv->bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); } @@ -410,6 +433,60 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port, } /* + * Detect CP2108 GET_LINE_CTL bug and activate workaround. + * Write a known good value 0x800, read it back. + * If it comes back swapped the bug is detected. + * Preserve the original register value. + */ +static int cp210x_detect_swapped_line_ctl(struct usb_serial_port *port) +{ + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); + unsigned int line_ctl_save; + unsigned int line_ctl_test; + int err; + + err = cp210x_get_config(port, CP210X_GET_LINE_CTL, &line_ctl_save, 2); + if (err) + return err; + + line_ctl_test = 0x800; + err = cp210x_set_config(port, CP210X_SET_LINE_CTL, &line_ctl_test, 2); + if (err) + return err; + + err = cp210x_get_config(port, CP210X_GET_LINE_CTL, &line_ctl_test, 2); + if (err) + return err; + + if (line_ctl_test == 8) { + port_priv->has_swapped_line_ctl = true; + line_ctl_save = swab16((u16)line_ctl_save); + } + + return cp210x_set_config(port, CP210X_SET_LINE_CTL, &line_ctl_save, 2); +} + +/* + * Must always be called instead of cp210x_get_config(CP210X_GET_LINE_CTL) + * to workaround cp2108 bug and get correct value. 
+ */ +static int cp210x_get_line_ctl(struct usb_serial_port *port, unsigned int *ctl) +{ + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); + int err; + + err = cp210x_get_config(port, CP210X_GET_LINE_CTL, ctl, 2); + if (err) + return err; + + /* Workaround swapped bytes in 16-bit value from CP210X_GET_LINE_CTL */ + if (port_priv->has_swapped_line_ctl) + *ctl = swab16((u16)(*ctl)); + + return 0; +} + +/* * cp210x_quantise_baudrate * Quantises the baud rate as per AN205 Table 1 */ @@ -474,11 +551,63 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) static void cp210x_close(struct usb_serial_port *port) { + unsigned int purge_ctl; + usb_serial_generic_close(port); + + /* Clear both queues; cp2108 needs this to avoid an occasional hang */ + purge_ctl = PURGE_ALL; + cp210x_set_config(port, CP210X_PURGE, &purge_ctl, 2); + cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE); } /* + * Read how many bytes are waiting in the TX queue. + */ +static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port, + u32 *count) +{ + struct usb_serial *serial = port->serial; + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); + struct cp210x_comm_status *sts; + int result; + + sts = kmalloc(sizeof(*sts), GFP_KERNEL); + if (!sts) + return -ENOMEM; + + result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + CP210X_GET_COMM_STATUS, REQTYPE_INTERFACE_TO_HOST, + 0, port_priv->bInterfaceNumber, sts, sizeof(*sts), + USB_CTRL_GET_TIMEOUT); + if (result == sizeof(*sts)) { + *count = le32_to_cpu(sts->ulAmountInOutQueue); + result = 0; + } else { + dev_err(&port->dev, "failed to get comm status: %d\n", result); + if (result >= 0) + result = -EPROTO; + } + + kfree(sts); + + return result; +} + +static bool cp210x_tx_empty(struct usb_serial_port *port) +{ + int err; + u32 count; + + err = cp210x_get_tx_queue_byte_count(port, &count); + if (err) + return true; + + return !count; +} + +/* * cp210x_get_termios * Reads the baud rate, data bits, parity, stop bits and flow control mode * from the device, corrects any unsupported values, and configures the @@ -519,7 +648,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, cflag = *cflagp; - cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); + cp210x_get_line_ctl(port, &bits); cflag &= ~CSIZE; switch (bits & BITS_DATA_MASK) { case BITS_DATA_5: @@ -687,7 +816,7 @@ static void cp210x_set_termios(struct tty_struct *tty, /* If the number of data bits is to be updated */ if ((cflag & CSIZE) != (old_cflag & CSIZE)) { - cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); + cp210x_get_line_ctl(port, &bits); bits &= ~BITS_DATA_MASK; switch (cflag & CSIZE) { case CS5: @@ -721,7 +850,7 @@ static void cp210x_set_termios(struct tty_struct *tty, if ((cflag & (PARENB|PARODD|CMSPAR)) != (old_cflag & (PARENB|PARODD|CMSPAR))) { - cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); + cp210x_get_line_ctl(port, &bits); bits &= ~BITS_PARITY_MASK; if (cflag & PARENB) { if (cflag & CMSPAR) { @@ -747,7 +876,7 @@ static void cp210x_set_termios(struct tty_struct *tty, } if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) { - cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); + cp210x_get_line_ctl(port, &bits); bits &= ~BITS_STOP_MASK; if (cflag & CSTOPB) { bits |= BITS_STOP_2; @@ -862,29 +991,39 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state) cp210x_set_config(port, CP210X_SET_BREAK, &state, 2); } -static int cp210x_startup(struct usb_serial 
*serial) +static int cp210x_port_probe(struct usb_serial_port *port) { + struct usb_serial *serial = port->serial; struct usb_host_interface *cur_altsetting; - struct cp210x_serial_private *spriv; + struct cp210x_port_private *port_priv; + int ret; - spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); - if (!spriv) + port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); + if (!port_priv) return -ENOMEM; cur_altsetting = serial->interface->cur_altsetting; - spriv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber; + port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber; + + usb_set_serial_port_data(port, port_priv); - usb_set_serial_data(serial, spriv); + ret = cp210x_detect_swapped_line_ctl(port); + if (ret) { + kfree(port_priv); + return ret; + } return 0; } -static void cp210x_release(struct usb_serial *serial) +static int cp210x_port_remove(struct usb_serial_port *port) { - struct cp210x_serial_private *spriv; + struct cp210x_port_private *port_priv; + + port_priv = usb_get_serial_port_data(port); + kfree(port_priv); - spriv = usb_get_serial_data(serial); - kfree(spriv); + return 0; } module_usb_serial_driver(serial_drivers, id_table); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index c0866971db2b..f49327d20ee8 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -1046,9 +1046,8 @@ static void edge_close(struct usb_serial_port *port) edge_port->closePending = true; - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPChase))) { + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPChase) { /* flush and chase */ edge_port->chaseResponsePending = true; @@ -1061,9 +1060,8 @@ static void edge_close(struct usb_serial_port *port) edge_port->chaseResponsePending = false; } - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPClose))) { + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPClose) { /* close the port */ dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CLOSE_PORT\n", __func__); send_iosp_ext_cmd(edge_port, IOSP_CMD_CLOSE_PORT, 0); @@ -1612,9 +1610,8 @@ static void edge_break(struct tty_struct *tty, int break_state) struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial); int status; - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPChase))) { + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPChase) { /* flush and chase */ edge_port->chaseResponsePending = true; @@ -1628,9 +1625,8 @@ static void edge_break(struct tty_struct *tty, int break_state) } } - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPSetClrBreak))) { + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPSetClrBreak) { if (break_state == -1) { dev_dbg(&port->dev, "%s - Sending IOSP_CMD_SET_BREAK\n", __func__); status = send_iosp_ext_cmd(edge_port, @@ -2465,9 +2461,8 @@ static void change_port_settings(struct tty_struct *tty, unsigned char stop_char = STOP_CHAR(tty); unsigned char start_char = START_CHAR(tty); - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPSetXChar))) { + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPSetXChar) { send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XON_CHAR, start_char); send_iosp_ext_cmd(edge_port, @@ -2494,13 
+2489,11 @@ static void change_port_settings(struct tty_struct *tty, } /* Set flow control to the configured value */ - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPSetRxFlow))) + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPSetRxFlow) send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow); - if ((!edge_serial->is_epic) || - ((edge_serial->is_epic) && - (edge_serial->epic_descriptor.Supports.IOSPSetTxFlow))) + if (!edge_serial->is_epic || + edge_serial->epic_descriptor.Supports.IOSPSetTxFlow) send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_TX_FLOW, txFlow); diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index f51a5d52c0ed..ec1b8f2c1183 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c @@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty, * through. Since this has a reasonably high failure rate, we retry * several times. */ - while (retries--) { + while (retries) { + retries--; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 0x1, 0, NULL, 0, 100); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 8ac9b55f05af..2c69bfcdacc6 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -635,7 +635,7 @@ static void mos7840_interrupt_callback(struct urb *urb) * Byte 4 IIR Port 4 (port.number is 3) * Byte 5 FIFO status for both */ - if (length && length > 5) { + if (length > 5) { dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n"); return; } diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c new file mode 100644 index 000000000000..e3c3f57c2d82 --- /dev/null +++ b/drivers/usb/serial/mxu11x0.c @@ -0,0 +1,986 @@ +/* + * USB Moxa UPORT 11x0 Serial Driver + * + * Copyright (C) 2007 MOXA Technologies Co., Ltd. + * Copyright (C) 2015 Mathieu Othacehe <m.othacehe@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * + * Supports the following Moxa USB to serial converters: + * UPort 1110, 1 port RS-232 USB to Serial Hub. + * UPort 1130, 1 port RS-422/485 USB to Serial Hub. + * UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation + * protection. + * UPort 1150, 1 port RS-232/422/485 USB to Serial Hub. + * UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation + * protection. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/firmware.h> +#include <linux/jiffies.h> +#include <linux/serial.h> +#include <linux/serial_reg.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <linux/uaccess.h> +#include <linux/usb.h> +#include <linux/usb/serial.h> + +/* Vendor and product ids */ +#define MXU1_VENDOR_ID 0x110a +#define MXU1_1110_PRODUCT_ID 0x1110 +#define MXU1_1130_PRODUCT_ID 0x1130 +#define MXU1_1150_PRODUCT_ID 0x1150 +#define MXU1_1151_PRODUCT_ID 0x1151 +#define MXU1_1131_PRODUCT_ID 0x1131 + +/* Commands */ +#define MXU1_GET_VERSION 0x01 +#define MXU1_GET_PORT_STATUS 0x02 +#define MXU1_GET_PORT_DEV_INFO 0x03 +#define MXU1_GET_CONFIG 0x04 +#define MXU1_SET_CONFIG 0x05 +#define MXU1_OPEN_PORT 0x06 +#define MXU1_CLOSE_PORT 0x07 +#define MXU1_START_PORT 0x08 +#define MXU1_STOP_PORT 0x09 +#define MXU1_TEST_PORT 0x0A +#define MXU1_PURGE_PORT 0x0B +#define MXU1_RESET_EXT_DEVICE 0x0C +#define MXU1_GET_OUTQUEUE 0x0D +#define MXU1_WRITE_DATA 0x80 +#define MXU1_READ_DATA 0x81 +#define MXU1_REQ_TYPE_CLASS 0x82 + +/* Module identifiers */ +#define MXU1_I2C_PORT 0x01 +#define MXU1_IEEE1284_PORT 0x02 +#define MXU1_UART1_PORT 0x03 +#define MXU1_UART2_PORT 0x04 +#define MXU1_RAM_PORT 0x05 + +/* Modem status */ +#define MXU1_MSR_DELTA_CTS 0x01 +#define MXU1_MSR_DELTA_DSR 0x02 +#define MXU1_MSR_DELTA_RI 0x04 +#define MXU1_MSR_DELTA_CD 0x08 +#define MXU1_MSR_CTS 0x10 +#define MXU1_MSR_DSR 0x20 +#define MXU1_MSR_RI 0x40 +#define MXU1_MSR_CD 0x80 +#define MXU1_MSR_DELTA_MASK 0x0F +#define MXU1_MSR_MASK 0xF0 + +/* Line status */ +#define MXU1_LSR_OVERRUN_ERROR 0x01 +#define MXU1_LSR_PARITY_ERROR 0x02 +#define MXU1_LSR_FRAMING_ERROR 0x04 +#define MXU1_LSR_BREAK 0x08 +#define MXU1_LSR_ERROR 0x0F +#define MXU1_LSR_RX_FULL 0x10 +#define MXU1_LSR_TX_EMPTY 0x20 + +/* Modem control */ +#define MXU1_MCR_LOOP 0x04 +#define MXU1_MCR_DTR 0x10 +#define MXU1_MCR_RTS 0x20 + +/* Mask settings */ +#define MXU1_UART_ENABLE_RTS_IN 0x0001 +#define MXU1_UART_DISABLE_RTS 0x0002 +#define MXU1_UART_ENABLE_PARITY_CHECKING 0x0008 +#define MXU1_UART_ENABLE_DSR_OUT 0x0010 +#define MXU1_UART_ENABLE_CTS_OUT 0x0020 +#define MXU1_UART_ENABLE_X_OUT 0x0040 +#define MXU1_UART_ENABLE_XA_OUT 0x0080 +#define MXU1_UART_ENABLE_X_IN 0x0100 +#define MXU1_UART_ENABLE_DTR_IN 0x0800 +#define MXU1_UART_DISABLE_DTR 0x1000 +#define MXU1_UART_ENABLE_MS_INTS 0x2000 +#define MXU1_UART_ENABLE_AUTO_START_DMA 0x4000 +#define MXU1_UART_SEND_BREAK_SIGNAL 0x8000 + +/* Parity */ +#define MXU1_UART_NO_PARITY 0x00 +#define MXU1_UART_ODD_PARITY 0x01 +#define MXU1_UART_EVEN_PARITY 0x02 +#define MXU1_UART_MARK_PARITY 0x03 +#define MXU1_UART_SPACE_PARITY 0x04 + +/* Stop bits */ +#define MXU1_UART_1_STOP_BITS 0x00 +#define MXU1_UART_1_5_STOP_BITS 0x01 +#define MXU1_UART_2_STOP_BITS 0x02 + +/* Bits per character */ +#define MXU1_UART_5_DATA_BITS 0x00 +#define MXU1_UART_6_DATA_BITS 0x01 +#define MXU1_UART_7_DATA_BITS 0x02 +#define MXU1_UART_8_DATA_BITS 0x03 + +/* Operation modes */ +#define MXU1_UART_232 0x00 +#define MXU1_UART_485_RECEIVER_DISABLED 0x01 +#define MXU1_UART_485_RECEIVER_ENABLED 0x02 + +/* Pipe transfer mode and timeout */ +#define MXU1_PIPE_MODE_CONTINUOUS 0x01 +#define MXU1_PIPE_MODE_MASK 0x03 +#define MXU1_PIPE_TIMEOUT_MASK 0x7C +#define MXU1_PIPE_TIMEOUT_ENABLE 0x80 + +/* Config struct */ +struct mxu1_uart_config { + __be16 wBaudRate; + __be16 wFlags; + u8 bDataBits; + u8 bParity; + u8 
bStopBits; + char cXon; + char cXoff; + u8 bUartMode; +} __packed; + +/* Purge modes */ +#define MXU1_PURGE_OUTPUT 0x00 +#define MXU1_PURGE_INPUT 0x80 + +/* Read/Write data */ +#define MXU1_RW_DATA_ADDR_SFR 0x10 +#define MXU1_RW_DATA_ADDR_IDATA 0x20 +#define MXU1_RW_DATA_ADDR_XDATA 0x30 +#define MXU1_RW_DATA_ADDR_CODE 0x40 +#define MXU1_RW_DATA_ADDR_GPIO 0x50 +#define MXU1_RW_DATA_ADDR_I2C 0x60 +#define MXU1_RW_DATA_ADDR_FLASH 0x70 +#define MXU1_RW_DATA_ADDR_DSP 0x80 + +#define MXU1_RW_DATA_UNSPECIFIED 0x00 +#define MXU1_RW_DATA_BYTE 0x01 +#define MXU1_RW_DATA_WORD 0x02 +#define MXU1_RW_DATA_DOUBLE_WORD 0x04 + +struct mxu1_write_data_bytes { + u8 bAddrType; + u8 bDataType; + u8 bDataCounter; + __be16 wBaseAddrHi; + __be16 wBaseAddrLo; + u8 bData[0]; +} __packed; + +/* Interrupt codes */ +#define MXU1_CODE_HARDWARE_ERROR 0xFF +#define MXU1_CODE_DATA_ERROR 0x03 +#define MXU1_CODE_MODEM_STATUS 0x04 + +static inline int mxu1_get_func_from_code(unsigned char code) +{ + return code & 0x0f; +} + +/* Download firmware max packet size */ +#define MXU1_DOWNLOAD_MAX_PACKET_SIZE 64 + +/* Firmware image header */ +struct mxu1_firmware_header { + __le16 wLength; + u8 bCheckSum; +} __packed; + +#define MXU1_UART_BASE_ADDR 0xFFA0 +#define MXU1_UART_OFFSET_MCR 0x0004 + +#define MXU1_BAUD_BASE 923077 + +#define MXU1_TRANSFER_TIMEOUT 2 +#define MXU1_DOWNLOAD_TIMEOUT 1000 +#define MXU1_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */ + +struct mxu1_port { + u8 msr; + u8 mcr; + u8 uart_mode; + spinlock_t spinlock; /* Protects msr */ + struct mutex mutex; /* Protects mcr */ + bool send_break; +}; + +struct mxu1_device { + u16 mxd_model; +}; + +static const struct usb_device_id mxu1_idtable[] = { + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, + { } +}; + +MODULE_DEVICE_TABLE(usb, mxu1_idtable); + +/* Write the given buffer out to the control pipe. 
*/ +static int mxu1_send_ctrl_data_urb(struct usb_serial *serial, + u8 request, + u16 value, u16 index, + void *data, size_t size) +{ + int status; + + status = usb_control_msg(serial->dev, + usb_sndctrlpipe(serial->dev, 0), + request, + (USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE), value, index, + data, size, + USB_CTRL_SET_TIMEOUT); + if (status < 0) { + dev_err(&serial->interface->dev, + "%s - usb_control_msg failed: %d\n", + __func__, status); + return status; + } + + if (status != size) { + dev_err(&serial->interface->dev, + "%s - short write (%d / %zd)\n", + __func__, status, size); + return -EIO; + } + + return 0; +} + +/* Send a vendor request without any data */ +static int mxu1_send_ctrl_urb(struct usb_serial *serial, + u8 request, u16 value, u16 index) +{ + return mxu1_send_ctrl_data_urb(serial, request, value, index, + NULL, 0); +} + +static int mxu1_download_firmware(struct usb_serial *serial, + const struct firmware *fw_p) +{ + int status = 0; + int buffer_size; + int pos; + int len; + int done; + u8 cs = 0; + u8 *buffer; + struct usb_device *dev = serial->dev; + struct mxu1_firmware_header *header; + unsigned int pipe; + + pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress); + + buffer_size = fw_p->size + sizeof(*header); + buffer = kmalloc(buffer_size, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + memcpy(buffer, fw_p->data, fw_p->size); + memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size); + + for (pos = sizeof(*header); pos < buffer_size; pos++) + cs = (u8)(cs + buffer[pos]); + + header = (struct mxu1_firmware_header *)buffer; + header->wLength = cpu_to_le16(buffer_size - sizeof(*header)); + header->bCheckSum = cs; + + dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__); + + for (pos = 0; pos < buffer_size; pos += done) { + len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE); + + status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done, + MXU1_DOWNLOAD_TIMEOUT); + if (status) + break; + } + + kfree(buffer); + + if (status) { + dev_err(&dev->dev, "failed to download firmware: %d\n", status); + return status; + } + + msleep_interruptible(100); + usb_reset_device(dev); + + dev_dbg(&dev->dev, "%s - download successful\n", __func__); + + return 0; +} + +static int mxu1_port_probe(struct usb_serial_port *port) +{ + struct mxu1_port *mxport; + struct mxu1_device *mxdev; + + if (!port->interrupt_in_urb) { + dev_err(&port->dev, "no interrupt urb\n"); + return -ENODEV; + } + + mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL); + if (!mxport) + return -ENOMEM; + + spin_lock_init(&mxport->spinlock); + mutex_init(&mxport->mutex); + + mxdev = usb_get_serial_data(port->serial); + + switch (mxdev->mxd_model) { + case MXU1_1110_PRODUCT_ID: + case MXU1_1150_PRODUCT_ID: + case MXU1_1151_PRODUCT_ID: + mxport->uart_mode = MXU1_UART_232; + break; + case MXU1_1130_PRODUCT_ID: + case MXU1_1131_PRODUCT_ID: + mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED; + break; + } + + usb_set_serial_port_data(port, mxport); + + port->port.closing_wait = + msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10); + port->port.drain_delay = 1; + + return 0; +} + +static int mxu1_startup(struct usb_serial *serial) +{ + struct mxu1_device *mxdev; + struct usb_device *dev = serial->dev; + struct usb_host_interface *cur_altsetting; + char fw_name[32]; + const struct firmware *fw_p = NULL; + int err; + + dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n", + __func__, le16_to_cpu(dev->descriptor.idProduct), 
+ dev->descriptor.bNumConfigurations, + dev->actconfig->desc.bConfigurationValue); + + /* create device structure */ + mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL); + if (!mxdev) + return -ENOMEM; + + usb_set_serial_data(serial, mxdev); + + mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct); + + cur_altsetting = serial->interface->cur_altsetting; + + /* if we have only 1 configuration, download firmware */ + if (cur_altsetting->desc.bNumEndpoints == 1) { + + snprintf(fw_name, + sizeof(fw_name), + "moxa/moxa-%04x.fw", + mxdev->mxd_model); + + err = request_firmware(&fw_p, fw_name, &serial->interface->dev); + if (err) { + dev_err(&serial->interface->dev, "failed to request firmware: %d\n", + err); + goto err_free_mxdev; + } + + err = mxu1_download_firmware(serial, fw_p); + if (err) + goto err_release_firmware; + + /* device is being reset */ + err = -ENODEV; + goto err_release_firmware; + } + + return 0; + +err_release_firmware: + release_firmware(fw_p); +err_free_mxdev: + kfree(mxdev); + + return err; +} + +static int mxu1_write_byte(struct usb_serial_port *port, u32 addr, + u8 mask, u8 byte) +{ + int status; + size_t size; + struct mxu1_write_data_bytes *data; + + dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n", + __func__, addr, mask, byte); + + size = sizeof(struct mxu1_write_data_bytes) + 2; + data = kzalloc(size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->bAddrType = MXU1_RW_DATA_ADDR_XDATA; + data->bDataType = MXU1_RW_DATA_BYTE; + data->bDataCounter = 1; + data->wBaseAddrHi = cpu_to_be16(addr >> 16); + data->wBaseAddrLo = cpu_to_be16(addr); + data->bData[0] = mask; + data->bData[1] = byte; + + status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0, + MXU1_RAM_PORT, data, size); + if (status < 0) + dev_err(&port->dev, "%s - failed: %d\n", __func__, status); + + kfree(data); + + return status; +} + +static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr) +{ + int status; + + status = mxu1_write_byte(port, + MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR, + MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP, + mcr); + return status; +} + +static void mxu1_set_termios(struct tty_struct *tty, + struct usb_serial_port *port, + struct ktermios *old_termios) +{ + struct mxu1_port *mxport = usb_get_serial_port_data(port); + struct mxu1_uart_config *config; + tcflag_t cflag, iflag; + speed_t baud; + int status; + unsigned int mcr; + + cflag = tty->termios.c_cflag; + iflag = tty->termios.c_iflag; + + if (old_termios && + !tty_termios_hw_change(&tty->termios, old_termios) && + tty->termios.c_iflag == old_termios->c_iflag) { + dev_dbg(&port->dev, "%s - nothing to change\n", __func__); + return; + } + + dev_dbg(&port->dev, + "%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag); + + if (old_termios) { + dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n", + __func__, + old_termios->c_cflag, + old_termios->c_iflag); + } + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) + return; + + /* these flags must be set */ + config->wFlags |= MXU1_UART_ENABLE_MS_INTS; + config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA; + if (mxport->send_break) + config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL; + config->bUartMode = mxport->uart_mode; + + switch (C_CSIZE(tty)) { + case CS5: + config->bDataBits = MXU1_UART_5_DATA_BITS; + break; + case CS6: + config->bDataBits = MXU1_UART_6_DATA_BITS; + break; + case CS7: + config->bDataBits = MXU1_UART_7_DATA_BITS; + break; + default: + case CS8: + config->bDataBits = 
MXU1_UART_8_DATA_BITS; + break; + } + + if (C_PARENB(tty)) { + config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING; + if (C_CMSPAR(tty)) { + if (C_PARODD(tty)) + config->bParity = MXU1_UART_MARK_PARITY; + else + config->bParity = MXU1_UART_SPACE_PARITY; + } else { + if (C_PARODD(tty)) + config->bParity = MXU1_UART_ODD_PARITY; + else + config->bParity = MXU1_UART_EVEN_PARITY; + } + } else { + config->bParity = MXU1_UART_NO_PARITY; + } + + if (C_CSTOPB(tty)) + config->bStopBits = MXU1_UART_2_STOP_BITS; + else + config->bStopBits = MXU1_UART_1_STOP_BITS; + + if (C_CRTSCTS(tty)) { + /* RTS flow control must be off to drop RTS for baud rate B0 */ + if (C_BAUD(tty) != B0) + config->wFlags |= MXU1_UART_ENABLE_RTS_IN; + config->wFlags |= MXU1_UART_ENABLE_CTS_OUT; + } + + if (I_IXOFF(tty) || I_IXON(tty)) { + config->cXon = START_CHAR(tty); + config->cXoff = STOP_CHAR(tty); + + if (I_IXOFF(tty)) + config->wFlags |= MXU1_UART_ENABLE_X_IN; + + if (I_IXON(tty)) + config->wFlags |= MXU1_UART_ENABLE_X_OUT; + } + + baud = tty_get_baud_rate(tty); + if (!baud) + baud = 9600; + config->wBaudRate = MXU1_BAUD_BASE / baud; + + dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n", + __func__, baud, config->wBaudRate, config->wFlags, + config->bDataBits, config->bParity, config->bStopBits, + config->cXon, config->cXoff, config->bUartMode); + + cpu_to_be16s(&config->wBaudRate); + cpu_to_be16s(&config->wFlags); + + status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0, + MXU1_UART1_PORT, config, + sizeof(*config)); + if (status) + dev_err(&port->dev, "cannot set config: %d\n", status); + + mutex_lock(&mxport->mutex); + mcr = mxport->mcr; + + if (C_BAUD(tty) == B0) + mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS); + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS; + + status = mxu1_set_mcr(port, mcr); + if (status) + dev_err(&port->dev, "cannot set modem control: %d\n", status); + else + mxport->mcr = mcr; + + mutex_unlock(&mxport->mutex); + + kfree(config); +} + +static int mxu1_get_serial_info(struct usb_serial_port *port, + struct serial_struct __user *ret_arg) +{ + struct serial_struct ret_serial; + unsigned cwait; + + if (!ret_arg) + return -EFAULT; + + cwait = port->port.closing_wait; + if (cwait != ASYNC_CLOSING_WAIT_NONE) + cwait = jiffies_to_msecs(cwait) / 10; + + memset(&ret_serial, 0, sizeof(ret_serial)); + + ret_serial.type = PORT_16550A; + ret_serial.line = port->minor; + ret_serial.port = 0; + ret_serial.xmit_fifo_size = port->bulk_out_size; + ret_serial.baud_base = MXU1_BAUD_BASE; + ret_serial.close_delay = 5*HZ; + ret_serial.closing_wait = cwait; + + if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg))) + return -EFAULT; + + return 0; +} + + +static int mxu1_set_serial_info(struct usb_serial_port *port, + struct serial_struct __user *new_arg) +{ + struct serial_struct new_serial; + unsigned cwait; + + if (copy_from_user(&new_serial, new_arg, sizeof(new_serial))) + return -EFAULT; + + cwait = new_serial.closing_wait; + if (cwait != ASYNC_CLOSING_WAIT_NONE) + cwait = msecs_to_jiffies(10 * new_serial.closing_wait); + + port->port.closing_wait = cwait; + + return 0; +} + +static int mxu1_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct usb_serial_port *port = tty->driver_data; + + switch (cmd) { + case TIOCGSERIAL: + return mxu1_get_serial_info(port, + (struct serial_struct __user *)arg); + case TIOCSSERIAL: + return 
mxu1_set_serial_info(port, + (struct serial_struct __user *)arg); + } + + return -ENOIOCTLCMD; +} + +static int mxu1_tiocmget(struct tty_struct *tty) +{ + struct usb_serial_port *port = tty->driver_data; + struct mxu1_port *mxport = usb_get_serial_port_data(port); + unsigned int result; + unsigned int msr; + unsigned int mcr; + unsigned long flags; + + mutex_lock(&mxport->mutex); + spin_lock_irqsave(&mxport->spinlock, flags); + + msr = mxport->msr; + mcr = mxport->mcr; + + spin_unlock_irqrestore(&mxport->spinlock, flags); + mutex_unlock(&mxport->mutex); + + result = ((mcr & MXU1_MCR_DTR) ? TIOCM_DTR : 0) | + ((mcr & MXU1_MCR_RTS) ? TIOCM_RTS : 0) | + ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP : 0) | + ((msr & MXU1_MSR_CTS) ? TIOCM_CTS : 0) | + ((msr & MXU1_MSR_CD) ? TIOCM_CAR : 0) | + ((msr & MXU1_MSR_RI) ? TIOCM_RI : 0) | + ((msr & MXU1_MSR_DSR) ? TIOCM_DSR : 0); + + dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result); + + return result; +} + +static int mxu1_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct usb_serial_port *port = tty->driver_data; + struct mxu1_port *mxport = usb_get_serial_port_data(port); + int err; + unsigned int mcr; + + mutex_lock(&mxport->mutex); + mcr = mxport->mcr; + + if (set & TIOCM_RTS) + mcr |= MXU1_MCR_RTS; + if (set & TIOCM_DTR) + mcr |= MXU1_MCR_DTR; + if (set & TIOCM_LOOP) + mcr |= MXU1_MCR_LOOP; + + if (clear & TIOCM_RTS) + mcr &= ~MXU1_MCR_RTS; + if (clear & TIOCM_DTR) + mcr &= ~MXU1_MCR_DTR; + if (clear & TIOCM_LOOP) + mcr &= ~MXU1_MCR_LOOP; + + err = mxu1_set_mcr(port, mcr); + if (!err) + mxport->mcr = mcr; + + mutex_unlock(&mxport->mutex); + + return err; +} + +static void mxu1_break(struct tty_struct *tty, int break_state) +{ + struct usb_serial_port *port = tty->driver_data; + struct mxu1_port *mxport = usb_get_serial_port_data(port); + + if (break_state == -1) + mxport->send_break = true; + else + mxport->send_break = false; + + mxu1_set_termios(tty, port, NULL); +} + +static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port) +{ + struct mxu1_port *mxport = usb_get_serial_port_data(port); + struct usb_serial *serial = port->serial; + int status; + u16 open_settings; + + open_settings = (MXU1_PIPE_MODE_CONTINUOUS | + MXU1_PIPE_TIMEOUT_ENABLE | + (MXU1_TRANSFER_TIMEOUT << 2)); + + mxport->msr = 0; + + status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); + if (status) { + dev_err(&port->dev, "failed to submit interrupt urb: %d\n", + status); + return status; + } + + if (tty) + mxu1_set_termios(tty, port, NULL); + + status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT, + open_settings, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "cannot send open command: %d\n", status); + goto unlink_int_urb; + } + + status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT, + 0, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "cannot send start command: %d\n", status); + goto unlink_int_urb; + } + + status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT, + MXU1_PURGE_INPUT, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "cannot clear input buffers: %d\n", + status); + + goto unlink_int_urb; + } + + status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT, + MXU1_PURGE_OUTPUT, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "cannot clear output buffers: %d\n", + status); + + goto unlink_int_urb; + } + + /* + * reset the data toggle on the bulk endpoints to work around bug in + * host controllers where things get out of sync some times + */ + usb_clear_halt(serial->dev, 
port->write_urb->pipe); + usb_clear_halt(serial->dev, port->read_urb->pipe); + + if (tty) + mxu1_set_termios(tty, port, NULL); + + status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT, + open_settings, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "cannot send open command: %d\n", status); + goto unlink_int_urb; + } + + status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT, + 0, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "cannot send start command: %d\n", status); + goto unlink_int_urb; + } + + status = usb_serial_generic_open(tty, port); + if (status) + goto unlink_int_urb; + + return 0; + +unlink_int_urb: + usb_kill_urb(port->interrupt_in_urb); + + return status; +} + +static void mxu1_close(struct usb_serial_port *port) +{ + int status; + + usb_serial_generic_close(port); + usb_kill_urb(port->interrupt_in_urb); + + status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT, + 0, MXU1_UART1_PORT); + if (status) { + dev_err(&port->dev, "failed to send close port command: %d\n", + status); + } +} + +static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr) +{ + struct mxu1_port *mxport = usb_get_serial_port_data(port); + struct async_icount *icount; + unsigned long flags; + + dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr); + + spin_lock_irqsave(&mxport->spinlock, flags); + mxport->msr = msr & MXU1_MSR_MASK; + spin_unlock_irqrestore(&mxport->spinlock, flags); + + if (msr & MXU1_MSR_DELTA_MASK) { + icount = &port->icount; + if (msr & MXU1_MSR_DELTA_CTS) + icount->cts++; + if (msr & MXU1_MSR_DELTA_DSR) + icount->dsr++; + if (msr & MXU1_MSR_DELTA_CD) + icount->dcd++; + if (msr & MXU1_MSR_DELTA_RI) + icount->rng++; + + wake_up_interruptible(&port->port.delta_msr_wait); + } +} + +static void mxu1_interrupt_callback(struct urb *urb) +{ + struct usb_serial_port *port = urb->context; + unsigned char *data = urb->transfer_buffer; + int length = urb->actual_length; + int function; + int status; + u8 msr; + + switch (urb->status) { + case 0: + break; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + dev_dbg(&port->dev, "%s - urb shutting down: %d\n", + __func__, urb->status); + return; + default: + dev_dbg(&port->dev, "%s - nonzero urb status: %d\n", + __func__, urb->status); + goto exit; + } + + if (length != 2) { + dev_dbg(&port->dev, "%s - bad packet size: %d\n", + __func__, length); + goto exit; + } + + if (data[0] == MXU1_CODE_HARDWARE_ERROR) { + dev_err(&port->dev, "hardware error: %d\n", data[1]); + goto exit; + } + + function = mxu1_get_func_from_code(data[0]); + + dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n", + __func__, function, data[1]); + + switch (function) { + case MXU1_CODE_DATA_ERROR: + dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n", + __func__, data[1]); + break; + + case MXU1_CODE_MODEM_STATUS: + msr = data[1]; + mxu1_handle_new_msr(port, msr); + break; + + default: + dev_err(&port->dev, "unknown interrupt code: 0x%02X\n", + data[1]); + break; + } + +exit: + status = usb_submit_urb(urb, GFP_ATOMIC); + if (status) { + dev_err(&port->dev, "resubmit interrupt urb failed: %d\n", + status); + } +} + +static struct usb_serial_driver mxu11x0_device = { + .driver = { + .owner = THIS_MODULE, + .name = "mxu11x0", + }, + .description = "MOXA UPort 11x0", + .id_table = mxu1_idtable, + .num_ports = 1, + .port_probe = mxu1_port_probe, + .attach = mxu1_startup, + .open = mxu1_open, + .close = mxu1_close, + .ioctl = mxu1_ioctl, + .set_termios = mxu1_set_termios, + .tiocmget = mxu1_tiocmget, + .tiocmset = mxu1_tiocmset, + .tiocmiwait = 
usb_serial_generic_tiocmiwait, + .get_icount = usb_serial_generic_get_icount, + .break_ctl = mxu1_break, + .read_int_callback = mxu1_interrupt_callback, +}; + +static struct usb_serial_driver *const serial_drivers[] = { + &mxu11x0_device, NULL +}; + +module_usb_serial_driver(serial_drivers, mxu1_idtable); + +MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>"); +MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("moxa/moxa-1110.fw"); +MODULE_FIRMWARE("moxa/moxa-1130.fw"); +MODULE_FIRMWARE("moxa/moxa-1131.fw"); +MODULE_FIRMWARE("moxa/moxa-1150.fw"); +MODULE_FIRMWARE("moxa/moxa-1151.fw"); diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 5c66d3f7a6d0..9ff9404f99d7 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -49,18 +49,18 @@ struct uas_dev_info { }; enum { - SUBMIT_STATUS_URB = (1 << 1), - ALLOC_DATA_IN_URB = (1 << 2), - SUBMIT_DATA_IN_URB = (1 << 3), - ALLOC_DATA_OUT_URB = (1 << 4), - SUBMIT_DATA_OUT_URB = (1 << 5), - ALLOC_CMD_URB = (1 << 6), - SUBMIT_CMD_URB = (1 << 7), - COMMAND_INFLIGHT = (1 << 8), - DATA_IN_URB_INFLIGHT = (1 << 9), - DATA_OUT_URB_INFLIGHT = (1 << 10), - COMMAND_ABORTED = (1 << 11), - IS_IN_WORK_LIST = (1 << 12), + SUBMIT_STATUS_URB = BIT(1), + ALLOC_DATA_IN_URB = BIT(2), + SUBMIT_DATA_IN_URB = BIT(3), + ALLOC_DATA_OUT_URB = BIT(4), + SUBMIT_DATA_OUT_URB = BIT(5), + ALLOC_CMD_URB = BIT(6), + SUBMIT_CMD_URB = BIT(7), + COMMAND_INFLIGHT = BIT(8), + DATA_IN_URB_INFLIGHT = BIT(9), + DATA_OUT_URB_INFLIGHT = BIT(10), + COMMAND_ABORTED = BIT(11), + IS_IN_WORK_LIST = BIT(12), }; /* Overrides scsi_pointer */ @@ -74,7 +74,7 @@ struct uas_cmd_info { /* I hate forward declarations, but I actually have a loop */ static int uas_submit_urbs(struct scsi_cmnd *cmnd, - struct uas_dev_info *devinfo, gfp_t gfp); + struct uas_dev_info *devinfo); static void uas_do_work(struct work_struct *work); static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller); static void uas_free_streams(struct uas_dev_info *devinfo); @@ -105,7 +105,7 @@ static void uas_do_work(struct work_struct *work) if (!(cmdinfo->state & IS_IN_WORK_LIST)) continue; - err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC); + err = uas_submit_urbs(cmnd, cmnd->device->hostdata); if (!err) cmdinfo->state &= ~IS_IN_WORK_LIST; else @@ -240,7 +240,7 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd, int err; cmdinfo->state |= direction | SUBMIT_STATUS_URB; - err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC); + err = uas_submit_urbs(cmnd, cmnd->device->hostdata); if (err) { uas_add_work(cmdinfo); } @@ -512,7 +512,7 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp) } static int uas_submit_urbs(struct scsi_cmnd *cmnd, - struct uas_dev_info *devinfo, gfp_t gfp) + struct uas_dev_info *devinfo) { struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; struct urb *urb; @@ -520,14 +520,14 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, lockdep_assert_held(&devinfo->lock); if (cmdinfo->state & SUBMIT_STATUS_URB) { - urb = uas_submit_sense_urb(cmnd, gfp); + urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC); if (!urb) return SCSI_MLQUEUE_DEVICE_BUSY; cmdinfo->state &= ~SUBMIT_STATUS_URB; } if (cmdinfo->state & ALLOC_DATA_IN_URB) { - cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, gfp, + cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC, cmnd, DMA_FROM_DEVICE); if (!cmdinfo->data_in_urb) return SCSI_MLQUEUE_DEVICE_BUSY; @@ -536,7 +536,7 @@ 
static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (cmdinfo->state & SUBMIT_DATA_IN_URB) { usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs); - err = usb_submit_urb(cmdinfo->data_in_urb, gfp); + err = usb_submit_urb(cmdinfo->data_in_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(cmdinfo->data_in_urb); uas_log_cmd_state(cmnd, "data in submit err", err); @@ -547,7 +547,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, } if (cmdinfo->state & ALLOC_DATA_OUT_URB) { - cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, gfp, + cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC, cmnd, DMA_TO_DEVICE); if (!cmdinfo->data_out_urb) return SCSI_MLQUEUE_DEVICE_BUSY; @@ -556,7 +556,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (cmdinfo->state & SUBMIT_DATA_OUT_URB) { usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs); - err = usb_submit_urb(cmdinfo->data_out_urb, gfp); + err = usb_submit_urb(cmdinfo->data_out_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(cmdinfo->data_out_urb); uas_log_cmd_state(cmnd, "data out submit err", err); @@ -567,7 +567,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, } if (cmdinfo->state & ALLOC_CMD_URB) { - cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd); + cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd); if (!cmdinfo->cmd_urb) return SCSI_MLQUEUE_DEVICE_BUSY; cmdinfo->state &= ~ALLOC_CMD_URB; @@ -575,7 +575,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (cmdinfo->state & SUBMIT_CMD_URB) { usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs); - err = usb_submit_urb(cmdinfo->cmd_urb, gfp); + err = usb_submit_urb(cmdinfo->cmd_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(cmdinfo->cmd_urb); uas_log_cmd_state(cmnd, "cmd submit err", err); @@ -653,7 +653,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, if (!devinfo->use_streams) cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); - err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC); + err = uas_submit_urbs(cmnd, devinfo); if (err) { /* If we did nothing, give up now */ if (cmdinfo->state & SUBMIT_STATUS_URB) { diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c index bdcb13cc1d54..01c20a260a8b 100644 --- a/drivers/uwb/uwbd.c +++ b/drivers/uwb/uwbd.c @@ -279,7 +279,6 @@ static int uwbd(void *param) HZ); if (should_stop) break; - try_to_freeze(); spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); if (!list_empty(&rc->uwbd.event_list)) { diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index b335c1ae8625..fe00a07c122e 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s) port = FSL_DIU_PORT_DLVDS; } - return diu_ops.valid_monitor_port(port); + if (diu_ops.valid_monitor_port) + port = diu_ops.valid_monitor_port(port); + + return port; } /* @@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void) #else monitor_port = fsl_diu_name_to_port(monitor_string); #endif + + /* + * Must to verify set_pixel_clock. If not implement on platform, + * then that means that there is no platform support for the DIU. 
+ */ + if (!diu_ops.set_pixel_clock) + return -ENODEV; + pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); #ifdef CONFIG_NOT_COHERENT_CACHE diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c index 99ca268c1cdd..d05a54922ba6 100644 --- a/drivers/video/fbdev/omap2/dss/venc.c +++ b/drivers/video/fbdev/omap2/dss/venc.c @@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = { .vbp = 41, .interlace = true, + + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; EXPORT_SYMBOL(omap_dss_pal_timings); @@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = { .vbp = 31, .interlace = true, + + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; EXPORT_SYMBOL(omap_dss_ntsc_timings); diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index aa8a7f71f310..9b7a35c9e51d 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_X86) += fallback.o -obj-y += grant-table.o features.o balloon.o manage.o preempt.o +obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o obj-y += events/ obj-y += xenbus/ diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c index 90307c0b630c..6893c79fd2a1 100644 --- a/drivers/xen/acpi.c +++ b/drivers/xen/acpi.c @@ -58,7 +58,7 @@ static int xen_acpi_notify_hypervisor_state(u8 sleep_state, bits, val_a, val_b)) return -1; - HYPERVISOR_dom0_op(&op); + HYPERVISOR_platform_op(&op); return 1; } diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c index f745db270171..be7e56a338e8 100644 --- a/drivers/xen/efi.c +++ b/drivers/xen/efi.c @@ -42,7 +42,7 @@ static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { struct xen_platform_op op = INIT_EFI_OP(get_time); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; if (tm) { @@ -67,7 +67,7 @@ static efi_status_t xen_efi_set_time(efi_time_t *tm) BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.set_time)); memcpy(&efi_data(op).u.set_time, tm, sizeof(*tm)); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; return efi_data(op).status; @@ -79,7 +79,7 @@ static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, { struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; if (tm) { @@ -108,7 +108,7 @@ static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) else efi_data(op).misc |= XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY; - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; return efi_data(op).status; @@ -129,7 +129,7 @@ static efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_data(op).u.get_variable.size = *data_size; set_xen_guest_handle(efi_data(op).u.get_variable.data, data); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; *data_size = efi_data(op).u.get_variable.size; @@ -152,7 +152,7 @@ static efi_status_t xen_efi_get_next_variable(unsigned long *name_size, 
memcpy(&efi_data(op).u.get_next_variable_name.vendor_guid, vendor, sizeof(*vendor)); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; *name_size = efi_data(op).u.get_next_variable_name.size; @@ -178,7 +178,7 @@ static efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_data(op).u.set_variable.size = data_size; set_xen_guest_handle(efi_data(op).u.set_variable.data, data); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; return efi_data(op).status; @@ -196,7 +196,7 @@ static efi_status_t xen_efi_query_variable_info(u32 attr, efi_data(op).u.query_variable_info.attr = attr; - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; *storage_space = efi_data(op).u.query_variable_info.max_store_size; @@ -210,7 +210,7 @@ static efi_status_t xen_efi_get_next_high_mono_count(u32 *count) { struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count); - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; *count = efi_data(op).misc; @@ -232,7 +232,7 @@ static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules, efi_data(op).u.update_capsule.capsule_count = count; efi_data(op).u.update_capsule.sg_list = sg_list; - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; return efi_data(op).status; @@ -252,7 +252,7 @@ static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules, capsules); efi_data(op).u.query_capsule_capabilities.capsule_count = count; - if (HYPERVISOR_dom0_op(&op) < 0) + if (HYPERVISOR_platform_op(&op) < 0) return EFI_UNSUPPORTED; *max_size = efi_data(op).u.query_capsule_capabilities.max_capsule_size; @@ -331,7 +331,7 @@ efi_system_table_t __init *xen_efi_probe(void) }; union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info; - if (!xen_initial_domain() || HYPERVISOR_dom0_op(&op) < 0) + if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0) return NULL; /* Here we know that Xen runs on EFI platform. 
*/ @@ -347,7 +347,7 @@ efi_system_table_t __init *xen_efi_probe(void) info->vendor.bufsz = sizeof(vendor); set_xen_guest_handle(info->vendor.name, vendor); - if (HYPERVISOR_dom0_op(&op) == 0) { + if (HYPERVISOR_platform_op(&op) == 0) { efi_systab_xen.fw_vendor = __pa_symbol(vendor); efi_systab_xen.fw_revision = info->vendor.revision; } else @@ -357,14 +357,14 @@ efi_system_table_t __init *xen_efi_probe(void) op.u.firmware_info.type = XEN_FW_EFI_INFO; op.u.firmware_info.index = XEN_FW_EFI_VERSION; - if (HYPERVISOR_dom0_op(&op) == 0) + if (HYPERVISOR_platform_op(&op) == 0) efi_systab_xen.hdr.revision = info->version; op.cmd = XENPF_firmware_info; op.u.firmware_info.type = XEN_FW_EFI_INFO; op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION; - if (HYPERVISOR_dom0_op(&op) == 0) + if (HYPERVISOR_platform_op(&op) == 0) efi.runtime_version = info->version; return &efi_systab_xen; diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index e3e9e3d46d1b..96a1b8da5371 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port) static void consume_one_event(unsigned cpu, struct evtchn_fifo_control_block *control_block, - unsigned priority, unsigned long *ready) + unsigned priority, unsigned long *ready, + bool drop) { struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); uint32_t head; @@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu, if (head == 0) clear_bit(priority, ready); - if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) - handle_irq_for_port(port); + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { + if (unlikely(drop)) + pr_warn("Dropping pending event for port %u\n", port); + else + handle_irq_for_port(port); + } q->head[priority] = head; } -static void evtchn_fifo_handle_events(unsigned cpu) +static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) { struct evtchn_fifo_control_block *control_block; unsigned long ready; @@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu) while (ready) { q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); - consume_one_event(cpu, control_block, q, &ready); + consume_one_event(cpu, control_block, q, &ready, drop); ready |= xchg(&control_block->ready, 0); } } +static void evtchn_fifo_handle_events(unsigned cpu) +{ + __evtchn_fifo_handle_events(cpu, false); +} + static void evtchn_fifo_resume(void) { unsigned cpu; @@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self, if (!per_cpu(cpu_control_block, cpu)) ret = evtchn_fifo_alloc_control_block(cpu); break; + case CPU_DEAD: + __evtchn_fifo_handle_events(cpu, true); + break; default: break; } diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 1be5dd048622..dc495383ad73 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -518,7 +518,7 @@ static void mn_release(struct mmu_notifier *mn, mutex_unlock(&priv->lock); } -static struct mmu_notifier_ops gntdev_mmu_ops = { +static const struct mmu_notifier_ops gntdev_mmu_ops = { .release = mn_release, .invalidate_page = mn_invl_page, .invalidate_range_start = mn_invl_range_start, @@ -748,6 +748,206 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) return rc; } +#define GNTDEV_COPY_BATCH 24 + +struct gntdev_copy_batch { + struct gnttab_copy ops[GNTDEV_COPY_BATCH]; + struct page *pages[GNTDEV_COPY_BATCH]; + s16 __user *status[GNTDEV_COPY_BATCH]; + unsigned int nr_ops; + unsigned int nr_pages; +}; + 
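The gntdev_copy_batch structure above groups up to GNTDEV_COPY_BATCH (24) gnttab_copy operations, the user pages pinned for them, and per-segment status pointers, so the helpers that follow can flush a whole batch with a single gnttab_batch_copy() call. Userspace reaches this through the new IOCTL_GNTDEV_GRANT_COPY ioctl handled further below. The sketch that follows is one plausible way a guest process might drive it; it assumes the segment and ioctl layouts exported by the uapi header (include/uapi/xen/gntdev.h, installed as <xen/gntdev.h>), a gntdev node at /dev/xen/gntdev, and that GNTST_okay is 0. The domain id and grant reference are placeholders, not values taken from this patch.

/*
 * Hedged userspace sketch: copy one page granted by another domain into a
 * local buffer through IOCTL_GNTDEV_GRANT_COPY. Assumes the uapi layout from
 * include/uapi/xen/gntdev.h and a device node at /dev/xen/gntdev; the domid
 * and grant reference below are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntdev.h>

int main(void)
{
    struct gntdev_grant_copy_segment seg;
    struct ioctl_gntdev_grant_copy copy;
    char buf[4096];                     /* one XEN_PAGE_SIZE buffer */
    int fd, ret;

    fd = open("/dev/xen/gntdev", O_RDWR);
    if (fd < 0) {
        perror("open /dev/xen/gntdev");
        return 1;
    }

    memset(&seg, 0, sizeof(seg));
    seg.flags = GNTCOPY_source_gref;    /* foreign source, local destination */
    seg.source.foreign.domid = 1;       /* placeholder domain id */
    seg.source.foreign.ref = 8;         /* placeholder grant reference */
    seg.source.foreign.offset = 0;
    seg.dest.virt = buf;
    seg.len = sizeof(buf);

    copy.count = 1;
    copy.segments = &seg;

    ret = ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
    if (ret)
        perror("IOCTL_GNTDEV_GRANT_COPY");
    else if (seg.status != 0)           /* GNTST_okay is 0 */
        fprintf(stderr, "segment failed: %d\n", seg.status);

    close(fd);
    return ret ? 1 : 0;
}

As the per-segment checks in gntdev_grant_copy_seg() below show, a granted (foreign) side may not cross a Xen page boundary, so foreign.offset plus len must stay within XEN_PAGE_SIZE, while a virtual source or destination may span pages because the handler splits it into multiple gnttab_copy entries internally.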
+static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt, + bool writeable, unsigned long *gfn) +{ + unsigned long addr = (unsigned long)virt; + struct page *page; + unsigned long xen_pfn; + int ret; + + ret = get_user_pages_fast(addr, 1, writeable, &page); + if (ret < 0) + return ret; + + batch->pages[batch->nr_pages++] = page; + + xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK); + *gfn = pfn_to_gfn(xen_pfn); + + return 0; +} + +static void gntdev_put_pages(struct gntdev_copy_batch *batch) +{ + unsigned int i; + + for (i = 0; i < batch->nr_pages; i++) + put_page(batch->pages[i]); + batch->nr_pages = 0; +} + +static int gntdev_copy(struct gntdev_copy_batch *batch) +{ + unsigned int i; + + gnttab_batch_copy(batch->ops, batch->nr_ops); + gntdev_put_pages(batch); + + /* + * For each completed op, update the status if the op failed + * and all previous ops for the segment were successful. + */ + for (i = 0; i < batch->nr_ops; i++) { + s16 status = batch->ops[i].status; + s16 old_status; + + if (status == GNTST_okay) + continue; + + if (__get_user(old_status, batch->status[i])) + return -EFAULT; + + if (old_status != GNTST_okay) + continue; + + if (__put_user(status, batch->status[i])) + return -EFAULT; + } + + batch->nr_ops = 0; + return 0; +} + +static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch, + struct gntdev_grant_copy_segment *seg, + s16 __user *status) +{ + uint16_t copied = 0; + + /* + * Disallow local -> local copies since there is only space in + * batch->pages for one page per-op and this would be a very + * expensive memcpy(). + */ + if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref))) + return -EINVAL; + + /* Can't cross page if source/dest is a grant ref. */ + if (seg->flags & GNTCOPY_source_gref) { + if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE) + return -EINVAL; + } + if (seg->flags & GNTCOPY_dest_gref) { + if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE) + return -EINVAL; + } + + if (put_user(GNTST_okay, status)) + return -EFAULT; + + while (copied < seg->len) { + struct gnttab_copy *op; + void __user *virt; + size_t len, off; + unsigned long gfn; + int ret; + + if (batch->nr_ops >= GNTDEV_COPY_BATCH) { + ret = gntdev_copy(batch); + if (ret < 0) + return ret; + } + + len = seg->len - copied; + + op = &batch->ops[batch->nr_ops]; + op->flags = 0; + + if (seg->flags & GNTCOPY_source_gref) { + op->source.u.ref = seg->source.foreign.ref; + op->source.domid = seg->source.foreign.domid; + op->source.offset = seg->source.foreign.offset + copied; + op->flags |= GNTCOPY_source_gref; + } else { + virt = seg->source.virt + copied; + off = (unsigned long)virt & ~XEN_PAGE_MASK; + len = min(len, (size_t)XEN_PAGE_SIZE - off); + + ret = gntdev_get_page(batch, virt, false, &gfn); + if (ret < 0) + return ret; + + op->source.u.gmfn = gfn; + op->source.domid = DOMID_SELF; + op->source.offset = off; + } + + if (seg->flags & GNTCOPY_dest_gref) { + op->dest.u.ref = seg->dest.foreign.ref; + op->dest.domid = seg->dest.foreign.domid; + op->dest.offset = seg->dest.foreign.offset + copied; + op->flags |= GNTCOPY_dest_gref; + } else { + virt = seg->dest.virt + copied; + off = (unsigned long)virt & ~XEN_PAGE_MASK; + len = min(len, (size_t)XEN_PAGE_SIZE - off); + + ret = gntdev_get_page(batch, virt, true, &gfn); + if (ret < 0) + return ret; + + op->dest.u.gmfn = gfn; + op->dest.domid = DOMID_SELF; + op->dest.offset = off; + } + + op->len = len; + copied += len; + + batch->status[batch->nr_ops] = status; + batch->nr_ops++; + 
} + + return 0; +} + +static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u) +{ + struct ioctl_gntdev_grant_copy copy; + struct gntdev_copy_batch batch; + unsigned int i; + int ret = 0; + + if (copy_from_user(&copy, u, sizeof(copy))) + return -EFAULT; + + batch.nr_ops = 0; + batch.nr_pages = 0; + + for (i = 0; i < copy.count; i++) { + struct gntdev_grant_copy_segment seg; + + if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) { + ret = -EFAULT; + goto out; + } + + ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status); + if (ret < 0) + goto out; + + cond_resched(); + } + if (batch.nr_ops) + ret = gntdev_copy(&batch); + return ret; + + out: + gntdev_put_pages(&batch); + return ret; +} + static long gntdev_ioctl(struct file *flip, unsigned int cmd, unsigned long arg) { @@ -767,6 +967,9 @@ static long gntdev_ioctl(struct file *flip, case IOCTL_GNTDEV_SET_UNMAP_NOTIFY: return gntdev_ioctl_notify(priv, ptr); + case IOCTL_GNTDEV_GRANT_COPY: + return gntdev_ioctl_grant_copy(priv, ptr); + default: pr_debug("priv %p, unknown cmd %x\n", priv, cmd); return -ENOIOCTLCMD; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index c49f79ed58c5..effbaf91791f 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -128,7 +128,7 @@ struct unmap_refs_callback_data { int result; }; -static struct gnttab_ops *gnttab_interface; +static const struct gnttab_ops *gnttab_interface; static int grant_table_version; static int grefs_per_grant_frame; @@ -1013,7 +1013,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) return rc; } -static struct gnttab_ops gnttab_v1_ops = { +static const struct gnttab_ops gnttab_v1_ops = { .map_frames = gnttab_map_frames_v1, .unmap_frames = gnttab_unmap_frames_v1, .update_entry = gnttab_update_entry_v1, diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index 49e88f2ce7a1..cdc6daa7a9f6 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -78,7 +78,7 @@ static int xen_pcpu_down(uint32_t cpu_id) .u.cpu_ol.cpuid = cpu_id, }; - return HYPERVISOR_dom0_op(&op); + return HYPERVISOR_platform_op(&op); } static int xen_pcpu_up(uint32_t cpu_id) @@ -89,7 +89,7 @@ static int xen_pcpu_up(uint32_t cpu_id) .u.cpu_ol.cpuid = cpu_id, }; - return HYPERVISOR_dom0_op(&op); + return HYPERVISOR_platform_op(&op); } static ssize_t show_online(struct device *dev, @@ -277,7 +277,7 @@ static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu) .u.pcpu_info.xen_cpuid = cpu, }; - ret = HYPERVISOR_dom0_op(&op); + ret = HYPERVISOR_platform_op(&op); if (ret) return ret; @@ -364,7 +364,7 @@ int xen_pcpu_id(uint32_t acpi_id) op.cmd = XENPF_get_cpuinfo; while (cpu_id <= max_id) { op.u.pcpu_info.xen_cpuid = cpu_id; - if (HYPERVISOR_dom0_op(&op)) { + if (HYPERVISOR_platform_op(&op)) { cpu_id++; continue; } diff --git a/drivers/xen/time.c b/drivers/xen/time.c new file mode 100644 index 000000000000..71078425c9ea --- /dev/null +++ b/drivers/xen/time.c @@ -0,0 +1,88 @@ +/* + * Xen stolen ticks accounting. 
+ */ +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/math64.h> +#include <linux/gfp.h> + +#include <asm/xen/hypervisor.h> +#include <asm/xen/hypercall.h> + +#include <xen/events.h> +#include <xen/features.h> +#include <xen/interface/xen.h> +#include <xen/interface/vcpu.h> +#include <xen/xen-ops.h> + +/* runstate info updated by Xen */ +static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); + +/* return an consistent snapshot of 64-bit time/counter value */ +static u64 get64(const u64 *p) +{ + u64 ret; + + if (BITS_PER_LONG < 64) { + u32 *p32 = (u32 *)p; + u32 h, l, h2; + + /* + * Read high then low, and then make sure high is + * still the same; this will only loop if low wraps + * and carries into high. + * XXX some clean way to make this endian-proof? + */ + do { + h = READ_ONCE(p32[1]); + l = READ_ONCE(p32[0]); + h2 = READ_ONCE(p32[1]); + } while(h2 != h); + + ret = (((u64)h) << 32) | l; + } else + ret = READ_ONCE(*p); + + return ret; +} + +/* + * Runstate accounting + */ +void xen_get_runstate_snapshot(struct vcpu_runstate_info *res) +{ + u64 state_time; + struct vcpu_runstate_info *state; + + BUG_ON(preemptible()); + + state = this_cpu_ptr(&xen_runstate); + + /* + * The runstate info is always updated by the hypervisor on + * the current CPU, so there's no need to use anything + * stronger than a compiler barrier when fetching it. + */ + do { + state_time = get64(&state->state_entry_time); + *res = READ_ONCE(*state); + } while (get64(&state->state_entry_time) != state_time); +} + +/* return true when a vcpu could run but has no real cpu to run on */ +bool xen_vcpu_stolen(int vcpu) +{ + return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; +} + +void xen_setup_runstate_info(int cpu) +{ + struct vcpu_register_runstate_memory_area area; + + area.addr.v = &per_cpu(xen_runstate, cpu); + + if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, + cpu, &area)) + BUG(); +} + diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c index f4a369429553..fdc9e67b842d 100644 --- a/drivers/xen/xen-acpi-cpuhotplug.c +++ b/drivers/xen/xen-acpi-cpuhotplug.c @@ -206,7 +206,7 @@ static int xen_hotadd_cpu(struct acpi_processor *pr) op.u.cpu_add.acpi_id = pr->acpi_id; op.u.cpu_add.pxm = pxm; - cpu_id = HYPERVISOR_dom0_op(&op); + cpu_id = HYPERVISOR_platform_op(&op); if (cpu_id < 0) pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n", pr->acpi_id); diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c index f83b754505f8..23d1808fe027 100644 --- a/drivers/xen/xen-acpi-pad.c +++ b/drivers/xen/xen-acpi-pad.c @@ -36,7 +36,7 @@ static int xen_acpi_pad_idle_cpus(unsigned int idle_nums) op.u.core_parking.type = XEN_CORE_PARKING_SET; op.u.core_parking.idle_nums = idle_nums; - return HYPERVISOR_dom0_op(&op); + return HYPERVISOR_platform_op(&op); } static int xen_acpi_pad_idle_cpus_num(void) @@ -46,7 +46,7 @@ static int xen_acpi_pad_idle_cpus_num(void) op.cmd = XENPF_core_parking; op.u.core_parking.type = XEN_CORE_PARKING_GET; - return HYPERVISOR_dom0_op(&op) + return HYPERVISOR_platform_op(&op) ?: op.u.core_parking.idle_nums; } diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 70fa438000af..076970a54f89 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -116,7 +116,7 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr) set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states); if (!no_hypercall) - ret = HYPERVISOR_dom0_op(&op); + 
ret = HYPERVISOR_platform_op(&op); if (!ret) { pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id); @@ -244,7 +244,7 @@ static int push_pxx_to_hypervisor(struct acpi_processor *_pr) } if (!no_hypercall) - ret = HYPERVISOR_dom0_op(&op); + ret = HYPERVISOR_platform_op(&op); if (!ret) { struct acpi_processor_performance *perf; @@ -302,7 +302,7 @@ static unsigned int __init get_max_acpi_id(void) info = &op.u.pcpu_info; info->xen_cpuid = 0; - ret = HYPERVISOR_dom0_op(&op); + ret = HYPERVISOR_platform_op(&op); if (ret) return NR_CPUS; @@ -310,7 +310,7 @@ static unsigned int __init get_max_acpi_id(void) last_cpu = op.u.pcpu_info.max_present; for (i = 0; i <= last_cpu; i++) { info->xen_cpuid = i; - ret = HYPERVISOR_dom0_op(&op); + ret = HYPERVISOR_platform_op(&op); if (ret) continue; max_acpi_id = max(info->acpi_id, max_acpi_id); diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index 58e38d586f52..4d529f3e40df 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -37,6 +37,7 @@ struct xen_pcibk_device { struct xen_pci_sharedinfo *sh_info; unsigned long flags; struct work_struct op_work; + struct xen_pci_op op; }; struct xen_pcibk_dev_data { diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index c4a0666de6f5..73dafdc494aa 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset) enable ? "enable" : "disable"); if (enable) { + /* + * The MSI or MSI-X should not have an IRQ handler. Otherwise + * if the guest terminates we BUG_ON in free_msi_irqs. + */ + if (dev->msi_enabled || dev->msix_enabled) + goto out; + rc = request_irq(dev_data->irq, xen_pcibk_guest_interrupt, IRQF_SHARED, dev_data->irq_name, dev); @@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); - status = pci_enable_msi(dev); + if (dev->msi_enabled) + status = -EALREADY; + else if (dev->msix_enabled) + status = -ENXIO; + else + status = pci_enable_msi(dev); if (status) { pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", @@ -173,20 +185,23 @@ static int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { - struct xen_pcibk_dev_data *dev_data; - if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", pci_name(dev)); - pci_disable_msi(dev); + if (dev->msi_enabled) { + struct xen_pcibk_dev_data *dev_data; + + pci_disable_msi(dev); + + dev_data = pci_get_drvdata(dev); + if (dev_data) + dev_data->ack_intr = 1; + } op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), op->value); - dev_data = pci_get_drvdata(dev); - if (dev_data) - dev_data->ack_intr = 1; return 0; } @@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, struct xen_pcibk_dev_data *dev_data; int i, result; struct msix_entry *entries; + u16 cmd; if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", pci_name(dev)); + if (op->value > SH_INFO_MAX_VEC) return -EINVAL; + if (dev->msix_enabled) + return -EALREADY; + + /* + * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able + * to access the BARs where the MSI-X entries reside. 
+ */ + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) + return -ENXIO; + entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); if (entries == NULL) return -ENOMEM; @@ -245,23 +273,27 @@ static int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { - struct xen_pcibk_dev_data *dev_data; if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", pci_name(dev)); - pci_disable_msix(dev); + if (dev->msix_enabled) { + struct xen_pcibk_dev_data *dev_data; + + pci_disable_msix(dev); + + dev_data = pci_get_drvdata(dev); + if (dev_data) + dev_data->ack_intr = 1; + } /* * SR-IOV devices (which don't have any legacy IRQ) have * an undefined IRQ value of zero. */ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), - op->value); - dev_data = pci_get_drvdata(dev); - if (dev_data) - dev_data->ack_intr = 1; + printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", + pci_name(dev), op->value); return 0; } #endif @@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data) container_of(data, struct xen_pcibk_device, op_work); struct pci_dev *dev; struct xen_pcibk_dev_data *dev_data = NULL; - struct xen_pci_op *op = &pdev->sh_info->op; + struct xen_pci_op *op = &pdev->op; int test_intx = 0; + *op = pdev->sh_info->op; + barrier(); dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); if (dev == NULL) @@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data) if ((dev_data->enable_intx != test_intx)) xen_pcibk_control_isr(dev, 0 /* no reset */); } + pdev->sh_info->op.err = op->err; + pdev->sh_info->op.value = op->value; +#ifdef CONFIG_PCI_MSI + if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { + unsigned int i; + + for (i = 0; i < op->value; i++) + pdev->sh_info->op.msix_entries[i].vector = + op->msix_entries[i].vector; + } +#endif /* Tell the driver domain that we're done. 
*/ wmb(); clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index 98bc345f296e..4843741e703a 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev) dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); pdev->xdev = xdev; - dev_set_drvdata(&xdev->dev, pdev); mutex_init(&pdev->dev_lock); @@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev) kfree(pdev); pdev = NULL; } + + dev_set_drvdata(&xdev->dev, pdev); + out: return pdev; } diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 43bcae852546..ad4eb1024d1f 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) if (!pending_req) return 1; - ring_req = *RING_GET_REQUEST(ring, rc); + RING_COPY_REQUEST(ring, rc, &ring_req); ring->req_cons = ++rc; err = prepare_pending_reqs(info, &ring_req, pending_req); diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c index f8b12856753f..a03f261b12d8 100644 --- a/drivers/xen/xenfs/xensyms.c +++ b/drivers/xen/xenfs/xensyms.c @@ -31,7 +31,7 @@ static int xensyms_next_sym(struct xensyms *xs) symnum = symdata->symnum; - ret = HYPERVISOR_dom0_op(&xs->op); + ret = HYPERVISOR_platform_op(&xs->op); if (ret < 0) return ret; @@ -50,7 +50,7 @@ static int xensyms_next_sym(struct xensyms *xs) set_xen_guest_handle(symdata->name, xs->name); symdata->symnum--; /* Rewind */ - ret = HYPERVISOR_dom0_op(&xs->op); + ret = HYPERVISOR_platform_op(&xs->op); if (ret < 0) return ret; } |
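Two of the changes above apply the same hardening idea: xen-pciback now copies the shared sh_info->op into the backend-private pdev->op before acting on it and only writes selected result fields back, and xen-scsiback replaces the RING_GET_REQUEST() pointer dereference with RING_COPY_REQUEST(), which snapshots the request out of the shared ring. In both cases every validation and use happens on a private copy, so a frontend that rewrites the shared memory between the backend's check and its use gains nothing. The standalone sketch below illustrates that double-fetch window outside the kernel; all names in it are invented for the illustration and it is not derived from the Xen code.

/*
 * Illustration (not kernel code): a "frontend" thread keeps rewriting a
 * length field in shared memory while a "backend" loop validates it.
 * Reading the shared field twice (check, then use) can observe a value the
 * check never approved; snapshotting the request first cannot.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct request {
    _Atomic unsigned int len;       /* field the frontend may change at any time */
};

static struct request shared;       /* stands in for a shared ring slot */
static _Atomic int stop;

static void *frontend(void *arg)
{
    (void)arg;
    while (!atomic_load(&stop))     /* flip between a sane and a bogus size */
        atomic_store(&shared.len,
                     atomic_load(&shared.len) == 16 ? 4096 : 16);
    return NULL;
}

int main(void)
{
    unsigned long double_fetch = 0, snapshot = 0;
    unsigned long i;
    pthread_t t;

    atomic_store(&shared.len, 16);
    pthread_create(&t, NULL, frontend, NULL);

    for (i = 0; i < 10000000UL; i++) {
        /* Buggy pattern: validate, then fetch the shared field again. */
        if (atomic_load(&shared.len) <= 64 &&
            atomic_load(&shared.len) > 64)
            double_fetch++;

        /* Hardened pattern: copy once, validate and use the copy. */
        unsigned int len = atomic_load(&shared.len);
        if (len <= 64 && len > 64)
            snapshot++;
    }

    atomic_store(&stop, 1);
    pthread_join(t, NULL);

    printf("double fetch used an unchecked length %lu times\n", double_fetch);
    printf("snapshot    used an unchecked length %lu times\n", snapshot);
    return 0;
}

Built with cc -pthread, the second counter is zero by construction, while the first is usually nonzero on a multiprocessor machine; RING_COPY_REQUEST() and the pdev->op snapshot give the Xen backends the same guarantee for requests that live in frontend-writable memory.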