author:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-11-18 16:02:15 +0100
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-11-18 16:02:15 +0100
commit:    ae4d814bf1f2b0b7a37b3c30970d6974911f1377 (patch)
tree:      d6332943d12e23432c8e455c2a9d71ac82cbc727 /drivers/usb/dwc2
parent:    37be66767e3cae4fd16e064d8bb7f9f72bf5c045 (diff)
parent:    d5c024f3761dbd512329d3b7234a07dcf7580f0a (diff)
Merge tag 'usb-for-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-next
Felipe writes:

usb: patches for v4.10 merge window

One big merge this time with a total of 166 non-merge commits. Most of the
work, by far, is on dwc2 this time (68.2%) with dwc3 a far second (22.5%).
The remaining 9.3% are scattered on gadget drivers.

The most important changes for dwc2 are the peripheral side DMA support
implemented by Synopsys folks and support for the new IOT dwc2 compatible
core from Synopsys.

In dwc3 land we have support for high-bandwidth, high-speed isochronous
endpoints and some non-critical fixes for large scatter lists.

Apart from these, we have our usual set of cleanups, non-critical fixes, etc.
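The largest mechanical change on the dwc2 side, visible throughout the hunks
below, is that the driver's tunable parameters move out of a separately
allocated structure behind hsotg->core_params and into a struct
dwc2_core_params embedded directly in struct dwc2_hsotg as hsotg->params,
initialized by dwc2_init_params() in the new params.c. A simplified,
hypothetical C sketch of that layout change (struct contents trimmed; the real
definitions are in the core.h hunks below):

    /* Trimmed-down sketch, not the real kernel structures. */
    struct dwc2_core_params {
            int hibernation;
            int host_channels;
            /* ... the full set of knobs lives in core.h ... */
    };

    struct dwc2_hsotg {
            /* old: struct dwc2_core_params *core_params;  (pointer)   */
            struct dwc2_core_params params;  /* new: embedded member   */
            /* ... */
    };

    /* Call sites change from pointer to member access, e.g.:
     *     if (!hsotg->core_params->hibernation)   // before this merge
     *     if (!hsotg->params.hibernation)         // after this merge
     */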
Diffstat (limited to 'drivers/usb/dwc2')
-rw-r--r--  drivers/usb/dwc2/Makefile      |    1
-rw-r--r--  drivers/usb/dwc2/core.c        |  930
-rw-r--r--  drivers/usb/dwc2/core.h        |  324
-rw-r--r--  drivers/usb/dwc2/core_intr.c   |    6
-rw-r--r--  drivers/usb/dwc2/debugfs.c     |    2
-rw-r--r--  drivers/usb/dwc2/gadget.c      | 1126
-rw-r--r--  drivers/usb/dwc2/hcd.c         |  264
-rw-r--r--  drivers/usb/dwc2/hcd.h         |    7
-rw-r--r--  drivers/usb/dwc2/hcd_ddma.c    |   56
-rw-r--r--  drivers/usb/dwc2/hcd_intr.c    |   55
-rw-r--r--  drivers/usb/dwc2/hcd_queue.c   |   87
-rw-r--r--  drivers/usb/dwc2/hw.h          |   48
-rw-r--r--  drivers/usb/dwc2/params.c      | 1435
-rw-r--r--  drivers/usb/dwc2/pci.c         |   18
-rw-r--r--  drivers/usb/dwc2/platform.c    |  207
15 files changed, 2751 insertions, 1815 deletions
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 50fdaace1e73..b9237e1e45d0 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -3,6 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC2) += dwc2.o
dwc2-y := core.o core_intr.o platform.o
+dwc2-y += params.o
ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
dwc2-y += hcd.o hcd_intr.o
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 4c0fa0b17353..11d8ae9aead1 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -135,7 +135,7 @@ int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
u32 pcgcctl;
int ret = 0;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
return -ENOTSUPP;
pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
@@ -188,7 +188,7 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
u32 pcgcctl;
int ret = 0;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
return -ENOTSUPP;
/* Backup all registers */
@@ -445,7 +445,7 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
* the force mode. We only need to call this once during probe if
* dr_mode == OTG.
*/
-static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{
u32 gusbcfg;
@@ -541,7 +541,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
addr = hsotg->regs + HAINTMSK;
dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
addr = hsotg->regs + HFLBADDR;
dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
@@ -551,7 +551,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- for (i = 0; i < hsotg->core_params->host_channels; i++) {
+ for (i = 0; i < hsotg->params.host_channels; i++) {
dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
addr = hsotg->regs + HCCHAR(i);
dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
@@ -571,7 +571,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
addr = hsotg->regs + HCDMA(i);
dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
addr = hsotg->regs + HCDMAB(i);
dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
@@ -735,704 +735,13 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
udelay(1);
}
-#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
-
-/* Parameter access functions */
-void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- switch (val) {
- case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
- if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
- valid = 0;
- break;
- case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- break;
- default:
- valid = 0;
- break;
- }
- break;
- case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
- /* always valid */
- break;
- default:
- valid = 0;
- break;
- }
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for otg_cap parameter. Check HW configuration.\n",
- val);
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
- break;
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
- break;
- default:
- val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
- break;
- }
- dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
- }
-
- hsotg->core_params->otg_cap = val;
-}
-
-void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_enable parameter. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
- dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
- }
-
- hsotg->core_params->dma_enable = val;
-}
-
-void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !hsotg->hw_params.dma_desc_enable))
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
- val);
- val = (hsotg->core_params->dma_enable > 0 &&
- hsotg->hw_params.dma_desc_enable);
- dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
- }
-
- hsotg->core_params->dma_desc_enable = val;
-}
-
-void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !hsotg->hw_params.dma_desc_enable))
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
- val);
- val = (hsotg->core_params->dma_enable > 0 &&
- hsotg->hw_params.dma_desc_enable);
- }
-
- hsotg->core_params->dma_desc_fs_enable = val;
- dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
-}
-
-void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for host_support_fs_low_power\n");
- dev_err(hsotg->dev,
- "host_support_fs_low_power must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev,
- "Setting host_support_fs_low_power to %d\n", val);
- }
-
- hsotg->core_params->host_support_fs_ls_low_power = val;
-}
-
-void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.enable_dynamic_fifo;
- dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
- }
-
- hsotg->core_params->enable_dynamic_fifo = val;
-}
-
-void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_rx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
- }
-
- hsotg->core_params->host_rx_fifo_size = val;
-}
-
-void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_nperio_tx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
- val);
- }
-
- hsotg->core_params->host_nperio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_perio_tx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
- val);
- }
-
- hsotg->core_params->host_perio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for max_transfer_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.max_transfer_size;
- dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
- }
-
- hsotg->core_params->max_transfer_size = val;
-}
-
-void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 15 || val > hsotg->hw_params.max_packet_count)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for max_packet_count. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.max_packet_count;
- dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
- }
-
- hsotg->core_params->max_packet_count = val;
-}
-
-void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 1 || val > hsotg->hw_params.host_channels)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_channels. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_channels;
- dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
- }
-
- hsotg->core_params->host_channels = val;
-}
-
-void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 0;
- u32 hs_phy_type, fs_phy_type;
-
- if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
- DWC2_PHY_TYPE_PARAM_ULPI)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_type\n");
- dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
- }
-
- valid = 0;
- }
-
- hs_phy_type = hsotg->hw_params.hs_phy_type;
- fs_phy_type = hsotg->hw_params.fs_phy_type;
- if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
- (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
- valid = 1;
- else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
- (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
- valid = 1;
- else if (val == DWC2_PHY_TYPE_PARAM_FS &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
- valid = 1;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for phy_type. Check HW configuration.\n",
- val);
- val = DWC2_PHY_TYPE_PARAM_FS;
- if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
- if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
- val = DWC2_PHY_TYPE_PARAM_UTMI;
- else
- val = DWC2_PHY_TYPE_PARAM_ULPI;
- }
- dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- }
-
- hsotg->core_params->phy_type = val;
-}
-
-static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
-{
- return hsotg->core_params->phy_type;
-}
-
-void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for speed parameter\n");
- dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == DWC2_SPEED_PARAM_HIGH &&
- dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for speed parameter. Check HW configuration.\n",
- val);
- val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
- DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
- dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
- }
-
- hsotg->core_params->speed = val;
-}
-
-void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
- DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for host_ls_low_power_phy_clk parameter\n");
- dev_err(hsotg->dev,
- "host_ls_low_power_phy_clk must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
- dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
- val);
- val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
- ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
- : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
- dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
- val);
- }
-
- hsotg->core_params->host_ls_low_power_phy_clk = val;
-}
-
-void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
- dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
- }
-
- hsotg->core_params->phy_ulpi_ddr = val;
-}
-
-void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for phy_ulpi_ext_vbus\n");
- dev_err(hsotg->dev,
- "phy_ulpi_ext_vbus must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
- }
-
- hsotg->core_params->phy_ulpi_ext_vbus = val;
-}
-
-void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 0;
-
- switch (hsotg->hw_params.utmi_phy_data_width) {
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
- valid = (val == 8);
- break;
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
- valid = (val == 16);
- break;
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
- valid = (val == 8 || val == 16);
- break;
- }
-
- if (!valid) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "%d invalid for phy_utmi_width. Check HW configuration.\n",
- val);
- }
- val = (hsotg->hw_params.utmi_phy_data_width ==
- GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
- dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
- }
-
- hsotg->core_params->phy_utmi_width = val;
-}
-
-void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
- dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
- }
-
- hsotg->core_params->ulpi_fs_ls = val;
-}
-
-void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for ts_dline\n");
- dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
- }
-
- hsotg->core_params->ts_dline = val;
-}
-
-void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
- dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
- }
-
- valid = 0;
- }
-
- if (val == 1 && !(hsotg->hw_params.i2c_enable))
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for i2c_enable. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.i2c_enable;
- dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- }
-
- hsotg->core_params->i2c_enable = val;
-}
-
-void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for en_multiple_tx_fifo,\n");
- dev_err(hsotg->dev,
- "en_multiple_tx_fifo must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.en_multiple_tx_fifo;
- dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
- }
-
- hsotg->core_params->en_multiple_tx_fifo = val;
-}
-
-void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter reload_ctl\n", val);
- dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter reload_ctl. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
- dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
- }
-
- hsotg->core_params->reload_ctl = val;
-}
-
-void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
-{
- if (val != -1)
- hsotg->core_params->ahbcfg = val;
- else
- hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
- GAHBCFG_HBSTLEN_SHIFT;
-}
-
-void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter otg_ver\n", val);
- dev_err(hsotg->dev,
- "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
- }
-
- hsotg->core_params->otg_ver = val;
-}
-
-static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter uframe_sched\n",
- val);
- dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
- }
- val = 1;
- dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
- }
-
- hsotg->core_params->uframe_sched = val;
-}
-
-static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter external_id_pin_ctl\n",
- val);
- dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
- }
-
- hsotg->core_params->external_id_pin_ctl = val;
-}
-
-static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter hibernation\n",
- val);
- dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
- }
-
- hsotg->core_params->hibernation = val;
-}
-
-/*
- * This function is called during module intialization to pass module parameters
- * for the DWC_otg core.
- */
-void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params)
-{
- dev_dbg(hsotg->dev, "%s()\n", __func__);
-
- dwc2_set_param_otg_cap(hsotg, params->otg_cap);
- dwc2_set_param_dma_enable(hsotg, params->dma_enable);
- dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
- dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
- dwc2_set_param_host_support_fs_ls_low_power(hsotg,
- params->host_support_fs_ls_low_power);
- dwc2_set_param_enable_dynamic_fifo(hsotg,
- params->enable_dynamic_fifo);
- dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
- dwc2_set_param_max_transfer_size(hsotg,
- params->max_transfer_size);
- dwc2_set_param_max_packet_count(hsotg,
- params->max_packet_count);
- dwc2_set_param_host_channels(hsotg, params->host_channels);
- dwc2_set_param_phy_type(hsotg, params->phy_type);
- dwc2_set_param_speed(hsotg, params->speed);
- dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
- params->host_ls_low_power_phy_clk);
- dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
- dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
- params->phy_ulpi_ext_vbus);
- dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
- dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
- dwc2_set_param_ts_dline(hsotg, params->ts_dline);
- dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
- dwc2_set_param_en_multiple_tx_fifo(hsotg,
- params->en_multiple_tx_fifo);
- dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
- dwc2_set_param_otg_ver(hsotg, params->otg_ver);
- dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
- dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
- dwc2_set_param_hibernation(hsotg, params->hibernation);
-}
-
/*
* Forces either host or device mode if the controller is not
* currently in that mode.
*
* Returns true if the mode was forced.
*/
-static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
{
if (host && dwc2_is_host_mode(hsotg))
return false;
@@ -1442,232 +751,9 @@ static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
return dwc2_force_mode(hsotg, host);
}
-/*
- * Gets host hardware parameters. Forces host mode if not currently in
- * host mode. Should be called immediately after a core soft reset in
- * order to get the reset values.
- */
-static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- u32 gnptxfsiz;
- u32 hptxfsiz;
- bool forced;
-
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
- return;
-
- forced = dwc2_force_mode_if_needed(hsotg, true);
-
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
- dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
-
- if (forced)
- dwc2_clear_force_mode(hsotg);
-
- hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
- hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
-}
-
-/*
- * Gets device hardware parameters. Forces device mode if not
- * currently in device mode. Should be called immediately after a core
- * soft reset in order to get the reset values.
- */
-static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- bool forced;
- u32 gnptxfsiz;
-
- if (hsotg->dr_mode == USB_DR_MODE_HOST)
- return;
-
- forced = dwc2_force_mode_if_needed(hsotg, false);
-
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
-
- if (forced)
- dwc2_clear_force_mode(hsotg);
-
- hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
-}
-
-/**
- * During device initialization, read various hardware configuration
- * registers and interpret the contents.
- */
-int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- unsigned width;
- u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
- u32 grxfsiz;
-
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
- * as in "OTG version 2.xx" or "OTG version 3.xx".
- */
- hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
- if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
- (hw->snpsid & 0xfffff000) != 0x4f543000) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
- hw->snpsid);
- return -ENODEV;
- }
-
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
- hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
- hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
-
- hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
- hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
- hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
- hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
- grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
-
- dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
- dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
- dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
- dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
- dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
-
- /*
- * Host specific hardware parameters. Reading these parameters
- * requires the controller to be in host mode. The mode will
- * be forced, if necessary, to read these values.
- */
- dwc2_get_host_hwparams(hsotg);
- dwc2_get_dev_hwparams(hsotg);
-
- /* hwcfg1 */
- hw->dev_ep_dirs = hwcfg1;
-
- /* hwcfg2 */
- hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
- GHWCFG2_OP_MODE_SHIFT;
- hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
- GHWCFG2_ARCHITECTURE_SHIFT;
- hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
- hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
- GHWCFG2_NUM_HOST_CHAN_SHIFT);
- hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
- GHWCFG2_HS_PHY_TYPE_SHIFT;
- hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
- GHWCFG2_FS_PHY_TYPE_SHIFT;
- hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
- GHWCFG2_NUM_DEV_EP_SHIFT;
- hw->nperio_tx_q_depth =
- (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
- GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
- hw->host_perio_tx_q_depth =
- (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
- hw->dev_token_q_depth =
- (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
- GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
-
- /* hwcfg3 */
- width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
- hw->max_transfer_size = (1 << (width + 11)) - 1;
- width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
- hw->max_packet_count = (1 << (width + 4)) - 1;
- hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
- hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
- GHWCFG3_DFIFO_DEPTH_SHIFT;
-
- /* hwcfg4 */
- hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
- hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
- GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
- hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
- hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
- hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
- GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
-
- /* fifo sizes */
- hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
- GRXFSIZ_DEPTH_SHIFT;
-
- dev_dbg(hsotg->dev, "Detected values from hardware:\n");
- dev_dbg(hsotg->dev, " op_mode=%d\n",
- hw->op_mode);
- dev_dbg(hsotg->dev, " arch=%d\n",
- hw->arch);
- dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
- hw->dma_desc_enable);
- dev_dbg(hsotg->dev, " power_optimized=%d\n",
- hw->power_optimized);
- dev_dbg(hsotg->dev, " i2c_enable=%d\n",
- hw->i2c_enable);
- dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
- hw->hs_phy_type);
- dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
- hw->fs_phy_type);
- dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
- hw->utmi_phy_data_width);
- dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
- hw->num_dev_ep);
- dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
- hw->num_dev_perio_in_ep);
- dev_dbg(hsotg->dev, " host_channels=%d\n",
- hw->host_channels);
- dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
- hw->max_transfer_size);
- dev_dbg(hsotg->dev, " max_packet_count=%d\n",
- hw->max_packet_count);
- dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
- hw->nperio_tx_q_depth);
- dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
- hw->host_perio_tx_q_depth);
- dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
- hw->dev_token_q_depth);
- dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
- hw->enable_dynamic_fifo);
- dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
- hw->en_multiple_tx_fifo);
- dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
- hw->total_fifo_size);
- dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
- hw->host_rx_fifo_size);
- dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
- hw->host_nperio_tx_fifo_size);
- dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
- hw->host_perio_tx_fifo_size);
- dev_dbg(hsotg->dev, "\n");
-
- return 0;
-}
-
-/*
- * Sets all parameters to the given value.
- *
- * Assumes that the dwc2_core_params struct contains only integers.
- */
-void dwc2_set_all_params(struct dwc2_core_params *params, int value)
-{
- int *p = (int *)params;
- size_t size = sizeof(*params) / sizeof(*p);
- int i;
-
- for (i = 0; i < size; i++)
- p[i] = value;
-}
-
-
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
- return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
+ return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103;
}
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a0414b1d..9548d3e03453 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -172,6 +172,11 @@ struct dwc2_hsotg_req;
* @periodic: Set if this is a periodic ep, such as Interrupt
* @isochronous: Set if this is a isochronous ep
* @send_zlp: Set if we need to send a zero-length packet.
+ * @desc_list_dma: The DMA address of descriptor chain currently in use.
+ * @desc_list: Pointer to descriptor DMA chain head currently in use.
+ * @desc_count: Count of entries within the DMA descriptor chain of EP.
+ * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1.
+ * @next_desc: index of next free descriptor in the ISOC chain under SW control.
* @total_data: The total number of data bytes done.
* @fifo_size: The size of the FIFO (for periodic IN endpoints)
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
@@ -219,6 +224,13 @@ struct dwc2_hsotg_ep {
#define TARGET_FRAME_INITIAL 0xFFFFFFFF
bool frame_overrun;
+ dma_addr_t desc_list_dma;
+ struct dwc2_dma_desc *desc_list;
+ u8 desc_count;
+
+ unsigned char isoc_chain_num;
+ unsigned int next_desc;
+
char name[10];
};
@@ -286,7 +298,7 @@ enum dwc2_ep0_state {
* @otg_ver: OTG version supported
* 0 - 1.3 (default)
* 1 - 2.0
- * @dma_enable: Specifies whether to use slave or DMA mode for accessing
+ * @host_dma: Specifies whether to use slave or DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this parameter if none is specified.
* 0 - Slave (always available)
@@ -314,7 +326,8 @@ enum dwc2_ep0_state {
* @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
* 1 - Allow dynamic FIFO sizing (default, if available)
* @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
- * are enabled
+ * are enabled for non-periodic IN endpoints in device
+ * mode.
* @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when
* dynamic FIFO sizing is enabled
* 16 to 32768
@@ -417,6 +430,20 @@ enum dwc2_ep0_state {
* needed.
* 0 - No (default)
* 1 - Yes
+ * @g_dma: Enables gadget dma usage (default: autodetect).
+ * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
+ * @g_rx_fifo_size: The periodic rx fifo size for the device, in
+ * DWORDS from 16-32768 (default: 2048 if
+ * possible, otherwise autodetect).
+ * @g_np_tx_fifo_size: The non-periodic tx fifo size for the device in
+ * DWORDS from 16-32768 (default: 1024 if
+ * possible, otherwise autodetect).
+ * @g_tx_fifo_size: An array of TX fifo sizes in dedicated fifo
+ * mode. Each value corresponds to one EP
+ * starting from EP1 (max 15 values). Sizes are
+ * in DWORDS with possible values from
+ * 16-32768 (default: 256, 256, 256, 256, 768,
+ * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0).
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -430,11 +457,18 @@ struct dwc2_core_params {
* dwc2_set_all_params!
*/
int otg_cap;
+#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+
int otg_ver;
- int dma_enable;
int dma_desc_enable;
int dma_desc_fs_enable;
int speed;
+#define DWC2_SPEED_PARAM_HIGH 0
+#define DWC2_SPEED_PARAM_FULL 1
+#define DWC2_SPEED_PARAM_LOW 2
+
int enable_dynamic_fifo;
int en_multiple_tx_fifo;
int host_rx_fifo_size;
@@ -444,19 +478,44 @@ struct dwc2_core_params {
int max_packet_count;
int host_channels;
int phy_type;
+#define DWC2_PHY_TYPE_PARAM_FS 0
+#define DWC2_PHY_TYPE_PARAM_UTMI 1
+#define DWC2_PHY_TYPE_PARAM_ULPI 2
+
int phy_utmi_width;
int phy_ulpi_ddr;
int phy_ulpi_ext_vbus;
+#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
+
int i2c_enable;
int ulpi_fs_ls;
int host_support_fs_ls_low_power;
int host_ls_low_power_phy_clk;
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
int ts_dline;
int reload_ctl;
int ahbcfg;
int uframe_sched;
int external_id_pin_ctl;
int hibernation;
+
+ /*
+ * The following parameters are *only* set via device
+ * properties and cannot be set directly in this structure.
+ */
+
+ /* Host parameters */
+ bool host_dma;
+
+ /* Gadget parameters */
+ bool g_dma;
+ bool g_dma_desc;
+ u16 g_rx_fifo_size;
+ u16 g_np_tx_fifo_size;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
/**
@@ -516,10 +575,9 @@ struct dwc2_hw_params {
unsigned op_mode:3;
unsigned arch:2;
unsigned dma_desc_enable:1;
- unsigned dma_desc_fs_enable:1;
unsigned enable_dynamic_fifo:1;
unsigned en_multiple_tx_fifo:1;
- unsigned host_rx_fifo_size:16;
+ unsigned rx_fifo_size:16;
unsigned host_nperio_tx_fifo_size:16;
unsigned dev_nperio_tx_fifo_size:16;
unsigned host_perio_tx_fifo_size:16;
@@ -839,11 +897,13 @@ struct dwc2_hregs_backup {
* @ctrl_req: Request for EP0 control packets.
* @ep0_state: EP0 control transfers state
* @test_mode: USB test mode requested by the host
+ * @setup_desc_dma: EP0 setup stage desc chain DMA address
+ * @setup_desc: EP0 setup stage desc chain pointer
+ * @ctrl_in_desc_dma: EP0 IN data phase desc chain DMA address
+ * @ctrl_in_desc: EP0 IN data phase desc chain pointer
+ * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address
+ * @ctrl_out_desc: EP0 OUT data phase desc chain pointer
* @eps: The endpoints being supplied to the gadget framework
- * @g_using_dma: Indicate if dma usage is enabled
- * @g_rx_fifo_sz: Contains rx fifo size value
- * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value
- * @g_tx_fifo_sz: Contains tx fifo size value per endpoints
*/
struct dwc2_hsotg {
struct device *dev;
@@ -851,7 +911,7 @@ struct dwc2_hsotg {
/** Params detected from hardware */
struct dwc2_hw_params hw_params;
/** Params to actually use */
- struct dwc2_core_params *core_params;
+ struct dwc2_core_params params;
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
unsigned int hcd_enabled:1;
@@ -891,6 +951,8 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_2_94a 0x4f54294a
#define DWC2_CORE_REV_3_00a 0x4f54300a
#define DWC2_CORE_REV_3_10a 0x4f54310a
+#define DWC2_FS_IOT_REV_1_00a 0x5531100a
+#define DWC2_HS_IOT_REV_1_00a 0x5532100a
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
@@ -986,15 +1048,18 @@ struct dwc2_hsotg {
enum dwc2_ep0_state ep0_state;
u8 test_mode;
+ dma_addr_t setup_desc_dma[2];
+ struct dwc2_dma_desc *setup_desc[2];
+ dma_addr_t ctrl_in_desc_dma;
+ struct dwc2_dma_desc *ctrl_in_desc;
+ dma_addr_t ctrl_out_desc_dma;
+ struct dwc2_dma_desc *ctrl_out_desc;
+
struct usb_gadget gadget;
unsigned int enabled:1;
unsigned int connected:1;
struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
- u32 g_using_dma;
- u32 g_rx_fifo_sz;
- u32 g_np_g_tx_fifo_sz;
- u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
@@ -1016,6 +1081,22 @@ enum dwc2_halt_status {
DWC2_HC_XFER_URB_DEQUEUE,
};
+/* Core version information */
+static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000;
+}
+
+static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000;
+}
+
+static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000;
+}
+
/*
* The following functions support initialization of the core driver component
* and the DWC_otg controller
@@ -1025,6 +1106,8 @@ extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host);
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg);
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
@@ -1044,217 +1127,16 @@ extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
/* This function should be called on every hardware interrupt. */
extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
-/* OTG Core Parameters */
-
-/*
- * Specifies the OTG capabilities. The driver will automatically
- * detect the value for this parameter if none is specified.
- * 0 - HNP and SRP capable (default)
- * 1 - SRP Only capable
- * 2 - No HNP/SRP capable
- */
-extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
-#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
-#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
-
-/*
- * Specifies whether to use slave or DMA mode for accessing the data
- * FIFOs. The driver will automatically detect the value for this
- * parameter if none is specified.
- * 0 - Slave
- * 1 - DMA (default, if available)
- */
-extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled specifies whether to use
- * address DMA or DMA Descritor mode for accessing the data
- * FIFOs in device mode. The driver will automatically detect
- * the value for this parameter if none is specified.
- * 0 - address DMA
- * 1 - DMA Descriptor(default, if available)
- */
-extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled specifies whether to use
- * address DMA or DMA Descritor mode with full speed devices
- * for accessing the data FIFOs in host mode.
- * 0 - address DMA
- * 1 - FS DMA Descriptor(default, if available)
- */
-extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Specifies the maximum speed of operation in host and device mode.
- * The actual speed depends on the speed of the attached device and
- * the value of phy_type. The actual speed depends on the speed of the
- * attached device.
- * 0 - High Speed (default)
- * 1 - Full Speed
- */
-extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_SPEED_PARAM_HIGH 0
-#define DWC2_SPEED_PARAM_FULL 1
-
-/*
- * Specifies whether low power mode is supported when attached
- * to a Full Speed or Low Speed device in host mode.
- *
- * 0 - Don't support low power mode (default)
- * 1 - Support low power mode
- */
-extern void dwc2_set_param_host_support_fs_ls_low_power(
- struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the PHY clock rate in low power mode when connected to a
- * Low Speed device in host mode. This parameter is applicable only if
- * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
- * then defaults to 6 MHZ otherwise 48 MHZ.
- *
- * 0 - 48 MHz
- * 1 - 6 MHz
- */
-extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
- int val);
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
-
-/*
- * 0 - Use cC FIFO size parameters
- * 1 - Allow dynamic FIFO sizing (default)
- */
-extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Number of 4-byte words in the Rx FIFO in host mode when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 1024)
- */
-extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Number of 4-byte words in the non-periodic Tx FIFO in host mode
- * when Dynamic FIFO sizing is enabled in the core.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Number of 4-byte words in the host periodic Tx FIFO when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * The maximum transfer size supported in bytes.
- * 2047 to 65,535 (default 65,535)
- */
-extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The maximum number of packets in a transfer.
- * 15 to 511 (default 511)
- */
-extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The number of host channel registers to use.
- * 1 to 16 (default 11)
- * Note: The FPGA configuration supports a maximum of 11 host channels.
- */
-extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the type of PHY interface to use. By default, the driver
- * will automatically detect the phy_type.
- *
- * 0 - Full Speed PHY
- * 1 - UTMI+ (default)
- * 2 - ULPI
- */
-extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_TYPE_PARAM_FS 0
-#define DWC2_PHY_TYPE_PARAM_UTMI 1
-#define DWC2_PHY_TYPE_PARAM_ULPI 2
-
-/*
- * Specifies the UTMI+ Data Width. This parameter is
- * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
- * PHY_TYPE, this parameter indicates the data width between
- * the MAC and the ULPI Wrapper.) Also, this parameter is
- * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
- * to "8 and 16 bits", meaning that the core has been
- * configured to work at either data path width.
- *
- * 8 or 16 bits (default 16)
- */
-extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether the ULPI operates at double or single
- * data rate. This parameter is only applicable if PHY_TYPE is
- * ULPI.
- *
- * 0 - single data rate ULPI interface with 8 bit wide data
- * bus (default)
- * 1 - double data rate ULPI interface with 4 bit wide data
- * bus
- */
-extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether to use the internal or external supply to
- * drive the vbus with a ULPI phy.
- */
-extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
-#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
-
-/*
- * Specifies whether to use the I2Cinterface for full speed PHY. This
- * parameter is only applicable if PHY_TYPE is FS.
- * 0 - No (default)
- * 1 - Yes
- */
-extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether dedicated transmit FIFOs are
- * enabled for non periodic IN endpoints in device mode
- * 0 - No
- * 1 - Yes
- */
-extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
- int val);
-
-extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
-
-extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
-
-extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+/* The device ID match table */
+extern const struct of_device_id dwc2_of_match_table[];
extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
+/* Parameters */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+int dwc2_init_params(struct dwc2_hsotg *hsotg);
+
/*
* The following functions check the controller's OTG operation mode
* capability (GHWCFG2.OTG_MODE).
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index d85c5c9f96c1..5b228ba6045f 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -159,9 +159,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
" ++OTG Interrupt: Session Request Success Status Change++\n");
gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
if (gotgctl & GOTGCTL_SESREQSCS) {
- if (hsotg->core_params->phy_type ==
+ if (hsotg->params.phy_type ==
DWC2_PHY_TYPE_PARAM_FS
- && hsotg->core_params->i2c_enable > 0) {
+ && hsotg->params.i2c_enable > 0) {
hsotg->srp_success = 1;
} else {
/* Clear Session Request */
@@ -370,7 +370,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
} else {
- if (hsotg->core_params->hibernation)
+ if (hsotg->params.hibernation)
return;
if (hsotg->lx_state != DWC2_L1) {
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 55d91f24f94a..0a130916a91c 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -213,7 +213,7 @@ static int fifo_show(struct seq_file *seq, void *v)
val = dwc2_readl(regs + GNPTXFSIZ);
seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
val >> FIFOSIZE_DEPTH_SHIFT,
- val & FIFOSIZE_DEPTH_MASK);
+ val & FIFOSIZE_STARTADDR_MASK);
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 24fbebc9b409..b95930f20d90 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -93,7 +93,18 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
*/
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
- return hsotg->g_using_dma;
+ return hsotg->params.g_dma;
+}
+
+/*
+ * using_desc_dma - return the descriptor DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using descriptor DMA.
+ */
+static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.g_dma_desc;
}
/**
@@ -190,16 +201,17 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
unsigned int addr;
int timeout;
u32 val;
+ u32 *txfsz = hsotg->params.g_tx_fifo_size;
/* Reset fifo map if not correctly cleared during previous session */
WARN_ON(hsotg->fifo_map);
hsotg->fifo_map = 0;
/* set RX/NPTX FIFO sizes */
- dwc2_writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ);
- dwc2_writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) |
- (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT),
- hsotg->regs + GNPTXFSIZ);
+ dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
+ dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
+ (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
+ hsotg->regs + GNPTXFSIZ);
/*
* arange all the rest of the TX FIFOs, as some versions of this
@@ -209,7 +221,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
*/
/* start at the end of the GNPTXFSIZ, rounded up */
- addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz;
+ addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
/*
* Configure fifos sizes from provided configuration and assign
@@ -217,15 +229,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* given endpoint.
*/
for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
- if (!hsotg->g_tx_fifo_sz[ep])
+ if (!txfsz[ep])
continue;
val = addr;
- val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
- WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
"insufficient fifo memory");
- addr += hsotg->g_tx_fifo_sz[ep];
+ addr += txfsz[ep];
dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
+ val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
}
/*
@@ -303,12 +316,55 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_req *hs_req)
{
struct usb_request *req = &hs_req->req;
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+}
- /* ignore this if we're not moving any data */
- if (hs_req->req.length == 0)
- return;
+/*
+ * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
+ * for Control endpoint
+ * @hsotg: The device state.
+ *
+ * This function will allocate 4 descriptor chains for EP 0: 2 for
+ * Setup stage, per one for IN and OUT data/status transactions.
+ */
+static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
+{
+ hsotg->setup_desc[0] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[0],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[0])
+ goto fail;
+
+ hsotg->setup_desc[1] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[1],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[1])
+ goto fail;
+
+ hsotg->ctrl_in_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_in_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_in_desc)
+ goto fail;
+
+ hsotg->ctrl_out_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_out_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_out_desc)
+ goto fail;
- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+ return 0;
+
+fail:
+ return -ENOMEM;
}
/**
@@ -541,6 +597,273 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
+ * DMA descriptor chain prepared for specific endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * depending on its descriptor chain capacity so that transfers that
+ * are too long can be split.
+ */
+static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+{
+ int is_isoc = hs_ep->isochronous;
+ unsigned int maxsize;
+
+ if (is_isoc)
+ maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+ DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ else
+ maxsize = DEV_DMA_NBYTES_LIMIT;
+
+ /* Above size of one descriptor was chosen, multiple it */
+ maxsize *= MAX_DMA_DESC_NUM_GENERIC;
+
+ return maxsize;
+}
+
+/*
+ * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
+ * @hs_ep: The endpoint
+ * @mask: RX/TX bytes mask to be defined
+ *
+ * Returns maximum data payload for one descriptor after analyzing endpoint
+ * characteristics.
+ * DMA descriptor transfer bytes limit depends on EP type:
+ * Control out - MPS,
+ * Isochronous - descriptor rx/tx bytes bitfield limit,
+ * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
+ * have concatenations from various descriptors within one packet.
+ *
+ * Selects corresponding mask for RX/TX bytes as well.
+ */
+static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+{
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+ u32 desc_size = 0;
+
+ if (!hs_ep->index && !dir_in) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ } else if (hs_ep->isochronous) {
+ if (dir_in) {
+ desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
+ } else {
+ desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
+ }
+ } else {
+ desc_size = DEV_DMA_NBYTES_LIMIT;
+ *mask = DEV_DMA_NBYTES_MASK;
+
+ /* Round down desc_size to be mps multiple */
+ desc_size -= desc_size % mps;
+ }
+
+ return desc_size;
+}
+
+/*
+ * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
+ * @hs_ep: The endpoint
+ * @dma_buff: DMA address to use
+ * @len: Length of the transfer
+ *
+ * This function will iterate over descriptor chain and fill its entries
+ * with corresponding information based on transfer data.
+ */
+static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff,
+ unsigned int len)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ u32 mps = hs_ep->ep.maxpacket;
+ u32 maxsize = 0;
+ u32 offset = 0;
+ u32 mask = 0;
+ int i;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+
+ hs_ep->desc_count = (len / maxsize) +
+ ((len % maxsize) ? 1 : 0);
+ if (len == 0)
+ hs_ep->desc_count = 1;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+ << DEV_DMA_BUFF_STS_SHIFT);
+
+ if (len > maxsize) {
+ if (!hs_ep->index && !dir_in)
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ desc->status |= (maxsize <<
+ DEV_DMA_NBYTES_SHIFT & mask);
+ desc->buf = dma_buff + offset;
+
+ len -= maxsize;
+ offset += maxsize;
+ } else {
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ if (dir_in)
+ desc->status |= (len % mps) ? DEV_DMA_SHORT :
+ ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
+ if (len > maxsize)
+ dev_err(hsotg->dev, "wrong len %d\n", len);
+
+ desc->status |=
+ len << DEV_DMA_NBYTES_SHIFT & mask;
+ desc->buf = dma_buff + offset;
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY
+ << DEV_DMA_BUFF_STS_SHIFT);
+ desc++;
+ }
+}
+
+/*
+ * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
+ * @hs_ep: The isochronous endpoint.
+ * @dma_buff: usb requests dma buffer.
+ * @len: usb request transfer length.
+ *
+ * Finds out index of first free entry either in the bottom or up half of
+ * descriptor chain depend on which is under SW control and not processed
+ * by HW. Then fills that descriptor with the data of the arrived usb request,
+ * frame info, sets Last and IOC bits increments next_desc. If filled
+ * descriptor is not the first one, removes L bit from the previous descriptor
+ * status.
+ */
+static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff, unsigned int len)
+{
+ struct dwc2_dma_desc *desc;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 index;
+ u32 maxsize = 0;
+ u32 mask = 0;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+ if (len > maxsize) {
+ dev_err(hsotg->dev, "wrong len %d\n", len);
+ return -EINVAL;
+ }
+
+ /*
+ * If SW has already filled half of chain, then return and wait for
+ * the other chain to be processed by HW.
+ */
+ if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
+ return -EBUSY;
+
+ /* Increment frame number by interval for IN */
+ if (hs_ep->dir_in)
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
+ hs_ep->next_desc;
+
+ /* Sanity check of calculated index */
+ if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
+ (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
+ dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
+ return -EINVAL;
+ }
+
+ desc = &hs_ep->desc_list[index];
+
+ /* Clear L bit of previous desc if there is more than one entry in the chain */
+ if (hs_ep->next_desc)
+ hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
+
+ dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
+ __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
+
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);
+
+ desc->buf = dma_buff;
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
+ ((len << DEV_DMA_NBYTES_SHIFT) & mask));
+
+ if (hs_ep->dir_in) {
+ desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+ DEV_DMA_ISOC_PID_MASK) |
+ ((len % hs_ep->ep.maxpacket) ?
+ DEV_DMA_SHORT : 0) |
+ ((hs_ep->target_frame <<
+ DEV_DMA_ISOC_FRNUM_SHIFT) &
+ DEV_DMA_ISOC_FRNUM_MASK);
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
+
+ /* Update index of last configured entry in the chain */
+ hs_ep->next_desc++;
+
+ return 0;
+}
+
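The chain is treated as two halves of MAX_DMA_DESC_NUM_GENERIC / 2 entries each: SW fills one half while HW drains the other. A minimal sketch of the index arithmetic, assuming an illustrative 256-entry chain:

#include <stdio.h>

#define CHAIN_LEN       256     /* assumed MAX_DMA_DESC_NUM_GENERIC */
#define HALF_LEN        (CHAIN_LEN / 2)

/*
 * Absolute index of the next free descriptor, or -1 if the half owned
 * by software is already full.
 */
static int next_free_index(unsigned int chain_num, unsigned int next_desc)
{
        if (next_desc == HALF_LEN)
                return -1;                      /* wait for HW to finish */
        return HALF_LEN * chain_num + next_desc;
}

int main(void)
{
        printf("%d\n", next_free_index(0, 0));    /* 0   : lower half */
        printf("%d\n", next_free_index(1, 5));    /* 133 : upper half */
        printf("%d\n", next_free_index(0, 128));  /* -1  : half full  */
        return 0;
}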
+/*
+ * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
+ * @hs_ep: The isochronous endpoint.
+ *
+ * Prepare the first descriptor chain for isochronous endpoints. Afterwards
+ * write the DMA address to HW and enable the endpoint.
+ *
+ * Switch between descriptor chains via isoc_chain_num to give SW the chance
+ * to prepare the second chain while the first one is being processed by HW.
+ */
+static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req, *treq;
+ int index = hs_ep->index;
+ int ret;
+ u32 dma_reg;
+ u32 depctl;
+ u32 ctrl;
+
+ if (list_empty(&hs_ep->queue)) {
+ dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ hs_req->req.length);
+ if (ret) {
+ dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
+ break;
+ }
+ }
+
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+
+ /* write descriptor chain address to control register */
+ dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+ ctrl = dwc2_readl(hsotg->regs + depctl);
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(ctrl, hsotg->regs + depctl);
+
+ /* Switch ISOC descriptor chain number being processed by SW */
+ hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+ hs_ep->next_desc = 0;
+}
+
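Start-up therefore drains the request queue into the half of the chain currently owned by SW until either the queue or the half runs out, hands that half to HW, and flips isoc_chain_num. A standalone sketch of that fill loop over a plain array of request lengths:

#include <stdio.h>

#define HALF_LEN 128    /* assumed half of MAX_DMA_DESC_NUM_GENERIC */

int main(void)
{
        unsigned int req_len[] = { 188, 188, 1024, 64 };
        unsigned int nreqs = sizeof(req_len) / sizeof(req_len[0]);
        unsigned int next_desc = 0, chain = 0, i;

        for (i = 0; i < nreqs && next_desc < HALF_LEN; i++) {
                /* one queued request maps to one isochronous descriptor */
                printf("desc %u <- request of %u bytes\n",
                       chain * HALF_LEN + next_desc, req_len[i]);
                next_desc++;
        }

        /* hand this half to HW and start preparing the other one */
        chain ^= 1;
        next_desc = 0;
        printf("now filling chain half %u\n", chain);
        return 0;
}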
+/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
* @hs_ep: The endpoint to process a request for
@@ -565,6 +888,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
unsigned length;
unsigned packets;
unsigned maxreq;
+ unsigned int dma_reg;
if (index != 0) {
if (hs_ep->req && !continuing) {
@@ -579,6 +903,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
}
+ dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
@@ -598,7 +923,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
ureq->length, ureq->actual);
- maxreq = get_ep_limit(hs_ep);
+ if (!using_desc_dma(hsotg))
+ maxreq = get_ep_limit(hs_ep);
+ else
+ maxreq = dwc2_gadget_get_chain_limit(hs_ep);
+
if (length > maxreq) {
int round = maxreq % hs_ep->ep.maxpacket;
@@ -650,22 +979,51 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
/* store the request as the current one we're doing */
hs_ep->req = hs_req;
- /* write size / packets */
- dwc2_writel(epsize, hsotg->regs + epsize_reg);
+ if (using_desc_dma(hsotg)) {
+ u32 offset = 0;
+ u32 mps = hs_ep->ep.maxpacket;
- if (using_dma(hsotg) && !continuing) {
- unsigned int dma_reg;
+ /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
+ if (!dir_in) {
+ if (!index)
+ length = mps;
+ else if (length % mps)
+ length += (mps - (length % mps));
+ }
/*
- * write DMA address to control register, buffer already
- * synced by dwc2_hsotg_ep_queue().
+ * If there is more data to send, adjust the DMA address for the EP0
+ * out data stage. ureq->dma itself stays unchanged, so offset the
+ * address written to HW by the data count already transferred before
+ * starting the new transaction.
*/
+ if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
+ continuing)
+ offset = ureq->actual;
- dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
- dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+ /* Fill DDMA chain entries */
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
+ length);
- dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
- __func__, &ureq->dma, dma_reg);
+ /* write descriptor chain address to control register */
+ dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
+ __func__, (u32)hs_ep->desc_list_dma, dma_reg);
+ } else {
+ /* write size / packets */
+ dwc2_writel(epsize, hsotg->regs + epsize_reg);
+
+ if (using_dma(hsotg) && !continuing && (length != 0)) {
+ /*
+ * write DMA address to control register, buffer
+ * already synced by dwc2_hsotg_ep_queue().
+ */
+
+ dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
+ __func__, &ureq->dma, dma_reg);
+ }
}
if (hs_ep->isochronous && hs_ep->interval == 1) {
@@ -738,13 +1096,8 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct usb_request *req)
{
- struct dwc2_hsotg_req *hs_req = our_req(req);
int ret;
- /* if the length is zero, ignore the DMA data */
- if (hs_req->req.length == 0)
- return 0;
-
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
@@ -835,6 +1188,41 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
return false;
}
+/*
+ * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
+ * @hsotg: The driver state
+ * @hs_ep: The endpoint the descriptor chain is for
+ *
+ * Called to update EP0 structure's pointers depending on the stage of
+ * the control transfer.
+ */
+static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ switch (hsotg->ep0_state) {
+ case DWC2_EP0_SETUP:
+ case DWC2_EP0_STATUS_OUT:
+ hs_ep->desc_list = hsotg->setup_desc[0];
+ hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
+ break;
+ case DWC2_EP0_DATA_IN:
+ case DWC2_EP0_STATUS_IN:
+ hs_ep->desc_list = hsotg->ctrl_in_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
+ break;
+ case DWC2_EP0_DATA_OUT:
+ hs_ep->desc_list = hsotg->ctrl_out_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
+ break;
+ default:
+ dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
+ hsotg->ep0_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
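EP0 keeps three dedicated chains (setup, control-IN and control-OUT) and simply re-points desc_list at one of them for each stage. A reduced sketch of the selection; the enum values here are placeholders, not the driver's DWC2_EP0_* states:

#include <stdio.h>

enum ep0_state { EP0_SETUP, EP0_DATA_IN, EP0_DATA_OUT,
                 EP0_STATUS_IN, EP0_STATUS_OUT };

static const char *ep0_chain(enum ep0_state state)
{
        switch (state) {
        case EP0_SETUP:
        case EP0_STATUS_OUT:
                return "setup_desc[0]";
        case EP0_DATA_IN:
        case EP0_STATUS_IN:
                return "ctrl_in_desc";
        case EP0_DATA_OUT:
                return "ctrl_out_desc";
        }
        return "invalid";
}

int main(void)
{
        printf("%s\n", ep0_chain(EP0_DATA_OUT));        /* ctrl_out_desc */
        return 0;
}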
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
@@ -870,10 +1258,32 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
if (ret)
return ret;
}
+ /* If using descriptor DMA configure EP0 descriptor chain pointers */
+ if (using_desc_dma(hs) && !hs_ep->index) {
+ ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
+ if (ret)
+ return ret;
+ }
first = list_empty(&hs_ep->queue);
list_add_tail(&hs_req->queue, &hs_ep->queue);
+ /*
+ * Handle DDMA isochronous transfers separately - just add a new entry
+ * to the half of the descriptor chain that is not being processed by
+ * HW. The transfer will be started once SW gets either a NAK or an
+ * OutTknEpDis interrupt.
+ */
+ if (using_desc_dma(hs) && hs_ep->isochronous &&
+ hs_ep->target_frame != TARGET_FRAME_INITIAL) {
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ hs_req->req.length);
+ if (ret)
+ dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);
+
+ return 0;
+ }
+
if (first) {
if (!hs_ep->isochronous) {
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
@@ -1099,10 +1509,8 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
*/
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
- if (list_empty(&hs_ep->queue))
- return NULL;
-
- return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue);
+ return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
+ queue);
}
/**
@@ -1440,14 +1848,21 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
if (hs_ep->dir_in)
dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
- index);
+ index);
else
dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
- index);
+ index);
+ if (using_desc_dma(hsotg)) {
+ /* No specific buffer needed for ep0 ZLP */
+ dma_addr_t dma = hs_ep->desc_list_dma;
- dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
- DXEPTSIZ_XFERSIZE(0), hsotg->regs +
- epsiz_reg);
+ dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
+ } else {
+ dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(0), hsotg->regs +
+ epsiz_reg);
+ }
ctrl = dwc2_readl(hsotg->regs + epctl_reg);
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
@@ -1510,6 +1925,10 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
spin_lock(&hsotg->lock);
}
+ /* In DDMA mode there is no need to start the next ISOC request here */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous)
+ return;
+
/*
* Look to see if there is anything else to do. Note, the completion
* of the previous request may have caused a new request to be started
@@ -1521,6 +1940,115 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
}
}
+/*
+ * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
+ * @hs_ep: The endpoint the request was on.
+ *
+ * Get the first request from the ep queue and determine the descriptor on
+ * which the completion happened. Based on isoc_chain_num, SW discovers which
+ * half of the descriptor chain is currently in use by HW, adjusts the DMA
+ * address accordingly and calculates the index of the completed descriptor
+ * from the value of the DEPDMA register. Update the actual length of the
+ * request and give it back to the gadget.
+ */
+static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ struct usb_request *ureq;
+ int index;
+ dma_addr_t dma_addr;
+ u32 dma_reg;
+ u32 depdma;
+ u32 desc_sts;
+ u32 mask;
+
+ hs_req = get_ep_head(hs_ep);
+ if (!hs_req) {
+ dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
+ return;
+ }
+ ureq = &hs_req->req;
+
+ dma_addr = hs_ep->desc_list_dma;
+
+ /*
+ * If the lower half of the descriptor chain is currently in use by SW,
+ * the upper half is being processed by HW, so shift the DMA address
+ * to the upper half of the descriptor chain.
+ */
+ if (!hs_ep->isoc_chain_num)
+ dma_addr += sizeof(struct dwc2_dma_desc) *
+ (MAX_DMA_DESC_NUM_GENERIC / 2);
+
+ dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
+ depdma = dwc2_readl(hsotg->regs + dma_reg);
+
+ index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
+ desc_sts = hs_ep->desc_list[index].status;
+
+ mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
+ DEV_DMA_ISOC_RX_NBYTES_MASK;
+ ureq->actual = ureq->length -
+ ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);
+
+ /* Adjust actual length for ISOC Out if length is not aligned to 4 */
+ if (!hs_ep->dir_in && ureq->length & 0x3)
+ ureq->actual += 4 - (ureq->length & 0x3);
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+}
+
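Since DEPDMA has advanced to the descriptor after the one HW just closed, the completed index is recovered by subtracting the base of the HW-owned half and stepping back one entry, and the actual length is the requested length minus the bytes still reported in the descriptor. A standalone sketch of that arithmetic; the descriptor size and NBYTES mask are illustrative, not the hardware values:

#include <stdio.h>

#define DESC_SIZE       16u     /* assumed sizeof(struct dwc2_dma_desc) */
#define NBYTES_MASK     0xfffu  /* assumed ISOC NBYTES field */

int main(void)
{
        unsigned int base = 0x1000;     /* DMA base of the HW-owned half */
        unsigned int depdma = 0x1030;   /* value read back from DEPDMA */
        unsigned int status = 0x00c;    /* 12 bytes left untransferred */
        unsigned int req_len = 188;

        unsigned int index = (depdma - base) / DESC_SIZE - 1;
        unsigned int actual = req_len - (status & NBYTES_MASK);

        /* prints: completed descriptor 2, actual 176 bytes */
        printf("completed descriptor %u, actual %u bytes\n", index, actual);
        return 0;
}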
+/*
+ * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
+ * @hs_ep: The isochronous endpoint to be re-enabled.
+ *
+ * If the ep has been disabled because the last descriptor was serviced (IN
+ * endpoint) or a BNA was raised (OUT endpoint), check the status of the other
+ * half of the descriptor chain, which was under SW control while HW was busy,
+ * and restart the endpoint if needed.
+ */
+static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 depctl;
+ u32 dma_reg;
+ u32 ctrl;
+ u32 dma_addr = hs_ep->desc_list_dma;
+ unsigned char index = hs_ep->index;
+
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+
+ ctrl = dwc2_readl(hsotg->regs + depctl);
+
+ /*
+ * The EP was disabled if HW has processed the last descriptor or BNA
+ * was set. So restart the ep if SW has prepared a new descriptor chain
+ * in the ep_queue routine while HW was busy.
+ */
+ if (!(ctrl & DXEPCTL_EPENA)) {
+ if (!hs_ep->next_desc) {
+ dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
+ __func__);
+ return;
+ }
+
+ dma_addr += sizeof(struct dwc2_dma_desc) *
+ (MAX_DMA_DESC_NUM_GENERIC / 2) *
+ hs_ep->isoc_chain_num;
+ dwc2_writel(dma_addr, hsotg->regs + dma_reg);
+
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(ctrl, hsotg->regs + depctl);
+
+ /* Switch ISOC descriptor chain number being processed by SW */
+ hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+ hs_ep->next_desc = 0;
+
+ dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
+ __func__);
+ }
+}
+
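Restarting only requires pointing DEPDMA at the base of the half that SW has just finished filling. A short sketch of that address offset, again with illustrative descriptor size and chain length:

#include <stdio.h>

#define DESC_SIZE       16u     /* assumed descriptor size in bytes */
#define HALF_LEN        128u    /* assumed half-chain length */

/* Base DMA address of the half owned by the given chain number. */
static unsigned long half_base(unsigned long list_dma, unsigned int chain)
{
        return list_dma + (unsigned long)DESC_SIZE * HALF_LEN * chain;
}

int main(void)
{
        printf("0x%lx\n", half_base(0x1000, 0));        /* 0x1000 */
        printf("0x%lx\n", half_base(0x1000, 1));        /* 0x1800 */
        return 0;
}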
/**
* dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
* @hsotg: The device state.
@@ -1618,6 +2146,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
dwc2_writel(ctrl, hsotg->regs + epctl_reg);
}
+/*
+ * dwc2_gadget_get_xfersize_ddma - get the number of transferred bytes from descs
+ * @hs_ep: The endpoint on which the transfer went
+ *
+ * Iterate over the endpoint's descriptor chain and get the number of bytes
+ * remaining in the DMA descriptors after the transfer has completed. Used
+ * for non-isoc EPs.
+ */
+static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned int bytes_rem = 0;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ int i;
+ u32 status;
+
+ if (!desc)
+ return -EINVAL;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ status = desc->status;
+ bytes_rem += status & DEV_DMA_NBYTES_MASK;
+
+ if (status & DEV_DMA_STS_MASK)
+ dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ i, status & DEV_DMA_STS_MASK);
+ }
+
+ return bytes_rem;
+}
+
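For non-isochronous endpoints the transferred amount is later derived as the loaded size minus the bytes still reported in each descriptor's NBYTES field. A reduced sketch of that accumulation, with an assumed mask value:

#include <stdio.h>

#define NBYTES_MASK     0xffffu /* assumed non-isoc NBYTES field */

int main(void)
{
        /* status words of a 3-descriptor chain after the transfer */
        unsigned int status[] = { 0x0000, 0x0000, 0x0040 };
        unsigned int i, bytes_rem = 0;

        for (i = 0; i < 3; i++)
                bytes_rem += status[i] & NBYTES_MASK;

        printf("remaining %u bytes\n", bytes_rem);      /* remaining 64 bytes */
        return 0;
}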
/**
* dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
* @hsotg: The device instance
@@ -1648,6 +2206,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
return;
}
+ if (using_desc_dma(hsotg))
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+
if (using_dma(hsotg)) {
unsigned size_done;
@@ -1682,7 +2243,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
*/
}
- if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* DDMA IN status phase will start from StsPhseRcvd interrupt */
+ if (!using_desc_dma(hsotg) && epnum == 0 &&
+ hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN */
dwc2_hsotg_ep0_zlp(hsotg, true);
return;
@@ -1812,17 +2375,17 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
* @hsotg: The driver state.
* @ep: The index number of the endpoint
* @mps: The maximum packet size in bytes
+ * @mc: The multicount value
*
* Configure the maximum packet size for the given endpoint, updating
* the hardware control registers to reflect this.
*/
static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
- unsigned int ep, unsigned int mps, unsigned int dir_in)
+ unsigned int ep, unsigned int mps,
+ unsigned int mc, unsigned int dir_in)
{
struct dwc2_hsotg_ep *hs_ep;
void __iomem *regs = hsotg->regs;
- u32 mpsval;
- u32 mcval;
u32 reg;
hs_ep = index_to_ep(hsotg, ep, dir_in);
@@ -1830,32 +2393,32 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
return;
if (ep == 0) {
+ u32 mps_bytes = mps;
+
/* EP0 is a special case */
- mpsval = dwc2_hsotg_ep0_mps(mps);
- if (mpsval > 3)
+ mps = dwc2_hsotg_ep0_mps(mps_bytes);
+ if (mps > 3)
goto bad_mps;
- hs_ep->ep.maxpacket = mps;
+ hs_ep->ep.maxpacket = mps_bytes;
hs_ep->mc = 1;
} else {
- mpsval = mps & DXEPCTL_MPS_MASK;
- if (mpsval > 1024)
+ if (mps > 1024)
goto bad_mps;
- mcval = ((mps >> 11) & 0x3) + 1;
- hs_ep->mc = mcval;
- if (mcval > 3)
+ hs_ep->mc = mc;
+ if (mc > 3)
goto bad_mps;
- hs_ep->ep.maxpacket = mpsval;
+ hs_ep->ep.maxpacket = mps;
}
if (dir_in) {
reg = dwc2_readl(regs + DIEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
+ reg |= mps;
dwc2_writel(reg, regs + DIEPCTL(ep));
} else {
reg = dwc2_readl(regs + DOEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
+ reg |= mps;
dwc2_writel(reg, regs + DOEPCTL(ep));
}
@@ -1954,6 +2517,13 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
/* Finish ZLP handling for IN EP0 transactions */
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
dev_dbg(hsotg->dev, "zlp packet sent\n");
+
+ /*
+ * While sending the zlp for DWC2_EP0_STATUS_IN, the EP direction was
+ * changed to IN. Change it back to complete the OUT transfer request.
+ */
+ hs_ep->dir_in = 0;
+
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
if (hsotg->test_mode) {
int ret;
@@ -1979,8 +2549,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
* past the end of the buffer (DMA transfers are always 32bit
* aligned).
*/
-
- size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ if (using_desc_dma(hsotg)) {
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+ if (size_left < 0)
+ dev_err(hsotg->dev, "error parsing DDMA results %d\n",
+ size_left);
+ } else {
+ size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ }
size_done = hs_ep->size_loaded - size_left;
size_done += hs_ep->last_load;
@@ -2128,12 +2704,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
struct dwc2_hsotg *hsotg = ep->parent;
int dir_in = ep->dir_in;
u32 doepmsk;
+ u32 tmp;
if (dir_in || !ep->isochronous)
return;
+ /*
+ * Store the frame in which the irq was asserted here, as it can
+ * change while completing the request below.
+ */
+ tmp = dwc2_hsotg_read_frameno(hsotg);
+
dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
+ if (using_desc_dma(hsotg)) {
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
+ /* Start first ISO Out */
+ ep->target_frame = tmp;
+ dwc2_gadget_start_isoc_ddma(ep);
+ }
+ return;
+ }
+
if (ep->interval > 1 &&
ep->target_frame == TARGET_FRAME_INITIAL) {
u32 dsts;
@@ -2182,6 +2774,12 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+
+ if (using_desc_dma(hsotg)) {
+ dwc2_gadget_start_isoc_ddma(hs_ep);
+ return;
+ }
+
if (hs_ep->interval > 1) {
u32 ctrl = dwc2_readl(hsotg->regs +
DIEPCTL(hs_ep->index));
@@ -2237,8 +2835,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
ints &= ~DXEPINT_XFERCOMPL;
- if (ints & DXEPINT_STSPHSERCVD)
- dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
+ /*
+ * Don't process the XferCompl interrupt in DDMA if EP0 is still in the
+ * SETUP stage and XferCompl was generated without a SETUP phase done
+ * interrupt. SW should parse the received setup packet only after the
+ * host has exited the setup phase of the control transfer.
+ */
+ if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
+ hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
+ ints &= ~DXEPINT_XFERCOMPL;
if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
@@ -2246,11 +2851,17 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
__func__, dwc2_readl(hsotg->regs + epctl_reg),
dwc2_readl(hsotg->regs + epsiz_reg));
- /*
- * we get OutDone from the FIFO, so we only need to look
- * at completing IN requests here
- */
- if (dir_in) {
+ /* In DDMA handle isochronous requests separately */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous) {
+ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ /* Try to start next isoc request */
+ dwc2_gadget_start_next_isoc_ddma(hs_ep);
+ } else if (dir_in) {
+ /*
+ * We get OutDone from the FIFO, so we only
+ * need to look at completing IN requests here
+ * if operating in slave mode
+ */
if (hs_ep->isochronous && hs_ep->interval > 1)
dwc2_gadget_incr_frame_num(hs_ep);
@@ -2302,9 +2913,30 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
}
}
+ if (ints & DXEPINT_STSPHSERCVD) {
+ dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
+
+ /* Move to STATUS IN for DDMA */
+ if (using_desc_dma(hsotg))
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ }
+
if (ints & DXEPINT_BACK2BACKSETUP)
dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+ if (ints & DXEPINT_BNAINTR) {
+ dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
+
+ /*
+ * Try to start the next isoc request, if any.
+ * Sometimes the endpoint remains enabled after the BNA interrupt
+ * is asserted, which is not expected, hence we can enter here a
+ * couple of times.
+ */
+ if (hs_ep->isochronous)
+ dwc2_gadget_start_next_isoc_ddma(hs_ep);
+ }
+
if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
if (ints & DXEPINT_INTKNTXFEMP) {
@@ -2372,6 +3004,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
case DSTS_ENUMSPD_LS:
hsotg->gadget.speed = USB_SPEED_LOW;
+ ep0_mps = 8;
+ ep_mps = 8;
/*
* note, we don't actually support LS in this driver at the
* moment, and the documentation seems to imply that it isn't
@@ -2390,13 +3024,15 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
if (ep0_mps) {
int i;
/* Initialize ep0 for both in and out directions */
- dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1);
- dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
for (i = 1; i < hsotg->num_of_eps; i++) {
if (hsotg->eps_in[i])
- dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 1);
if (hsotg->eps_out[i])
- dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 0);
}
}
@@ -2516,6 +3152,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 intmsk;
u32 val;
u32 usbcfg;
+ u32 dcfg = 0;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2534,10 +3171,17 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
GUSBCFG_HNPCAP);
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (val << GUSBCFG_USBTRDTIM_SHIFT);
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
+ (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
+ /* FS/LS Dedicated Transceiver Interface */
+ usbcfg |= GUSBCFG_PHYSEL;
+ } else {
+ /* set the PLL on, remove the HNP/SRP and set the PHY */
+ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (val << GUSBCFG_USBTRDTIM_SHIFT);
+ }
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
dwc2_hsotg_init_fifo(hsotg);
@@ -2545,7 +3189,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
if (!is_usb_reset)
__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
- dwc2_writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG);
+ dcfg |= DCFG_EPMISCNT(1);
+
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ dcfg |= DCFG_DEVSPD_LS;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
+ dcfg |= DCFG_DEVSPD_FS48;
+ else
+ dcfg |= DCFG_DEVSPD_FS;
+ break;
+ default:
+ dcfg |= DCFG_DEVSPD_HS;
+ }
+
+ dwc2_writel(dcfg, hsotg->regs + DCFG);
/* Clear any pending OTG interrupts */
dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
@@ -2556,23 +3216,31 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
GINTSTS_USBRST | GINTSTS_RESETDET |
GINTSTS_ENUMDONE | GINTSTS_OTGINT |
- GINTSTS_USBSUSP | GINTSTS_WKUPINT |
- GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+ GINTSTS_USBSUSP | GINTSTS_WKUPINT;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
+ if (!using_desc_dma(hsotg))
+ intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+
+ if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
dwc2_writel(intmsk, hsotg->regs + GINTMSK);
- if (using_dma(hsotg))
+ if (using_dma(hsotg)) {
dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
(GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
hsotg->regs + GAHBCFG);
- else
+
+ /* Set DDMA mode support in the core if needed */
+ if (using_desc_dma(hsotg))
+ __orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
+
+ } else {
dwc2_writel(((hsotg->dedicated_fifos) ?
(GAHBCFG_NP_TXF_EMP_LVL |
GAHBCFG_P_TXF_EMP_LVL) : 0) |
GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
+ }
/*
* If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
@@ -2588,13 +3256,18 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
- * DMA mode we may need this.
+ * DMA mode we may need this and StsPhseRcvd.
*/
- dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
+ dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
+ DOEPMSK_STSPHSERCVDMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
- DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
+ DOEPMSK_SETUPMSK,
hsotg->regs + DOEPMSK);
+ /* Enable BNA interrupt for DDMA */
+ if (using_desc_dma(hsotg))
+ __orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
+
dwc2_writel(0, hsotg->regs + DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -2935,6 +3608,95 @@ irq_retry:
return IRQ_HANDLED;
}
+static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
+ u32 bit, u32 timeout)
+{
+ u32 i;
+
+ for (i = 0; i < timeout; i++) {
+ if (dwc2_readl(hs_otg->regs + reg) & bit)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
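The helper is a bounded poll: each iteration re-reads the register and gives up after roughly timeout microseconds. A user-space analogue of the same pattern, polling a plain variable instead of a device register:

#include <stdio.h>
#include <unistd.h>

static volatile unsigned int fake_reg;

static int wait_bit_set(volatile unsigned int *reg, unsigned int bit,
                        unsigned int timeout_us)
{
        unsigned int i;

        for (i = 0; i < timeout_us; i++) {
                if (*reg & bit)
                        return 0;
                usleep(1);              /* stands in for udelay(1) */
        }
        return -1;                      /* stands in for -ETIMEDOUT */
}

int main(void)
{
        fake_reg = 0x2;
        printf("%d\n", wait_bit_set(&fake_reg, 0x2, 100));      /* 0  */
        printf("%d\n", wait_bit_set(&fake_reg, 0x4, 100));      /* -1 */
        return 0;
}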
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 epctrl_reg;
+ u32 epint_reg;
+
+ epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
+ DOEPCTL(hs_ep->index);
+ epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
+ DOEPINT(hs_ep->index);
+
+ dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
+ hs_ep->name);
+
+ if (hs_ep->dir_in) {
+ if (hsotg->dedicated_fifos || hs_ep->periodic) {
+ __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
+ DXEPINT_INEPNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DIEPINT.NAKEFF\n",
+ __func__);
+ } else {
+ __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GINNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout GINTSTS.GINNAKEFF\n",
+ __func__);
+ }
+ } else {
+ if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+
+ /* Wait for global nak to take effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GOUTNAKEFF, 100))
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
+ __func__);
+ }
+
+ /* Disable ep */
+ __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
+
+ /* Wait for ep to be disabled */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DOEPCTL.EPDisable\n", __func__);
+
+ /* Clear EPDISBLD interrupt */
+ __orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);
+
+ if (hs_ep->dir_in) {
+ unsigned short fifo_index;
+
+ if (hsotg->dedicated_fifos || hs_ep->periodic)
+ fifo_index = hs_ep->fifo_index;
+ else
+ fifo_index = 0;
+
+ /* Flush TX FIFO */
+ dwc2_flush_tx_fifo(hsotg, fifo_index);
+
+ /* Clear Global In NP NAK in Shared FIFO for non-periodic ep */
+ if (!hsotg->dedicated_fifos && !hs_ep->periodic)
+ __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
+
+ } else {
+ /* Remove global NAKs */
+ __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+ }
+}
+
/**
* dwc2_hsotg_ep_enable - enable the given endpoint
* @ep: The USB endpint to configure
@@ -2952,6 +3714,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
u32 epctrl_reg;
u32 epctrl;
u32 mps;
+ u32 mc;
u32 mask;
unsigned int dir_in;
unsigned int i, val, size;
@@ -2975,6 +3738,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
}
mps = usb_endpoint_maxp(desc);
+ mc = usb_endpoint_maxp_mult(desc);
/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
@@ -2984,6 +3748,18 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
__func__, epctrl, epctrl_reg);
+ /* Allocate DMA descriptor chain for non-ctrl endpoints */
+ if (using_desc_dma(hsotg)) {
+ hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+ MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ &hs_ep->desc_list_dma, GFP_KERNEL);
+ if (!hs_ep->desc_list) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+ }
+
spin_lock_irqsave(&hsotg->lock, flags);
epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
@@ -2996,7 +3772,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
epctrl |= DXEPCTL_USBACTEP;
/* update the endpoint state */
- dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
/* default, set to non-periodic */
hs_ep->isochronous = 0;
@@ -3011,6 +3787,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->isochronous = 1;
hs_ep->interval = 1 << (desc->bInterval - 1);
hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ hs_ep->isoc_chain_num = 0;
+ hs_ep->next_desc = 0;
if (dir_in) {
hs_ep->periodic = 1;
mask = dwc2_readl(hsotg->regs + DIEPMSK);
@@ -3067,7 +3845,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_err(hsotg->dev,
"%s: No suitable fifo found\n", __func__);
ret = -ENOMEM;
- goto error;
+ goto error1;
}
hsotg->fifo_map |= 1 << fifo_index;
epctrl |= DXEPCTL_TXFNUM(fifo_index);
@@ -3089,8 +3867,17 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
/* enable the endpoint interrupt */
dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
-error:
+error1:
spin_unlock_irqrestore(&hsotg->lock, flags);
+
+error2:
+ if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
+ dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
return ret;
}
@@ -3115,11 +3902,23 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
+ /* Free the DMA memory allocated for non-control endpoints */
+ if (using_desc_dma(hsotg)) {
+ dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+
ctrl &= ~DXEPCTL_EPENA;
ctrl &= ~DXEPCTL_USBACTEP;
ctrl |= DXEPCTL_SNAK;
@@ -3158,77 +3957,6 @@ static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
return false;
}
-static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
- u32 bit, u32 timeout)
-{
- u32 i;
-
- for (i = 0; i < timeout; i++) {
- if (dwc2_readl(hs_otg->regs + reg) & bit)
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
-}
-
-static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
- struct dwc2_hsotg_ep *hs_ep)
-{
- u32 epctrl_reg;
- u32 epint_reg;
-
- epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
- DOEPCTL(hs_ep->index);
- epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
- DOEPINT(hs_ep->index);
-
- dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
- hs_ep->name);
- if (hs_ep->dir_in) {
- __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
- /* Wait for Nak effect */
- if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
- DXEPINT_INEPNAKEFF, 100))
- dev_warn(hsotg->dev,
- "%s: timeout DIEPINT.NAKEFF\n", __func__);
- } else {
- if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
- __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
-
- /* Wait for global nak to take effect */
- if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
- GINTSTS_GOUTNAKEFF, 100))
- dev_warn(hsotg->dev,
- "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__);
- }
-
- /* Disable ep */
- __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
-
- /* Wait for ep to be disabled */
- if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
- dev_warn(hsotg->dev,
- "%s: timeout DOEPCTL.EPDisable\n", __func__);
-
- if (hs_ep->dir_in) {
- if (hsotg->dedicated_fifos) {
- dwc2_writel(GRSTCTL_TXFNUM(hs_ep->fifo_index) |
- GRSTCTL_TXFFLSH, hsotg->regs + GRSTCTL);
- /* Wait for fifo flush */
- if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
- GRSTCTL_TXFFLSH, 100))
- dev_warn(hsotg->dev,
- "%s: timeout flushing fifos\n",
- __func__);
- }
- /* TODO: Flush shared tx fifo */
- } else {
- /* Remove global NAKs */
- __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
- }
-}
-
/**
* dwc2_hsotg_ep_dequeue - dequeue given endpoint
* @ep: The endpoint to dequeue.
@@ -3665,14 +4393,21 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
hs_ep->parent = hsotg;
hs_ep->ep.name = hs_ep->name;
- usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
+
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
+ usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
+ else
+ usb_ep_set_maxpacket_limit(&hs_ep->ep,
+ epnum ? 1024 : EP0_MPS_LIMIT);
hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
if (epnum == 0) {
hs_ep->ep.caps.type_control = true;
} else {
- hs_ep->ep.caps.type_iso = true;
- hs_ep->ep.caps.type_bulk = true;
+ if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
+ hs_ep->ep.caps.type_iso = true;
+ hs_ep->ep.caps.type_bulk = true;
+ }
hs_ep->ep.caps.type_int = true;
}
@@ -3802,51 +4537,6 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
#endif
}
-#ifdef CONFIG_OF
-static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
-{
- struct device_node *np = hsotg->dev->of_node;
- u32 len = 0;
- u32 i = 0;
-
- /* Enable dma if requested in device tree */
- hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
-
- /*
- * Register TX periodic fifo size per endpoint.
- * EP0 is excluded since it has no fifo configuration.
- */
- if (!of_find_property(np, "g-tx-fifo-size", &len))
- goto rx_fifo;
-
- len /= sizeof(u32);
-
- /* Read tx fifo sizes other than ep0 */
- if (of_property_read_u32_array(np, "g-tx-fifo-size",
- &hsotg->g_tx_fifo_sz[1], len))
- goto rx_fifo;
-
- /* Add ep0 */
- len++;
-
- /* Make remaining TX fifos unavailable */
- if (len < MAX_EPS_CHANNELS) {
- for (i = len; i < MAX_EPS_CHANNELS; i++)
- hsotg->g_tx_fifo_sz[i] = 0;
- }
-
-rx_fifo:
- /* Register RX fifo size */
- of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
-
- /* Register NPTX fifo size */
- of_property_read_u32(np, "g-np-tx-fifo-size",
- &hsotg->g_np_g_tx_fifo_sz);
-}
-#else
-static inline void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
-#endif
-
/**
* dwc2_gadget_init - init function for gadget
* @dwc2: The data structure for the DWC2 driver.
@@ -3857,33 +4547,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
- int i;
- u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
-
- /* Initialize to legacy fifo configuration values */
- hsotg->g_rx_fifo_sz = 2048;
- hsotg->g_np_g_tx_fifo_sz = 1024;
- memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
- /* Device tree specific probe */
- dwc2_hsotg_of_probe(hsotg);
-
- /* Check against largest possible value. */
- if (hsotg->g_np_g_tx_fifo_sz >
- hsotg->hw_params.dev_nperio_tx_fifo_size) {
- dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n",
- hsotg->g_np_g_tx_fifo_sz,
- hsotg->hw_params.dev_nperio_tx_fifo_size);
- hsotg->g_np_g_tx_fifo_sz =
- hsotg->hw_params.dev_nperio_tx_fifo_size;
- }
/* Dump fifo information */
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
- hsotg->g_np_g_tx_fifo_sz);
- dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
- for (i = 0; i < MAX_EPS_CHANNELS; i++)
- dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
- hsotg->g_tx_fifo_sz[i]);
+ hsotg->params.g_np_tx_fifo_size);
+ dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
@@ -3909,6 +4577,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
if (!hsotg->ep0_buff)
return -ENOMEM;
+ if (using_desc_dma(hsotg)) {
+ ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
+ if (ret < 0)
+ return ret;
+ }
+
ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
dev_name(hsotg->dev), hsotg);
if (ret < 0) {
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a06578005..911c3b36ac06 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
- if (hsotg->core_params->dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
intmsk |= GINTSTS_RXFLVL;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
+ if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
@@ -100,8 +100,8 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) ||
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ hsotg->params.ulpi_fs_ls > 0) ||
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
val = HCFG_FSLSPCLKSEL_48_MHZ;
} else {
@@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (dwc2_is_host_mode(hsotg))
dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->i2c_enable > 0) {
+ if (hsotg->params.i2c_enable > 0) {
dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
/* Program GUSBCFG.OtgUtmiFsSel to I2C */
@@ -189,20 +189,20 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
* so only program the first time. Do a soft reset immediately after
* setting phyif.
*/
- switch (hsotg->core_params->phy_type) {
+ switch (hsotg->params.phy_type) {
case DWC2_PHY_TYPE_PARAM_ULPI:
/* ULPI interface */
dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
- if (hsotg->core_params->phy_ulpi_ddr > 0)
+ if (hsotg->params.phy_ulpi_ddr > 0)
usbcfg |= GUSBCFG_DDRSEL;
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
/* UTMI+ interface */
dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
- if (hsotg->core_params->phy_utmi_width == 16)
+ if (hsotg->params.phy_utmi_width == 16)
usbcfg |= GUSBCFG_PHYIF16;
break;
default:
@@ -230,9 +230,10 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
u32 usbcfg;
int retval = 0;
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* If FS mode with FS PHY */
+ if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* If FS/LS mode with FS/LS PHY */
retval = dwc2_fs_phy_init(hsotg, select_phy);
if (retval)
return retval;
@@ -245,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) {
+ hsotg->params.ulpi_fs_ls > 0) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg |= GUSBCFG_ULPI_FS_LS;
@@ -272,9 +273,9 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
case GHWCFG2_INT_DMA_ARCH:
dev_dbg(hsotg->dev, "Internal DMA Mode\n");
- if (hsotg->core_params->ahbcfg != -1) {
+ if (hsotg->params.ahbcfg != -1) {
ahbcfg &= GAHBCFG_CTRL_MASK;
- ahbcfg |= hsotg->core_params->ahbcfg &
+ ahbcfg |= hsotg->params.ahbcfg &
~GAHBCFG_CTRL_MASK;
}
break;
@@ -285,21 +286,21 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
break;
}
- dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
- hsotg->core_params->dma_enable,
- hsotg->core_params->dma_desc_enable);
+ dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n",
+ hsotg->params.host_dma,
+ hsotg->params.dma_desc_enable);
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.host_dma > 0) {
+ if (hsotg->params.dma_desc_enable > 0)
dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
else
dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
} else {
dev_dbg(hsotg->dev, "Using Slave mode\n");
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
}
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
ahbcfg |= GAHBCFG_DMA_EN;
dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
@@ -316,10 +317,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- if (hsotg->core_params->otg_cap ==
+ if (hsotg->params.otg_cap ==
DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_HNPCAP;
- if (hsotg->core_params->otg_cap !=
+ if (hsotg->params.otg_cap !=
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_SRPCAP;
break;
@@ -327,7 +328,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- if (hsotg->core_params->otg_cap !=
+ if (hsotg->params.otg_cap !=
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_SRPCAP;
break;
@@ -390,7 +391,7 @@ static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
*/
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
@@ -449,7 +450,7 @@ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
if (!params->enable_dynamic_fifo)
@@ -490,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
dwc2_readl(hsotg->regs + HPTXFSIZ));
- if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
+ if (hsotg->params.en_multiple_tx_fifo > 0 &&
hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
/*
* Global DFIFOCFG calculation for Host mode -
@@ -598,7 +599,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
- int num_channels = hsotg->core_params->host_channels;
+ int num_channels = hsotg->params.host_channels;
struct dwc2_qh *qh;
u32 hcchar;
u32 hcsplt;
@@ -648,6 +649,35 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
#endif /* VERBOSE_DEBUG */
}
+static int _dwc2_hcd_start(struct usb_hcd *hcd);
+
+static void dwc2_host_start(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
+ _dwc2_hcd_start(hcd);
+}
+
+static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = 0;
+}
+
+static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
+ int *hub_addr, int *hub_port)
+{
+ struct urb *urb = context;
+
+ if (urb->dev->tt)
+ *hub_addr = urb->dev->tt->hub->devnum;
+ else
+ *hub_addr = 0;
+ *hub_port = urb->dev->ttport;
+}
+
/*
* =========================================================================
* Low Level Host Channel Access Functions
@@ -741,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
* For Descriptor DMA mode core halts the channel on AHB error.
* Interrupt is not required.
*/
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcintmsk |= HCINTMSK_AHBERR;
@@ -774,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
{
u32 intmsk;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_hc_enable_dma_ints(hsotg, chan);
@@ -994,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
/* No need to set the bit in DDMA for disabling the channel */
/* TODO check it everywhere channel is disabled */
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcchar |= HCCHAR_CHENA;
@@ -1004,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
}
hcchar |= HCCHAR_CHDIS;
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA not enabled\n");
hcchar |= HCCHAR_CHENA;
@@ -1143,7 +1173,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
bytes_in_fifo = sizeof(u32) *
- (hsotg->core_params->host_perio_tx_fifo_size -
+ (hsotg->params.host_perio_tx_fifo_size -
fifo_space);
/*
@@ -1339,8 +1369,8 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
- u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
- u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
+ u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
+ u16 max_hc_pkt_count = hsotg->params.max_packet_count;
u32 hcchar;
u32 hctsiz = 0;
u16 num_packets;
@@ -1350,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (chan->do_ping) {
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "ping, no DMA\n");
dwc2_hc_do_ping(hsotg, chan);
@@ -1478,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
TSIZ_SC_MC_PID_SHIFT);
}
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_writel((u32)chan->xfer_dma,
hsotg->regs + HCDMA(chan->hc_num));
if (dbg_hc(chan))
@@ -1521,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
chan->xfer_started = 1;
chan->requests++;
- if (hsotg->core_params->dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
!chan->ep_is_in && chan->xfer_len > 0)
/* Load OUT packet into the appropriate Tx FIFO */
dwc2_hc_write_packet(hsotg, chan);
@@ -1799,12 +1829,12 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
- int num_channels = hsotg->core_params->host_channels;
+ int num_channels = hsotg->params.host_channels;
struct dwc2_host_chan *channel;
u32 hcchar;
int i;
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
/* Flush out any channel requests in slave mode */
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
@@ -1840,9 +1870,9 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
channel->qh = NULL;
}
/* All channels have been freed, mark them available */
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels =
- hsotg->core_params->host_channels;
+ hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
@@ -2077,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
* Free the QTD and clean up the associated QH. Leave the QH in the
* schedule if it has any remaining QTDs.
*/
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
u8 in_process = urb_qtd->in_process;
dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
@@ -2185,13 +2215,13 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
/* Set ULPI External VBUS bit if needed */
usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
- if (hsotg->core_params->phy_ulpi_ext_vbus ==
+ if (hsotg->params.phy_ulpi_ext_vbus ==
DWC2_PHY_ULPI_EXTERNAL_VBUS)
usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
/* Set external TS Dline pulsing bit if needed */
usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
- if (hsotg->core_params->ts_dline > 0)
+ if (hsotg->params.ts_dline > 0)
usbcfg |= GUSBCFG_TERMSELDLPULSE;
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
@@ -2230,10 +2260,10 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
/* Program the GOTGCTL register */
otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
otgctl &= ~GOTGCTL_OTGVER;
- if (hsotg->core_params->otg_ver > 0)
+ if (hsotg->params.otg_ver > 0)
otgctl |= GOTGCTL_OTGVER;
dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
+ dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->params.otg_ver);
/* Clear the SRP success bit for FS-I2c */
hsotg->srp_success = 0;
@@ -2277,7 +2307,8 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
/* Initialize Host Configuration Register */
dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_FSLSSUPP;
dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -2288,13 +2319,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
* runtime. This bit needs to be programmed during initial configuration
* and its value must not be changed during runtime.
*/
- if (hsotg->core_params->reload_ctl > 0) {
+ if (hsotg->params.reload_ctl > 0) {
hfir = dwc2_readl(hsotg->regs + HFIR);
hfir |= HFIR_RLDCTRL;
dwc2_writel(hfir, hsotg->regs + HFIR);
}
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
u32 op_mode = hsotg->hw_params.op_mode;
if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
@@ -2306,7 +2337,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
"Hardware does not support descriptor DMA mode -\n");
dev_err(hsotg->dev,
"falling back to buffer DMA mode.\n");
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
} else {
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_DESCDMA;
@@ -2332,12 +2363,12 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
int num_channels, i;
u32 hcchar;
/* Flush out any leftover queued requests */
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
hcchar &= ~HCCHAR_CHENA;
@@ -2399,9 +2430,9 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
hsotg->flags.d32 = 0;
hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels =
- hsotg->core_params->host_channels;
+ hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
@@ -2415,7 +2446,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
hc_list_entry)
list_del_init(&chan->hc_list_entry);
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
@@ -2457,7 +2488,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->do_ping = 0;
chan->ep_is_in = 0;
chan->data_pid_start = DWC2_HC_PID_SETUP;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->setup_dma;
else
chan->xfer_buf = urb->setup_packet;
@@ -2484,7 +2515,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->do_ping = 0;
chan->data_pid_start = DWC2_HC_PID_DATA1;
chan->xfer_len = 0;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = hsotg->status_buf_dma;
else
chan->xfer_buf = hsotg->status_buf;
@@ -2502,13 +2533,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
case USB_ENDPOINT_XFER_ISOC:
chan->ep_type = USB_ENDPOINT_XFER_ISOC;
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
break;
frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
frame_desc->status = 0;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
chan->xfer_dma = urb->dma;
chan->xfer_dma += frame_desc->offset +
qtd->isoc_split_offset;
@@ -2690,7 +2721,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
!dwc2_hcd_is_pipe_in(&urb->pipe_info))
urb->actual_length = urb->length;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->dma + urb->actual_length;
else
chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
@@ -2715,7 +2746,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
chan->multi_count = dwc2_hb_mult(qh->maxp);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
chan->desc_list_addr = qh->desc_list_dma;
chan->desc_list_sz = qh->desc_list_sz;
}
@@ -2752,7 +2783,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
while (qh_ptr != &hsotg->periodic_sched_ready) {
if (list_empty(&hsotg->free_hc_list))
break;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
if (hsotg->available_host_channels <= 1)
break;
hsotg->available_host_channels--;
@@ -2776,17 +2807,17 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
* schedule. Some free host channels may not be used if they are
* reserved for periodic transfers.
*/
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
qh_ptr = hsotg->non_periodic_sched_inactive.next;
while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
- if (hsotg->core_params->uframe_sched <= 0 &&
+ if (hsotg->params.uframe_sched <= 0 &&
hsotg->non_periodic_channels >= num_channels -
hsotg->periodic_channels)
break;
if (list_empty(&hsotg->free_hc_list))
break;
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
if (hsotg->available_host_channels < 1)
break;
hsotg->available_host_channels--;
@@ -2808,7 +2839,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
else
ret_val = DWC2_TRANSACTION_ALL;
- if (hsotg->core_params->uframe_sched <= 0)
+ if (hsotg->params.uframe_sched <= 0)
hsotg->non_periodic_channels++;
}
@@ -2847,8 +2878,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
@@ -2957,7 +2988,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
* The flag prevents any halts to get into the request queue in
* the middle of multiple high-bandwidth packets getting queued.
*/
- if (hsotg->core_params->dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
@@ -2976,7 +3007,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
* controller automatically handles multiple packets for
* high-bandwidth transfers.
*/
- if (hsotg->core_params->dma_enable > 0 || status == 0 ||
+ if (hsotg->params.host_dma > 0 || status == 0 ||
qh->channel->requests == qh->channel->multi_count) {
qh_ptr = qh_ptr->next;
/*
@@ -2993,7 +3024,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
exit:
if (no_queue_space || no_fifo_space ||
- (hsotg->core_params->dma_enable <= 0 &&
+ (hsotg->params.host_dma <= 0 &&
!list_empty(&hsotg->periodic_sched_assigned))) {
/*
* May need to queue more transactions as the request
@@ -3073,7 +3104,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
- if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
+ if (hsotg->params.host_dma <= 0 && qspcavail == 0) {
no_queue_space = 1;
break;
}
@@ -3106,7 +3137,7 @@ next:
hsotg->non_periodic_qh_ptr->next;
} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
@@ -3307,7 +3338,7 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
* If hibernation is supported, Phy clock will be suspended
	 * after registers are backed up.
*/
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
/* Suspend the Phy Clock */
pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
@@ -3342,7 +3373,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
* If hibernation is supported, Phy clock is already resumed
* after registers restore.
*/
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
@@ -3569,7 +3600,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
port_status |= USB_PORT_STAT_TEST;
/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
- if (hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_fs_enable) {
/*
* Enable descriptor DMA only if a full speed
* device is connected.
@@ -3583,7 +3614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 hcfg;
dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
- hsotg->core_params->dma_desc_enable = 1;
+ hsotg->params.dma_desc_enable = 1;
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_DESCDMA;
dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -3824,7 +3855,7 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
u32 p_tx_status;
int i;
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
dev_dbg(hsotg->dev, "\n");
dev_dbg(hsotg->dev,
"************************************************************\n");
@@ -4020,35 +4051,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
return p->hsotg;
}
-static int _dwc2_hcd_start(struct usb_hcd *hcd);
-
-void dwc2_host_start(struct dwc2_hsotg *hsotg)
-{
- struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
- hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
- _dwc2_hcd_start(hcd);
-}
-
-void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
-{
- struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
- hcd->self.is_b_host = 0;
-}
-
-void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
- int *hub_port)
-{
- struct urb *urb = context;
-
- if (urb->dev->tt)
- *hub_addr = urb->dev->tt->hub->devnum;
- else
- *hub_addr = 0;
- *hub_port = urb->dev->ttport;
-}
-
/**
* dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
*
@@ -4365,7 +4367,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
goto skip_power_saving;
/*
@@ -4417,7 +4419,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
if (hsotg->lx_state != DWC2_L2)
goto unlock;
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
hsotg->lx_state = DWC2_L0;
goto unlock;
}
@@ -4510,9 +4512,6 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
case PIPE_ISOCHRONOUS:
pipetype = "ISOCHRONOUS";
break;
- default:
- pipetype = "UNKNOWN";
- break;
}
dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
@@ -4609,8 +4608,6 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
case PIPE_INTERRUPT:
ep_type = USB_ENDPOINT_XFER_INT;
break;
- default:
- dev_warn(hsotg->dev, "Wrong ep type\n");
}
dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
@@ -4919,7 +4916,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
}
}
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (hsotg->status_buf) {
dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
hsotg->status_buf,
@@ -4999,16 +4996,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
hsotg->last_frame_num = HFNUM_MAX_FRNUM;
/* Check if the bus driver or platform code has setup a dma_mask */
- if (hsotg->core_params->dma_enable > 0 &&
+ if (hsotg->params.host_dma > 0 &&
hsotg->dev->dma_mask == NULL) {
dev_warn(hsotg->dev,
"dma_mask not set, disabling DMA\n");
- hsotg->core_params->dma_enable = 0;
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.host_dma = 0;
+ hsotg->params.dma_desc_enable = 0;
}
/* Set device flags indicating whether the HCD supports DMA */
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
@@ -5019,7 +5016,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
if (!hcd)
goto error1;
- if (hsotg->core_params->dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
hcd->self.uses_dma = 0;
hcd->has_tt = 1;
@@ -5067,7 +5064,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* in the controller. Initialize the channel descriptor array.
*/
INIT_LIST_HEAD(&hsotg->free_hc_list);
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
for (i = 0; i < num_channels; i++) {
@@ -5091,7 +5088,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* done after usb_add_hcd since that function allocates the DMA buffer
* pool.
*/
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
DWC2_HCD_STATUS_BUF_SIZE,
&hsotg->status_buf_dma, GFP_KERNEL);
@@ -5107,10 +5104,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* DMA mode.
* Alignment must be set to 512 bytes.
*/
- if (hsotg->core_params->dma_desc_enable ||
- hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_enable ||
+ hsotg->params.dma_desc_fs_enable) {
hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
- sizeof(struct dwc2_hcd_dma_desc) *
+ sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
NULL);
if (!hsotg->desc_gen_cache) {
@@ -5121,12 +5118,12 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* Disable descriptor dma mode since it will not be
* usable.
*/
- hsotg->core_params->dma_desc_enable = 0;
- hsotg->core_params->dma_desc_fs_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
+ hsotg->params.dma_desc_fs_enable = 0;
}
hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
- sizeof(struct dwc2_hcd_dma_desc) *
+ sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
if (!hsotg->desc_hsisoc_cache) {
dev_err(hsotg->dev,
@@ -5138,8 +5135,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* Disable descriptor dma mode since it will not be
* usable.
*/
- hsotg->core_params->dma_desc_enable = 0;
- hsotg->core_params->dma_desc_fs_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
+ hsotg->params.dma_desc_fs_enable = 0;
}
}
@@ -5184,7 +5181,6 @@ error3:
error2:
usb_put_hcd(hcd);
error1:
- kfree(hsotg->core_params);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
kfree(hsotg->last_frame_num_array);
@@ -5250,7 +5246,7 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i)
hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
hr->hprt0 = dwc2_read_hprt0(hsotg);
@@ -5286,7 +5282,7 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i)
dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7758bfb644ff..1ed5fa2beff4 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -348,7 +348,7 @@ struct dwc2_qh {
struct list_head qtd_list;
struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
- struct dwc2_hcd_dma_desc *desc_list;
+ struct dwc2_dma_desc *desc_list;
dma_addr_t desc_list_dma;
u32 desc_list_sz;
u32 *n_bytes;
@@ -793,11 +793,6 @@ extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg);
#define URB_SEND_ZERO_PACKET 0x2
/* Host driver callbacks */
-
-extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
- int *hub_addr, int *hub_port);
extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
void *context, gfp_t mem_flags,
int *ttport);
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 0e1d42b5dec5..cf0367768cb3 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -95,7 +95,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
else
desc_cache = hsotg->desc_gen_cache;
- qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+ qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh);
qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
@@ -297,7 +297,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan = qh->channel;
if (dwc2_qh_is_non_per(qh)) {
- if (hsotg->core_params->uframe_sched > 0)
+ if (hsotg->params.uframe_sched > 0)
hsotg->available_host_channels++;
else
hsotg->non_periodic_channels--;
@@ -322,7 +322,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
qh->ntd = 0;
if (qh->desc_list)
- memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
+ memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh));
}
@@ -404,7 +404,7 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) &&
- (hsotg->core_params->uframe_sched > 0 ||
+ (hsotg->params.uframe_sched > 0 ||
!hsotg->periodic_channels) && hsotg->frame_list) {
dwc2_per_sched_disable(hsotg);
dwc2_frame_list_free(hsotg);
@@ -542,7 +542,7 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u32 max_xfer_size,
u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
struct dwc2_hcd_iso_packet_desc *frame_desc;
memset(dma_desc, 0, sizeof(*dma_desc));
@@ -571,8 +571,8 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (idx * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
@@ -645,8 +645,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (idx *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
#else
@@ -679,8 +679,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (idx * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
#endif
}
@@ -690,11 +690,11 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd, struct dwc2_qh *qh,
int n_desc)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
int len = chan->xfer_len;
- if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
- len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
+ if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
+ len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
if (chan->ep_is_in) {
int num_packets;
@@ -721,8 +721,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (n_desc * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
/*
@@ -778,8 +778,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
((n_desc - 1) *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
@@ -808,8 +808,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
n_desc - 1, &qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (n_desc - 1) *
- sizeof(struct dwc2_hcd_dma_desc),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
if (n_desc > 1) {
qh->desc_list[0].status |= HOST_DMA_A;
@@ -817,7 +817,7 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
&qh->desc_list[0]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma,
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
chan->ntd = n_desc;
@@ -893,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc;
+ struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 remain = 0;
int rc = 0;
@@ -902,8 +902,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
@@ -1066,7 +1066,7 @@ stop_scan:
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd,
- struct dwc2_hcd_dma_desc *dma_desc,
+ struct dwc2_dma_desc *dma_desc,
enum dwc2_halt_status halt_status,
u32 n_bytes, int *xfer_done)
{
@@ -1154,7 +1154,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_qh *qh = chan->qh;
struct dwc2_hcd_urb *urb = qtd->urb;
- struct dwc2_hcd_dma_desc *dma_desc;
+ struct dwc2_dma_desc *dma_desc;
u32 n_bytes;
int failed;
@@ -1165,8 +1165,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_cpu(hsotg->dev,
qh->desc_list_dma + (desc_num *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[desc_num];
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 906f223542ee..b8f4b6aaf1d0 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -256,7 +256,7 @@ static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
u32 *hprt0_modify)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
int do_reset = 0;
u32 usbcfg;
u32 prtspd;
@@ -395,10 +395,10 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
} else {
hsotg->flags.b.port_enable_change = 1;
- if (hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_fs_enable) {
u32 hcfg;
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
hsotg->new_connection = false;
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg &= ~HCFG_DESCDMA;
@@ -604,7 +604,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Skip whole frame */
if (chan->qh->do_split &&
chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
}
@@ -743,7 +743,7 @@ cleanup:
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels++;
} else {
switch (chan->ep_type) {
@@ -789,7 +789,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_release_channel(hsotg, chan, qtd, halt_status);
@@ -915,6 +915,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
u32 len;
+ u32 hctsiz;
+ u32 pid;
if (!qtd->urb)
return 0;
@@ -932,7 +934,10 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
qtd->isoc_split_offset += len;
- if (frame_desc->actual_length >= frame_desc->length) {
+ hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
+
+ if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
frame_desc->status = 0;
qtd->isoc_frame_index++;
qtd->complete_split = 0;
@@ -974,7 +979,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
if (pipe_type == USB_ENDPOINT_XFER_ISOC)
/* Do not disable the interrupt, just clear it */
@@ -985,7 +990,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
/* Handle xfer complete on CSPLIT */
if (chan->qh->do_split) {
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
if (qtd->complete_split &&
dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
qtd))
@@ -1097,7 +1102,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
chnum);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_STALL);
goto handle_stall_done;
@@ -1207,7 +1212,7 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
- if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
+ if (hsotg->params.host_dma > 0 && chan->ep_is_in) {
/*
* NAK interrupts are enabled on bulk/control IN
* transfers in DMA mode for the sole purpose of
@@ -1353,7 +1358,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
*/
if (chan->do_split && chan->complete_split) {
if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
qtd->isoc_frame_index++;
@@ -1374,7 +1379,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh = chan->qh;
bool past_end;
- if (hsotg->core_params->uframe_sched <= 0) {
+ if (hsotg->params.uframe_sched <= 0) {
int frnum = dwc2_hcd_get_frame_number(hsotg);
/* Don't have num_hs_transfers; simple logic */
@@ -1467,7 +1472,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_BABBLE_ERR);
goto disable_int;
@@ -1572,7 +1577,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
/* Core halts the channel for Descriptor DMA mode */
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_AHB_ERR);
goto handle_ahberr_done;
@@ -1604,7 +1609,7 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_XACT_ERR);
goto handle_xacterr_done;
@@ -1798,8 +1803,8 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
(chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
- hsotg->core_params->dma_desc_enable <= 0)) {
- if (hsotg->core_params->dma_desc_enable > 0)
+ hsotg->params.dma_desc_enable <= 0)) {
+ if (hsotg->params.dma_desc_enable > 0)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
@@ -1830,7 +1835,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
} else if (chan->hcint & HCINTMSK_STALL) {
dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XACTERR) &&
- hsotg->core_params->dma_desc_enable <= 0) {
+ hsotg->params.dma_desc_enable <= 0) {
if (out_nak_enh) {
if (chan->hcint &
(HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
@@ -1850,10 +1855,10 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
*/
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
- hsotg->core_params->dma_desc_enable > 0) {
+ hsotg->params.dma_desc_enable > 0) {
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_AHBERR) &&
- hsotg->core_params->dma_desc_enable > 0) {
+ hsotg->params.dma_desc_enable > 0) {
dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_BBLERR) {
dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
@@ -1946,7 +1951,7 @@ static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
chnum);
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
} else {
if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
@@ -2023,7 +2028,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
* interrupt unmasked
*/
WARN_ON(hcint != HCINTMSK_CHHLTD);
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
@@ -2051,7 +2056,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
qtd_list_entry);
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
hcint &= ~HCINTMSK_CHHLTD;
}
@@ -2156,7 +2161,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
}
}
- for (i = 0; i < hsotg->core_params->host_channels; i++) {
+ for (i = 0; i < hsotg->params.host_channels; i++) {
if (haint & (1 << i))
dwc2_hc_n_intr(hsotg, i);
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 13754353251f..5713f03a4e56 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -75,7 +75,7 @@ static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
int status;
int num_channels;
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
if (hsotg->periodic_channels + hsotg->non_periodic_channels <
num_channels
&& hsotg->periodic_channels < num_channels - 1) {
@@ -355,6 +355,37 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
}
}
+/**
+ * dwc2_get_ls_map() - Get the map used for the given qh
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ *
+ * We'll always get the periodic map out of our TT. Note that even if we're
+ * running the host straight in low speed / full speed mode it appears as if
+ * a TT is allocated for us, so we'll use it. If that ever changes we can
+ * add logic here to get a map out of "hsotg" if !qh->do_split.
+ *
+ * Returns: the map or NULL if a map couldn't be found.
+ */
+static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ unsigned long *map;
+
+ /* Don't expect to be missing a TT and be doing low speed scheduling */
+ if (WARN_ON(!qh->dwc_tt))
+ return NULL;
+
+ /* Get the map and adjust if this is a multi_tt hub */
+ map = qh->dwc_tt->periodic_bitmaps;
+ if (qh->dwc_tt->usb_tt->multi)
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+
+ return map;
+}
+
+#ifdef DWC2_PRINT_SCHEDULE
/*
* cat_printf() - A printf() + strcat() helper
*
@@ -454,35 +485,6 @@ static void pmap_print(unsigned long *map, int bits_per_period,
}
}
-/**
- * dwc2_get_ls_map() - Get the map used for the given qh
- *
- * @hsotg: The HCD state structure for the DWC OTG controller.
- * @qh: QH for the periodic transfer.
- *
- * We'll always get the periodic map out of our TT. Note that even if we're
- * running the host straight in low speed / full speed mode it appears as if
- * a TT is allocated for us, so we'll use it. If that ever changes we can
- * add logic here to get a map out of "hsotg" if !qh->do_split.
- *
- * Returns: the map or NULL if a map couldn't be found.
- */
-static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
- struct dwc2_qh *qh)
-{
- unsigned long *map;
-
- /* Don't expect to be missing a TT and be doing low speed scheduling */
- if (WARN_ON(!qh->dwc_tt))
- return NULL;
-
- /* Get the map and adjust if this is a multi_tt hub */
- map = qh->dwc_tt->periodic_bitmaps;
- if (qh->dwc_tt->usb_tt->multi)
- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
-
- return map;
-}
struct dwc2_qh_print_data {
struct dwc2_hsotg *hsotg;
@@ -519,9 +521,6 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
* If we don't have tracing turned on, don't run unless the special
* define is turned on.
*/
-#ifndef DWC2_PRINT_SCHEDULE
- return;
-#endif
if (qh->schedule_low_speed) {
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
@@ -559,8 +558,12 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
dwc2_qh_print, &print_data);
}
-
+ return;
}
+#else
+static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh) {};
+#endif
/**
* dwc2_ls_pmap_schedule() - Schedule a low speed QH
@@ -1104,7 +1107,7 @@ static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
next_active_frame = earliest_frame;
/* Get the "no microframe schduler" out of the way... */
- if (hsotg->core_params->uframe_sched <= 0) {
+ if (hsotg->params.uframe_sched <= 0) {
if (qh->do_split)
/* Splits are active at microframe 0 minus 1 */
next_active_frame |= 0x7;
@@ -1197,7 +1200,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
status = dwc2_uframe_schedule(hsotg, qh);
} else {
status = dwc2_periodic_channel_available(hsotg);
@@ -1218,7 +1221,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
return status;
}
- if (hsotg->core_params->uframe_sched <= 0)
+ if (hsotg->params.uframe_sched <= 0)
/* Reserve periodic channel */
hsotg->periodic_channels++;
@@ -1254,7 +1257,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs -= qh->host_us;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
dwc2_uframe_unschedule(hsotg, qh);
} else {
/* Release periodic channel reservation */
@@ -1328,7 +1331,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
int status = 0;
max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
- max_channel_xfer_size = hsotg->core_params->max_transfer_size;
+ max_channel_xfer_size = hsotg->params.max_transfer_size;
if (max_xfer_size > max_channel_xfer_size) {
dev_err(hsotg->dev,
@@ -1391,7 +1394,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
qh->unreserve_pending = 0;
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
/* Don't rely on SOF and start in ready schedule */
list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
else
@@ -1599,7 +1602,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
dwc2_qh_init(hsotg, qh, urb, mem_flags);
- if (hsotg->core_params->dma_desc_enable > 0 &&
+ if (hsotg->params.dma_desc_enable > 0 &&
dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
dwc2_hcd_qh_free(hsotg, qh);
return NULL;
@@ -1711,7 +1714,7 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
dwc2_deschedule_periodic(hsotg, qh);
hsotg->periodic_qh_count--;
if (!hsotg->periodic_qh_count &&
- hsotg->core_params->dma_desc_enable <= 0) {
+ hsotg->params.dma_desc_enable <= 0) {
intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
intr_mask &= ~GINTSTS_SOF;
dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 91058441e62a..5be056b39e5c 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -412,6 +412,7 @@
/* Device mode registers */
#define DCFG HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN (1 << 23)
#define DCFG_EPMISCNT_MASK (0x1f << 18)
#define DCFG_EPMISCNT_SHIFT 18
#define DCFG_EPMISCNT_LIMIT 0x1f
@@ -473,6 +474,7 @@
#define DIEPMSK_XFERCOMPLMSK (1 << 0)
#define DOEPMSK HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK (1 << 9)
#define DOEPMSK_BACK2BACKSETUP (1 << 6)
#define DOEPMSK_STSPHSERCVDMSK (1 << 5)
#define DOEPMSK_OUTTKNEPDISMSK (1 << 4)
@@ -790,7 +792,8 @@
#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
/**
- * struct dwc2_hcd_dma_desc - Host-mode DMA descriptor structure
+ * struct dwc2_dma_desc - DMA descriptor structure,
+ * used for both host and gadget modes
*
* @status: DMA descriptor status quadlet
* @buf: DMA descriptor data buffer pointer
@@ -798,10 +801,12 @@
* DMA Descriptor structure contains two quadlets:
* Status quadlet and Data buffer pointer.
*/
-struct dwc2_hcd_dma_desc {
+struct dwc2_dma_desc {
u32 status;
u32 buf;
-};
+} __packed;
+
+/* Host Mode DMA descriptor status quadlet */
#define HOST_DMA_A (1 << 31)
#define HOST_DMA_STS_MASK (0x3 << 28)
@@ -817,8 +822,43 @@ struct dwc2_hcd_dma_desc {
#define HOST_DMA_ISOC_NBYTES_SHIFT 0
#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
#define HOST_DMA_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_LIMIT 131071
+
+/* Device Mode DMA descriptor status quadlet */
+
+#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT 30
+#define DEV_DMA_BUFF_STS_HREADY 0
+#define DEV_DMA_BUFF_STS_DMABUSY 1
+#define DEV_DMA_BUFF_STS_DMADONE 2
+#define DEV_DMA_BUFF_STS_HBUSY 3
+#define DEV_DMA_STS_MASK (0x3 << 28)
+#define DEV_DMA_STS_SHIFT 28
+#define DEV_DMA_STS_SUCC 0
+#define DEV_DMA_STS_BUFF_FLUSH 1
+#define DEV_DMA_STS_BUFF_ERR 3
+#define DEV_DMA_L (1 << 27)
+#define DEV_DMA_SHORT (1 << 26)
+#define DEV_DMA_IOC (1 << 25)
+#define DEV_DMA_SR (1 << 24)
+#define DEV_DMA_MTRF (1 << 23)
+#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT 23
+#define DEV_DMA_ISOC_PID_DATA0 0
+#define DEV_DMA_ISOC_PID_DATA2 1
+#define DEV_DMA_ISOC_PID_DATA1 2
+#define DEV_DMA_ISOC_PID_MDATA 3
+#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT 12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_MASK (0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_LIMIT 0xffff
-#define MAX_DMA_DESC_SIZE 131071
#define MAX_DMA_DESC_NUM_GENERIC 64
#define MAX_DMA_DESC_NUM_HS_ISOC 256
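To make the renamed descriptor layout easier to follow, here is a minimal sketch (not code from this series; the helper names are invented) of how the host-mode status quadlet can be built from the masks above and decoded again after the core has processed the descriptor:

static void sketch_fill_host_desc(struct dwc2_dma_desc *desc,
				  u32 buf_dma, u32 nbytes)
{
	/* Transfer size lives in the low bits of the status quadlet */
	desc->status = (nbytes << HOST_DMA_NBYTES_SHIFT) & HOST_DMA_NBYTES_MASK;
	desc->status |= HOST_DMA_IOC;	/* raise an interrupt on completion */
	desc->status |= HOST_DMA_A;	/* hand the descriptor to the core */
	desc->buf = buf_dma;
}

static u32 sketch_host_desc_remaining(const struct dwc2_dma_desc *desc)
{
	/* For IN transfers the core writes the remaining byte count back */
	return (desc->status & HOST_DMA_NBYTES_MASK) >> HOST_DMA_NBYTES_SHIFT;
}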
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
new file mode 100644
index 000000000000..a786256535b6
--- /dev/null
+++ b/drivers/usb/dwc2/params.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (C) 2004-2016 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include "core.h"
+
+static const struct dwc2_core_params params_hi6220 = {
+ .otg_cap = 2, /* No HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 512,
+ .host_perio_tx_fifo_size = 512,
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 16,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8,
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_bcm2835 = {
+ .otg_cap = 0, /* HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 774, /* 774 DWORDs */
+ .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
+ .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 8,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8, /* 8 bits */
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = 0x10,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_rk3066 = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = -1,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 525, /* 525 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_ltq = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_desc_enable = -1,
+ .dma_desc_fs_enable = -1,
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 288, /* 288 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_amlogic = {
+ .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+ .otg_ver = -1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = DWC2_SPEED_PARAM_HIGH,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 500,
+ .host_perio_tx_fifo_size = 500,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = 16,
+ .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = 1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_default = {
+ .otg_cap = -1,
+ .otg_ver = -1,
+
+ /*
+ * Disable descriptor dma mode by default as the HW can support
+ * it, but does not support it for SPLIT transactions.
+ * Disable it for FS devices as well.
+ */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = -1,
+ .host_nperio_tx_fifo_size = -1,
+ .host_perio_tx_fifo_size = -1,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = -1,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
+ { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
+ { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
+ { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
+ { .compatible = "snps,dwc2", .data = NULL },
+ { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+ { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+ { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
+ { .compatible = "amcc,dwc-otg", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
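/*
 * Illustrative sketch, not part of this file: the match table above is
 * typically consumed at probe time, where the compatible string selects one
 * of the static parameter sets as the starting point before the checks
 * below adjust it against the detected hardware.  The helper name is
 * invented for illustration.
 */
static const struct dwc2_core_params *
dwc2_sketch_pick_platform_params(struct dwc2_hsotg *hsotg)
{
	const struct of_device_id *match;

	match = of_match_device(dwc2_of_match_table, hsotg->dev);
	if (match && match->data)
		return match->data;		/* e.g. &params_bcm2835 */

	/* No platform-specific entry: start from the autodetect defaults */
	return &params_default;
}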
+
+static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
+ char *property, u8 size, u64 *value)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+
+ switch (size) {
+ case 0:
+ *value = device_property_read_bool(hsotg->dev, property);
+ break;
+ case 1:
+ if (device_property_read_u8(hsotg->dev, property, &val8))
+ return;
+
+ *value = val8;
+ break;
+ case 2:
+ if (device_property_read_u16(hsotg->dev, property, &val16))
+ return;
+
+ *value = val16;
+ break;
+ case 4:
+ if (device_property_read_u32(hsotg->dev, property, &val32))
+ return;
+
+ *value = val32;
+ break;
+ case 8:
+ if (device_property_read_u64(hsotg->dev, property, value))
+ return;
+
+ break;
+ default:
+ /*
+ * The size is checked by the only function that calls
+ * this so this should never happen.
+ */
+ WARN_ON(1);
+ return;
+ }
+}
+
+static void dwc2_set_core_param(void *param, u8 size, u64 value)
+{
+ switch (size) {
+ case 0:
+ *((bool *)param) = !!value;
+ break;
+ case 1:
+ *((u8 *)param) = (u8)value;
+ break;
+ case 2:
+ *((u16 *)param) = (u16)value;
+ break;
+ case 4:
+ *((u32 *)param) = (u32)value;
+ break;
+ case 8:
+ *((u64 *)param) = (u64)value;
+ break;
+ default:
+ /*
+ * The size is checked by the only function that calls
+ * this so this should never happen.
+ */
+ WARN_ON(1);
+ return;
+ }
+}
+
+/**
+ * dwc2_set_param() - Set a core parameter
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @param: Pointer to the parameter to set
+ * @lookup: True if the property should be looked up
+ * @property: The device property to read
+ * @legacy: The param value to set if @property is not available. This
+ * will typically be the legacy value set in the static
+ * params structure.
+ * @def: The default value
+ * @min: The minimum value
+ * @max: The maximum value
+ * @size: The size of the core parameter in bytes, or 0 for bool.
+ *
+ * This function looks up @property and sets the @param to that value.
+ * If the property doesn't exist it uses the passed-in @legacy value. It will
+ * verify that the value falls between @min and @max. If it doesn't,
+ * it will output an error and set the parameter to either @def or,
+ * failing that, to @min.
+ *
+ * The @size is used to write to @param and to query the device
+ * properties so that this same function can be used with different
+ * types of parameters.
+ */
+static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
+ bool lookup, char *property, u64 legacy,
+ u64 def, u64 min, u64 max, u8 size)
+{
+ u64 sizemax;
+ u64 value;
+
+ if (WARN_ON(!hsotg || !param || !property))
+ return;
+
+ if (WARN((size > 8) || ((size & (size - 1)) != 0),
+ "Invalid size %d for %s\n", size, property))
+ return;
+
+ dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n",
+ __func__, property, legacy, def, min, max, size);
+
+ sizemax = (1ULL << (size * 8)) - 1;
+ value = legacy;
+
+ /* Override legacy settings. */
+ if (lookup)
+ dwc2_get_device_property(hsotg, property, size, &value);
+
+ /*
+ * While the value is not valid, try setting it to the default
+ * value, and failing that, set it to the minimum.
+ */
+ while ((value < min) || (value > max)) {
+ /* Print an error unless the value is set to auto. */
+ if (value != sizemax)
+ dev_err(hsotg->dev, "Invalid value %llu for param %s\n",
+ value, property);
+
+ /*
+ * If we are already the default, just set it to the
+ * minimum.
+ */
+ if (value == def) {
+ dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n",
+ __func__, min);
+ value = min;
+ break;
+ }
+
+ /* Try the default value */
+ dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n",
+ __func__, def);
+ value = def;
+ }
+
+ dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value);
+ dwc2_set_core_param(param, size, value);
+}
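/*
 * Illustrative sketch, not part of this file: a self-contained example of
 * the resolution order implemented by dwc2_set_param() above.  The property
 * name "snps,example-fifo-size" and the local variable are invented; real
 * callers follow further down in this file.
 */
static void dwc2_sketch_set_example_param(struct dwc2_hsotg *hsotg)
{
	u16 example_fifo_size = 512;	/* legacy starting value */

	/*
	 * A device property, when present, overrides the legacy value.
	 * Anything outside [16, rx_fifo_size] falls back to the default
	 * (512) and, failing that, to the minimum, with an error logged
	 * unless the value was the all-ones "auto" pattern for a u16.
	 */
	dwc2_set_param(hsotg, &example_fifo_size, true,
		       "snps,example-fifo-size",
		       example_fifo_size,		/* legacy */
		       512,				/* def */
		       16,				/* min */
		       hsotg->hw_params.rx_fifo_size,	/* max */
		       2);				/* size of a u16 */
}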
+
+/**
+ * dwc2_set_param_u16() - Set a u16 parameter
+ *
+ * See dwc2_set_param().
+ */
+static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+ bool lookup, char *property, u16 legacy,
+ u16 def, u16 min, u16 max)
+{
+ dwc2_set_param(hsotg, param, lookup, property,
+ legacy, def, min, max, 2);
+}
+
+/**
+ * dwc2_set_param_bool() - Set a bool parameter
+ *
+ * See dwc2_set_param().
+ *
+ * Note: there is no 'legacy' argument here because there is no legacy
+ * source of bool params.
+ */
+static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param,
+ bool lookup, char *property,
+ bool def, bool min, bool max)
+{
+ dwc2_set_param(hsotg, param, lookup, property,
+ def, def, min, max, 0);
+}
+
+#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
+
+/* Parameter access functions */
+static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ switch (val) {
+ case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
+ if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+ valid = 0;
+ break;
+ case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+ break;
+ case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+ /* always valid */
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for otg_cap parameter. Check HW configuration.\n",
+ val);
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
+ break;
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
+ break;
+ default:
+ val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ break;
+ }
+ dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
+ }
+
+ hsotg->params.otg_cap = val;
+}
+
+static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->params.host_dma > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
+ }
+
+ hsotg->params.dma_desc_enable = val;
+}
+
+static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->params.host_dma > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ }
+
+ hsotg->params.dma_desc_fs_enable = val;
+ dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
+}
+
+static void
+dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for host_support_fs_low_power\n");
+ dev_err(hsotg->dev,
+ "host_support_fs_low_power must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev,
+ "Setting host_support_fs_low_power to %d\n", val);
+ }
+
+ hsotg->params.host_support_fs_ls_low_power = val;
+}
+
+static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.enable_dynamic_fifo;
+ dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
+ }
+
+ hsotg->params.enable_dynamic_fifo = val;
+}
+
+static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.rx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.rx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
+ }
+
+ hsotg->params.host_rx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_nperio_tx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
+ val);
+ }
+
+ hsotg->params.host_nperio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_perio_tx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
+ val);
+ }
+
+ hsotg->params.host_perio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for max_transfer_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.max_transfer_size;
+ dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
+ }
+
+ hsotg->params.max_transfer_size = val;
+}
+
+static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 15 || val > hsotg->hw_params.max_packet_count)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for max_packet_count. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.max_packet_count;
+ dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
+ }
+
+ hsotg->params.max_packet_count = val;
+}
+
+static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 1 || val > hsotg->hw_params.host_channels)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_channels. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_channels;
+ dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
+ }
+
+ hsotg->params.host_channels = val;
+}
+
+static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 0;
+ u32 hs_phy_type, fs_phy_type;
+
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
+ DWC2_PHY_TYPE_PARAM_ULPI)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for phy_type\n");
+ dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
+ }
+
+ valid = 0;
+ }
+
+ hs_phy_type = hsotg->hw_params.hs_phy_type;
+ fs_phy_type = hsotg->hw_params.fs_phy_type;
+ if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ else if (val == DWC2_PHY_TYPE_PARAM_FS &&
+ fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ valid = 1;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for phy_type. Check HW configuration.\n",
+ val);
+ val = DWC2_PHY_TYPE_PARAM_FS;
+ if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
+ if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
+ val = DWC2_PHY_TYPE_PARAM_UTMI;
+ else
+ val = DWC2_PHY_TYPE_PARAM_ULPI;
+ }
+ dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
+ }
+
+ hsotg->params.phy_type = val;
+}
+
+static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.phy_type;
+}
+
+static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for speed parameter\n");
+ dev_err(hsotg->dev, "max_speed parameter must be 0, 1, or 2\n");
+ }
+ valid = 0;
+ }
+
+ if (dwc2_is_hs_iot(hsotg) &&
+ val == DWC2_SPEED_PARAM_LOW)
+ valid = 0;
+
+ if (val == DWC2_SPEED_PARAM_HIGH &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for speed parameter. Check HW configuration.\n",
+ val);
+ val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
+ DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
+ dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
+ }
+
+ hsotg->params.speed = val;
+}
+
+static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
+ DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for host_ls_low_power_phy_clk parameter\n");
+ dev_err(hsotg->dev,
+ "host_ls_low_power_phy_clk must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
+ val);
+ val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
+ ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
+ : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+ dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
+ val);
+ }
+
+ hsotg->params.host_ls_low_power_phy_clk = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
+ dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
+ }
+
+ hsotg->params.phy_ulpi_ddr = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for phy_ulpi_ext_vbus\n");
+ dev_err(hsotg->dev,
+ "phy_ulpi_ext_vbus must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
+ }
+
+ hsotg->params.phy_ulpi_ext_vbus = val;
+}
+
+static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 0;
+
+ switch (hsotg->hw_params.utmi_phy_data_width) {
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+ valid = (val == 8);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+ valid = (val == 16);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+ valid = (val == 8 || val == 16);
+ break;
+ }
+
+ if (!valid) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "%d invalid for phy_utmi_width. Check HW configuration.\n",
+ val);
+ }
+ val = (hsotg->hw_params.utmi_phy_data_width ==
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
+ dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
+ }
+
+ hsotg->params.phy_utmi_width = val;
+}
+
+static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
+ dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
+ }
+
+ hsotg->params.ulpi_fs_ls = val;
+}
+
+static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for ts_dline\n");
+ dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
+ }
+
+ hsotg->params.ts_dline = val;
+}
+
+static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
+ dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
+ }
+
+ valid = 0;
+ }
+
+ if (val == 1 && !(hsotg->hw_params.i2c_enable))
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for i2c_enable. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.i2c_enable;
+ dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
+ }
+
+ hsotg->params.i2c_enable = val;
+}
+
+static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+				"Wrong value for en_multiple_tx_fifo\n");
+ dev_err(hsotg->dev,
+ "en_multiple_tx_fifo must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.en_multiple_tx_fifo;
+ dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
+ }
+
+ hsotg->params.en_multiple_tx_fifo = val;
+}
+
+static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter reload_ctl\n", val);
+ dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for parameter reload_ctl. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
+ dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
+ }
+
+ hsotg->params.reload_ctl = val;
+}
+
+static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
+{
+ if (val != -1)
+ hsotg->params.ahbcfg = val;
+ else
+ hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+}
+
+static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter otg_ver\n", val);
+ dev_err(hsotg->dev,
+ "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
+ }
+
+ hsotg->params.otg_ver = val;
+}
+
+static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter uframe_sched\n",
+ val);
+ dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+ }
+ val = 1;
+ dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+ }
+
+ hsotg->params.uframe_sched = val;
+}
+
+static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter external_id_pin_ctl\n",
+ val);
+ dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
+ }
+
+ hsotg->params.external_id_pin_ctl = val;
+}
+
+static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter hibernation\n",
+ val);
+ dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
+ }
+
+ hsotg->params.hibernation = val;
+}
+
+static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
+{
+ int i;
+ int num;
+ char *property = "g-tx-fifo-size";
+ struct dwc2_core_params *p = &hsotg->params;
+
+ memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
+
+ /* Read tx fifo sizes */
+ num = device_property_read_u32_array(hsotg->dev, property, NULL, 0);
+
+ if (num > 0) {
+ device_property_read_u32_array(hsotg->dev, property,
+ &p->g_tx_fifo_size[1],
+ num);
+ } else {
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
+
+ memcpy(&p->g_tx_fifo_size[1],
+ p_tx_fifo,
+ sizeof(p_tx_fifo));
+
+ num = ARRAY_SIZE(p_tx_fifo);
+ }
+
+ for (i = 0; i < num; i++) {
+ if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size))
+ break;
+
+ dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n",
+ property, i + 1, p->g_tx_fifo_size[i + 1]);
+ }
+}
+
+static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ /* Buffer DMA */
+ dwc2_set_param_bool(hsotg, &p->g_dma,
+ false, "gadget-dma",
+ true, false,
+ dma_capable);
+
+ /* DMA Descriptor */
+ dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
+ "gadget-dma-desc",
+ p->g_dma, false,
+ !!hw->dma_desc_enable);
+}
+
+/**
+ * dwc2_set_parameters() - Set all core parameters.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @params: The parameters to set
+ */
+static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ dwc2_set_param_otg_cap(hsotg, params->otg_cap);
+ if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ dev_dbg(hsotg->dev, "Setting HOST parameters\n");
+
+ dwc2_set_param_bool(hsotg, &p->host_dma,
+ false, "host-dma",
+ true, false,
+ dma_capable);
+ }
+ dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+ dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
+
+ dwc2_set_param_host_support_fs_ls_low_power(hsotg,
+ params->host_support_fs_ls_low_power);
+ dwc2_set_param_enable_dynamic_fifo(hsotg,
+ params->enable_dynamic_fifo);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
+ dwc2_set_param_max_transfer_size(hsotg,
+ params->max_transfer_size);
+ dwc2_set_param_max_packet_count(hsotg,
+ params->max_packet_count);
+ dwc2_set_param_host_channels(hsotg, params->host_channels);
+ dwc2_set_param_phy_type(hsotg, params->phy_type);
+ dwc2_set_param_speed(hsotg, params->speed);
+ dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
+ params->host_ls_low_power_phy_clk);
+ dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
+ dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
+ params->phy_ulpi_ext_vbus);
+ dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
+ dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
+ dwc2_set_param_ts_dline(hsotg, params->ts_dline);
+ dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
+ dwc2_set_param_en_multiple_tx_fifo(hsotg,
+ params->en_multiple_tx_fifo);
+ dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
+ dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
+ dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+ dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
+ dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
+ dwc2_set_param_hibernation(hsotg, params->hibernation);
+
+ /*
+ * Set devicetree-only parameters. These parameters do not
+ * take any values from @params.
+ */
+ if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ dev_dbg(hsotg->dev, "Setting peripheral device properties\n");
+
+ dwc2_set_gadget_dma(hsotg);
+
+ /*
+ * The values for g_rx_fifo_size (2048) and
+ * g_np_tx_fifo_size (1024) come from the legacy s3c
+ * gadget driver. These defaults have been hard-coded
+ * for some time so many platforms depend on these
+ * values. Leave them as defaults for now and only
+ * auto-detect if the hardware does not support the
+ * default.
+ */
+ dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+ true, "g-rx-fifo-size", 2048,
+ hw->rx_fifo_size,
+ 16, hw->rx_fifo_size);
+
+ dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+ true, "g-np-tx-fifo-size", 1024,
+ hw->dev_nperio_tx_fifo_size,
+ 16, hw->dev_nperio_tx_fifo_size);
+
+ dwc2_set_param_tx_fifo_sizes(hsotg);
+ }
+}
+
+/*
+ * Gets host hardware parameters. Forces host mode if not currently in
+ * host mode. Should be called immediately after a core soft reset in
+ * order to get the reset values.
+ */
+static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 gnptxfsiz;
+ u32 hptxfsiz;
+ bool forced;
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, true);
+
+ gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+ dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/*
+ * Gets device hardware parameters. Forces device mode if not
+ * currently in device mode. Should be called immediately after a core
+ * soft reset in order to get the reset values.
+ */
+static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ bool forced;
+ u32 gnptxfsiz;
+
+ if (hsotg->dr_mode == USB_DR_MODE_HOST)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, false);
+
+ gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/**
+ * dwc2_get_hwparams() - During device initialization, read various hardware
+ * configuration registers and interpret the contents.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ unsigned int width;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 grxfsiz;
+
+	/*
+	 * Attempt to ensure this device is really a DWC_otg Controller.
+	 * Read and verify the GSNPSID register contents. The value should be
+	 * 0x4f542xxx or 0x4f543xxx, which corresponds to "OT2" or "OT3" as in
+	 * "OTG version 2.xx" or "OTG version 3.xx", or 0x5531xxxx/0x5532xxxx
+	 * for the Synopsys IOT variants of the core.
+	 */
+ hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
+ (hw->snpsid & 0xfffff000) != 0x4f543000 &&
+ (hw->snpsid & 0xffff0000) != 0x55310000 &&
+ (hw->snpsid & 0xffff0000) != 0x55320000) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+
+ hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
+ hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
+ hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
+ hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
+ grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+
+ dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
+ dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
+ dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
+ dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
+ dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
+
+	/*
+	 * Host and device mode specific hardware parameters. Reading these
+	 * parameters requires the controller to be in the corresponding
+	 * mode. The mode will be forced, if necessary, to read these values.
+	 */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
+
+ /* hwcfg1 */
+ hw->dev_ep_dirs = hwcfg1;
+
+ /* hwcfg2 */
+ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+ hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+ GHWCFG2_ARCHITECTURE_SHIFT;
+ hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+ GHWCFG2_NUM_HOST_CHAN_SHIFT);
+ hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+ GHWCFG2_HS_PHY_TYPE_SHIFT;
+ hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+ GHWCFG2_FS_PHY_TYPE_SHIFT;
+ hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+ GHWCFG2_NUM_DEV_EP_SHIFT;
+ hw->nperio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->host_perio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->dev_token_q_depth =
+ (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+ GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+ /* hwcfg3 */
+ width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_transfer_size = (1 << (width + 11)) - 1;
+ width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_packet_count = (1 << (width + 4)) - 1;
+ hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+ hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+ GHWCFG3_DFIFO_DEPTH_SHIFT;
+
+ /* hwcfg4 */
+ hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+ GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+ hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+ hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+
+ /* fifo sizes */
+ hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+ GRXFSIZ_DEPTH_SHIFT;
+
+ dev_dbg(hsotg->dev, "Detected values from hardware:\n");
+ dev_dbg(hsotg->dev, " op_mode=%d\n",
+ hw->op_mode);
+ dev_dbg(hsotg->dev, " arch=%d\n",
+ hw->arch);
+ dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
+ hw->dma_desc_enable);
+ dev_dbg(hsotg->dev, " power_optimized=%d\n",
+ hw->power_optimized);
+ dev_dbg(hsotg->dev, " i2c_enable=%d\n",
+ hw->i2c_enable);
+ dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
+ hw->hs_phy_type);
+ dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
+ hw->fs_phy_type);
+ dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
+ hw->utmi_phy_data_width);
+ dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
+ hw->num_dev_ep);
+ dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
+ hw->num_dev_perio_in_ep);
+ dev_dbg(hsotg->dev, " host_channels=%d\n",
+ hw->host_channels);
+ dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
+ hw->max_transfer_size);
+ dev_dbg(hsotg->dev, " max_packet_count=%d\n",
+ hw->max_packet_count);
+ dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
+ hw->nperio_tx_q_depth);
+ dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
+ hw->host_perio_tx_q_depth);
+ dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
+ hw->dev_token_q_depth);
+ dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
+ hw->enable_dynamic_fifo);
+ dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
+ hw->en_multiple_tx_fifo);
+ dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
+ hw->total_fifo_size);
+ dev_dbg(hsotg->dev, " rx_fifo_size=%d\n",
+ hw->rx_fifo_size);
+ dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
+ hw->host_nperio_tx_fifo_size);
+ dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
+ hw->host_perio_tx_fifo_size);
+ dev_dbg(hsotg->dev, "\n");
+
+ return 0;
+}
+
+int dwc2_init_params(struct dwc2_hsotg *hsotg)
+{
+ const struct of_device_id *match;
+ struct dwc2_core_params params;
+
+ match = of_match_device(dwc2_of_match_table, hsotg->dev);
+ if (match && match->data)
+ params = *((struct dwc2_core_params *)match->data);
+ else
+ params = params_default;
+
+ if (dwc2_is_fs_iot(hsotg)) {
+ params.speed = DWC2_SPEED_PARAM_FULL;
+ params.phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ }
+
+ dwc2_set_parameters(hsotg, &params);
+
+ return 0;
+}
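As a reading aid for the GSNPSID check in dwc2_get_hwparams() above: the sketch below is a minimal, standalone decoder (not part of the patch) that splits a sample ID value into its product and release fields using the same shifts as the driver's dev_dbg(). The sample value 0x4f54280a is assumed purely for illustration; it satisfies the 0x4f542xxx mask test in the code.

	#include <stdio.h>

	int main(void)
	{
		unsigned int snpsid = 0x4f54280a;	/* assumed sample value */

		/* Product field: two ASCII characters, "OT" for DWC_otg cores */
		printf("product: %c%c\n",
		       (snpsid >> 24) & 0xff, (snpsid >> 16) & 0xff);

		/* Release field, formatted like the driver's "Core Release" log */
		printf("release: %x.%x%x%x\n",
		       (snpsid >> 12) & 0xf, (snpsid >> 8) & 0xf,
		       (snpsid >> 4) & 0xf, snpsid & 0xf);

		return 0;
	}

Compiled as ordinary user-space C, this prints "product: OT" and "release: 2.80a", matching the format of the "Core Release" debug line above.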
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index ae419615a176..a23329e3d7cd 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -62,6 +62,20 @@ struct dwc2_pci_glue {
struct platform_device *phy;
};
+static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
+ pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
+ struct property_entry properties[] = {
+ { },
+ };
+
+ return platform_device_add_properties(dwc2, properties);
+ }
+
+ return 0;
+}
+
static void dwc2_pci_remove(struct pci_dev *pci)
{
struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
@@ -122,6 +136,10 @@ static int dwc2_pci_probe(struct pci_dev *pci,
return PTR_ERR(phy);
}
+ ret = dwc2_pci_quirks(pci, dwc2);
+ if (ret)
+ goto err;
+
ret = platform_device_add(dwc2);
if (ret) {
dev_err(dev, "failed to register dwc2 device\n");
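The property list in the new dwc2_pci_quirks() above contains only the terminating entry in this patch. Purely as a hedged illustration of how such a quirk table is usually populated (not something this patch does), the sketch below adds one boolean property; "snps,example-quirk" is a made-up name that the dwc2 core does not read.

	#include <linux/pci.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	/* Hypothetical variant of dwc2_pci_quirks() with a populated table */
	static int dwc2_pci_quirks_example(struct pci_dev *pdev,
					   struct platform_device *dwc2)
	{
		if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
		    pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
			struct property_entry properties[] = {
				PROPERTY_ENTRY_BOOL("snps,example-quirk"), /* made-up */
				{ },
			};

			return platform_device_add_properties(dwc2, properties);
		}

		return 0;
	}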
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 8e1728b39a49..4fc8c603afb8 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -55,165 +55,6 @@
static const char dwc2_driver_name[] = "dwc2";
-static const struct dwc2_core_params params_hi6220 = {
- .otg_cap = 2, /* No HNP/SRP capable */
- .otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = 0, /* High Speed */
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = 1,
- .host_rx_fifo_size = 512,
- .host_nperio_tx_fifo_size = 512,
- .host_perio_tx_fifo_size = 512,
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = 16,
- .phy_type = 1, /* UTMI */
- .phy_utmi_width = 8,
- .phy_ulpi_ddr = 0, /* Single */
- .phy_ulpi_ext_vbus = 0,
- .i2c_enable = 0,
- .ulpi_fs_ls = 0,
- .host_support_fs_ls_low_power = 0,
- .host_ls_low_power_phy_clk = 0, /* 48 MHz */
- .ts_dline = 0,
- .reload_ctl = 0,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_bcm2835 = {
- .otg_cap = 0, /* HNP/SRP capable */
- .otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = 0, /* High Speed */
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = 1,
- .host_rx_fifo_size = 774, /* 774 DWORDs */
- .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
- .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = 8,
- .phy_type = 1, /* UTMI */
- .phy_utmi_width = 8, /* 8 bits */
- .phy_ulpi_ddr = 0, /* Single */
- .phy_ulpi_ext_vbus = 0,
- .i2c_enable = 0,
- .ulpi_fs_ls = 0,
- .host_support_fs_ls_low_power = 0,
- .host_ls_low_power_phy_clk = 0, /* 48 MHz */
- .ts_dline = 0,
- .reload_ctl = 0,
- .ahbcfg = 0x10,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_rk3066 = {
- .otg_cap = 2, /* non-HNP/non-SRP */
- .otg_ver = -1,
- .dma_enable = -1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = -1,
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 525, /* 525 DWORDs */
- .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
- .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
- .max_transfer_size = -1,
- .max_packet_count = -1,
- .host_channels = -1,
- .phy_type = -1,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = -1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = -1,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_ltq = {
- .otg_cap = 2, /* non-HNP/non-SRP */
- .otg_ver = -1,
- .dma_enable = -1,
- .dma_desc_enable = -1,
- .dma_desc_fs_enable = -1,
- .speed = -1,
- .enable_dynamic_fifo = -1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 288, /* 288 DWORDs */
- .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
- .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = -1,
- .phy_type = -1,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = -1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = -1,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_amlogic = {
- .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
- .otg_ver = -1,
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = DWC2_SPEED_PARAM_HIGH,
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 512,
- .host_nperio_tx_fifo_size = 500,
- .host_perio_tx_fifo_size = 500,
- .max_transfer_size = -1,
- .max_packet_count = -1,
- .host_channels = 16,
- .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = 1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
@@ -510,20 +351,6 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
disable_irq(hsotg->irq);
}
-static const struct of_device_id dwc2_of_match_table[] = {
- { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
- { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
- { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
- { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
- { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
- { .compatible = "snps,dwc2", .data = NULL },
- { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
- { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
- { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
- {},
-};
-MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
-
/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
@@ -538,30 +365,10 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
- const struct of_device_id *match;
- const struct dwc2_core_params *params;
- struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
int retval;
- match = of_match_device(dwc2_of_match_table, &dev->dev);
- if (match && match->data) {
- params = match->data;
- } else {
- /* Default all params to autodetect */
- dwc2_set_all_params(&defparams, -1);
- params = &defparams;
-
- /*
- * Disable descriptor dma mode by default as the HW can support
- * it, but does not support it for SPLIT transactions.
- * Disable it for FS devices as well.
- */
- defparams.dma_desc_enable = 0;
- defparams.dma_desc_fs_enable = 0;
- }
-
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
return -ENOMEM;
@@ -591,13 +398,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
spin_lock_init(&hsotg->lock);
- hsotg->core_params = devm_kzalloc(&dev->dev,
- sizeof(*hsotg->core_params), GFP_KERNEL);
- if (!hsotg->core_params)
- return -ENOMEM;
-
- dwc2_set_all_params(hsotg->core_params, -1);
-
hsotg->irq = platform_get_irq(dev, 0);
if (hsotg->irq < 0) {
dev_err(&dev->dev, "missing IRQ resource\n");
@@ -631,11 +431,12 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
- /* Validate parameter values */
- dwc2_set_parameters(hsotg, params);
-
dwc2_force_dr_mode(hsotg);
+ retval = dwc2_init_params(hsotg);
+ if (retval)
+ goto error;
+
if (hsotg->dr_mode != USB_DR_MODE_HOST) {
retval = dwc2_gadget_init(hsotg, hsotg->irq);
if (retval)