Diffstat (limited to 'drivers/net/ethernet/ti/davinci_cpdma.c')
-rw-r--r--   drivers/net/ethernet/ti/davinci_cpdma.c | 272
1 file changed, 181 insertions(+), 91 deletions(-)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index e4d6edf387b3..d2eab5cd1e0c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Texas Instruments CPDMA Driver
  *
  * Copyright (C) 2010 Texas Instruments
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
@@ -141,6 +134,15 @@ struct cpdma_control_info {
 #define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
 };
 
+struct submit_info {
+	struct cpdma_chan *chan;
+	int directed;
+	void *token;
+	void *data_virt;
+	dma_addr_t data_dma;
+	int len;
+};
+
 static struct cpdma_control_info controls[] = {
 	[CPDMA_TX_RLIM]		= {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
 	[CPDMA_CMD_IDLE]	= {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
@@ -183,6 +185,8 @@ static struct cpdma_control_info controls[] = {
 				 (directed << CPDMA_TO_PORT_SHIFT));	\
 	} while (0)
 
+#define CPDMA_DMA_EXT_MAP		BIT(16)
+
 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
 	struct cpdma_desc_pool *pool = ctlr->pool;
@@ -191,7 +195,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 		return;
 
 	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
-	     "cpdma_desc_pool size %d != avail %d",
+	     "cpdma_desc_pool size %zd != avail %zd",
 	     gen_pool_size(pool->gen_pool),
 	     gen_pool_avail(pool->gen_pool));
 	if (pool->cpumap)
@@ -205,7 +209,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
 	struct cpdma_params *cpdma_params = &ctlr->params;
 	struct cpdma_desc_pool *pool;
@@ -406,37 +410,36 @@ static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
 	struct cpdma_chan *chan;
 	u32 old_rate = ch->rate;
 	u32 new_rmask = 0;
-	int rlim = 1;
+	int rlim = 0;
 	int i;
 
-	*prio_mode = 0;
 	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
 		chan = ctlr->channels[i];
-		if (!chan) {
-			rlim = 0;
+		if (!chan)
 			continue;
-		}
 
 		if (chan == ch)
 			chan->rate = rate;
 
 		if (chan->rate) {
-			if (rlim) {
-				new_rmask |= chan->mask;
-			} else {
-				ch->rate = old_rate;
-				dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n",
-					chan->chan_num);
-				return -EINVAL;
-			}
-		} else {
-			*prio_mode = 1;
-			rlim = 0;
+			rlim = 1;
+			new_rmask |= chan->mask;
+			continue;
 		}
+
+		if (rlim)
+			goto err;
 	}
 
 	*rmask = new_rmask;
+	*prio_mode = rlim;
 	return 0;
+
+err:
+	ch->rate = old_rate;
+	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
+		chan->chan_num);
+	return -EINVAL;
 }
 
 static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
@@ -528,7 +531,6 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 	ctlr->num_chan = CPDMA_MAX_CHANNELS;
 	return ctlr;
 }
-EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
 
 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 {
@@ -589,7 +591,6 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
 
 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 {
@@ -622,7 +623,6 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
 
 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 {
@@ -640,7 +640,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 	cpdma_desc_pool_destroy(ctlr);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
 
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 {
@@ -661,25 +660,21 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
 
 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
 {
 	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
 }
-EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
 
 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
 {
 	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
 }
-EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
 
 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
 {
 	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
 }
-EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
 
 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
 				 int rx, int desc_num,
@@ -723,11 +718,11 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
 	most_chan->desc_num += desc_cnt;
 }
 
-/**
+/*
  * cpdma_chan_split_pool - Splits ctrl pool between all channels.
  * Has to be called under ctlr lock
  */
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 {
 	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
 	int free_rx_num = 0, free_tx_num = 0;
@@ -775,7 +770,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
 
 /* cpdma_chan_set_weight - set weight of a channel in percentage.
@@ -808,7 +802,6 @@ int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
 
 /* cpdma_chan_get_min_rate - get minimum allowed rate for channel
  * Should be called before cpdma_chan_set_rate.
@@ -823,7 +816,6 @@ u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
 
 	return DIV_ROUND_UP(divident, divisor);
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
 
 /* cpdma_chan_set_rate - limits bandwidth for transmit channel.
  * The bandwidth limited channels have to be in order beginning from lowest.
@@ -868,7 +860,6 @@ err:
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
 
 u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
 {
@@ -881,7 +872,6 @@ u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
 
 	return rate;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);
 
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 				     cpdma_handler_fn handler, int rx_type)
@@ -893,7 +883,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
 
 	if (__chan_linear(chan_num) >= ctlr->num_chan)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
@@ -941,7 +931,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return chan;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
 int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
 {
@@ -954,7 +943,6 @@ int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
 
 	return desc_num;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
 
 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
@@ -976,7 +964,6 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
 
 int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
@@ -989,7 +976,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
 
 static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
@@ -1027,34 +1013,25 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
 	}
 }
 
-int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
-		      int len, int directed)
+static int cpdma_chan_submit_si(struct submit_info *si)
 {
+	struct cpdma_chan *chan = si->chan;
 	struct cpdma_ctlr *ctlr = chan->ctlr;
+	int len = si->len;
 	struct cpdma_desc __iomem *desc;
 	dma_addr_t buffer;
-	unsigned long flags;
 	u32 mode;
-	int ret = 0;
-
-	spin_lock_irqsave(&chan->lock, flags);
-
-	if (chan->state == CPDMA_STATE_TEARDOWN) {
-		ret = -EINVAL;
-		goto unlock_ret;
-	}
+	int ret;
 
 	if (chan->count >= chan->desc_num) {
 		chan->stats.desc_alloc_fail++;
-		ret = -ENOMEM;
-		goto unlock_ret;
+		return -ENOMEM;
 	}
 
 	desc = cpdma_desc_alloc(ctlr->pool);
 	if (!desc) {
 		chan->stats.desc_alloc_fail++;
-		ret = -ENOMEM;
-		goto unlock_ret;
+		return -ENOMEM;
 	}
 
 	if (len < ctlr->params.min_packet_size) {
@@ -1062,16 +1039,20 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		chan->stats.runt_transmit_buff++;
 	}
 
-	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
-	ret = dma_mapping_error(ctlr->dev, buffer);
-	if (ret) {
-		cpdma_desc_free(ctlr->pool, desc, 1);
-		ret = -EINVAL;
-		goto unlock_ret;
-	}
-
 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
-	cpdma_desc_to_port(chan, mode, directed);
+	cpdma_desc_to_port(chan, mode, si->directed);
+
+	if (si->data_dma) {
+		buffer = si->data_dma;
+		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+	} else {
+		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
+		ret = dma_mapping_error(ctlr->dev, buffer);
+		if (ret) {
+			cpdma_desc_free(ctlr->pool, desc, 1);
+			return -EINVAL;
+		}
+	}
 
 	/* Relaxed IO accessors can be used here as there is read barrier
 	 * at the end of write sequence.
@@ -1080,9 +1061,10 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 	writel_relaxed(buffer, &desc->hw_buffer);
 	writel_relaxed(len, &desc->hw_len);
 	writel_relaxed(mode | len, &desc->hw_mode);
-	writel_relaxed(token, &desc->sw_token);
+	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
 	writel_relaxed(buffer, &desc->sw_buffer);
-	writel_relaxed(len, &desc->sw_len);
+	writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
+		       &desc->sw_len);
 	desc_read(desc, sw_len);
 
 	__cpdma_chan_submit(chan, desc);
@@ -1091,12 +1073,108 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		chan_write(chan, rxfree, 1);
 
 	chan->count++;
+	return 0;
+}
 
-unlock_ret:
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+			   int len, int directed)
+{
+	struct submit_info si;
+	unsigned long flags;
+	int ret;
+
+	si.chan = chan;
+	si.token = token;
+	si.data_virt = data;
+	si.data_dma = 0;
+	si.len = len;
+	si.directed = directed;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	ret = cpdma_chan_submit_si(&si);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+				  dma_addr_t data, int len, int directed)
+{
+	struct submit_info si;
+	unsigned long flags;
+	int ret;
+
+	si.chan = chan;
+	si.token = token;
+	si.data_virt = NULL;
+	si.data_dma = data;
+	si.len = len;
+	si.directed = directed;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	ret = cpdma_chan_submit_si(&si);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return ret;
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+		      int len, int directed)
+{
+	struct submit_info si;
+	unsigned long flags;
+	int ret;
+
+	si.chan = chan;
+	si.token = token;
+	si.data_virt = data;
+	si.data_dma = 0;
+	si.len = len;
+	si.directed = directed;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	ret = cpdma_chan_submit_si(&si);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+			     dma_addr_t data, int len, int directed)
+{
+	struct submit_info si;
+	unsigned long flags;
+	int ret;
+
+	si.chan = chan;
+	si.token = token;
+	si.data_virt = NULL;
+	si.data_dma = data;
+	si.len = len;
+	si.directed = directed;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	ret = cpdma_chan_submit_si(&si);
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
 {
@@ -1111,7 +1189,6 @@ bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return free_tx_desc;
 }
-EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
 
 static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
@@ -1121,15 +1198,22 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
 	struct cpdma_desc_pool		*pool = ctlr->pool;
 	dma_addr_t			buff_dma;
 	int				origlen;
-	void				*token;
+	uintptr_t			token;
 
-	token      = (void *)desc_read(desc, sw_token);
-	buff_dma   = desc_read(desc, sw_buffer);
+	token = desc_read(desc, sw_token);
 	origlen    = desc_read(desc, sw_len);
-	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+	buff_dma = desc_read(desc, sw_buffer);
+	if (origlen & CPDMA_DMA_EXT_MAP) {
+		origlen &= ~CPDMA_DMA_EXT_MAP;
+		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+					chan->dir);
+	} else {
+		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+	}
+
 	cpdma_desc_free(pool, desc, 1);
-	(*chan->handler)(token, outlen, status);
+	(*chan->handler)((void *)token, outlen, status);
 }
 
 static int __cpdma_chan_process(struct cpdma_chan *chan)
@@ -1164,7 +1248,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
 		outlen -= CPDMA_DESC_CRC_LEN;
 
 	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
-			    CPDMA_DESC_PORT_MASK);
+			    CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);
 
 	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
 	chan_write(chan, cp, desc_dma);
@@ -1205,7 +1289,6 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
 	}
 	return used;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_process);
 
 int cpdma_chan_start(struct cpdma_chan *chan)
 {
@@ -1225,7 +1308,6 @@ int cpdma_chan_start(struct cpdma_chan *chan)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_start);
 
 int cpdma_chan_stop(struct cpdma_chan *chan)
 {
@@ -1288,7 +1370,6 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpdma_chan_stop);
 
 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
 {
@@ -1330,25 +1411,34 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpdma_control_set);
 
 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
 {
 	return ctlr->num_rx_desc;
 }
-EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
 
 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
 {
 	return ctlr->num_tx_desc;
 }
-EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
 
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
 {
+	unsigned long flags;
+	int temp, ret;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+
+	temp = ctlr->num_rx_desc;
 	ctlr->num_rx_desc = num_rx_desc;
 	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
-}
-EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
+	ret = cpdma_chan_split_pool(ctlr);
+	if (ret) {
+		ctlr->num_rx_desc = temp;
+		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+	}
 
-MODULE_LICENSE("GPL");
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+
+	return ret;
+}
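The submit-path churn above boils down to four public entry points built on one internal helper, cpdma_chan_submit_si(): the existing cpdma_chan_submit() (which now requires CPDMA_STATE_ACTIVE), cpdma_chan_idle_submit() for queueing buffers before the channel is started, and *_mapped() variants of both that accept an already-mapped dma_addr_t. For pre-mapped buffers the submit path only calls dma_sync_single_for_device() and tags sw_len with CPDMA_DMA_EXT_MAP, so the completion path does dma_sync_single_for_cpu() instead of dma_unmap_single(). A minimal caller-side sketch follows; the example_* helpers and the choice of token are assumptions for illustration, and only the cpdma_chan_* calls come from this file's API:

```c
#include <linux/dma-mapping.h>
#include "davinci_cpdma.h"

/* Queue an RX buffer while the channel is still idle (i.e. before
 * cpdma_chan_start()); the idle variant only rejects a channel in
 * CPDMA_STATE_TEARDOWN.  'token' is handed back untouched to the
 * channel's completion handler.
 */
static int example_rx_fill(struct cpdma_chan *rx_ch, void *token,
			   dma_addr_t buf_dma, int len)
{
	/* directed == 0: packet is not directed to a specific port */
	return cpdma_chan_idle_submit_mapped(rx_ch, token, buf_dma, len, 0);
}

/* Refill at runtime: requires CPDMA_STATE_ACTIVE.  Because buf_dma is
 * already mapped (e.g. a long-lived buffer mapped once at setup),
 * submit only syncs it for the device and sets CPDMA_DMA_EXT_MAP in
 * sw_len, so __cpdma_chan_free() later syncs for the CPU rather than
 * unmapping.
 */
static int example_rx_refill(struct cpdma_chan *rx_ch, void *token,
			     dma_addr_t buf_dma, int len)
{
	return cpdma_chan_submit_mapped(rx_ch, token, buf_dma, len, 0);
}
```

Note also that cpdma_set_num_rx_descs() now returns int: it re-splits the descriptor pool under ctlr->lock and restores the previous rx/tx split on failure, so callers should check its return value rather than assume the resize succeeded.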
