author     Olof Johansson <olof@lixom.net>    2014-03-09 12:03:18 -0700
committer  Olof Johansson <olof@lixom.net>    2014-03-09 12:03:18 -0700
commit     1760e4f855a2ead08a40deac9abd4fb8cdf3af32 (patch)
tree       8ee1c79f4c46fd225d90e832def3d7f7e3742086 /net/sctp/associola.c
parent     1e871089f66416ce540d8e362225a0878a5d2c06 (diff)
parent     c8ae7e9bfc8caf679e891c4f0a04f2435b45e2da (diff)
Merge tag 'imx-soc-3.15' of git://git.linaro.org/people/shawnguo/linux-2.6 into next/soc
i.MX SoC changes for 3.15 from Shawn Guo:
- Support suspend from ocram (DDR IO floating) for imx6 platforms
- Add cpuidle support for imx6sl
- Sparse warning fixes for imx6sl and vf610 clock code
- Remove PWM platform code
- Support ptp and rmii clock from pad
- Support WEIM CS GPR configuration
- Random cleanups and defconfig updates
* tag 'imx-soc-3.15' of git://git.linaro.org/people/shawnguo/linux-2.6: (373 commits)
ARM: imx6: drop .text.head section annotation from headsmp.S
ARM: imx6: build suspend-imx6.o with CONFIG_SOC_IMX6
ARM: imx6: rename pm-imx6q.c to pm-imx6.c
ARM: imx6: introduce CONFIG_SOC_IMX6 for i.MX6 common stuff
ARM: imx6: do not call imx6q_suspend_init() with !CONFIG_SUSPEND
ARM: imx6: call suspend_set_ops() from suspend routine
ARM: imx6: build headsmp.o only on CONFIG_SMP
ARM: imx6: move v7_cpu_resume() into suspend-imx6.S
ARM i.MX6q: Mark VPU and IPU AXI transfers as cacheable, increase IPU priority
ARM: imx6q: Add GPR6 and GPR7 register definitions for iomuxc gpr
bus: imx-weim: support CS GPR configuration
ARM: mach-imx: Kconfig: Remove IMX_HAVE_PLATFORM_IMX2_WDT from SOC_IMX53
ARM: imx_v6_v7_defconfig: Select CONFIG_DEBUG_FS
ARM: mach-imx: Select CONFIG_SRAM at ARCH_MXC level
ARM: imx: add speed grading check for i.mx6 soc
ARM: imx: avoid calling clk APIs in idle thread which may cause schedule
ARM: imx6q: support ptp and rmii clock from pad
ARM: imx6q: remove unneeded clk lookups
ARM: imx_v6_v7_defconfig: Select CONFIG_MMC_UNSAFE_RESUME
ARM: imx_v4_v5_defconfig: Select CONFIG_MMC_UNSAFE_RESUME
...
Diffstat (limited to 'net/sctp/associola.c')
-rw-r--r-- | net/sctp/associola.c | 82 |
1 file changed, 17 insertions(+), 65 deletions(-)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..f558433537b8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
 	return false;
 }
 
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
 {
+	int rx_count;
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
 
-	if (asoc->rwnd_over) {
-		if (asoc->rwnd_over >= len) {
-			asoc->rwnd_over -= len;
-		} else {
-			asoc->rwnd += (len - asoc->rwnd_over);
-			asoc->rwnd_over = 0;
-		}
-	} else {
-		asoc->rwnd += len;
-	}
+	if (asoc->ep->rcvbuf_policy)
+		rx_count = atomic_read(&asoc->rmem_alloc);
+	else
+		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
 
-	/* If we had window pressure, start recovering it
-	 * once our rwnd had reached the accumulated pressure
-	 * threshold. The idea is to recover slowly, but up
-	 * to the initial advertised window.
-	 */
-	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
-		int change = min(asoc->pathmtu, asoc->rwnd_press);
-		asoc->rwnd += change;
-		asoc->rwnd_press -= change;
-	}
+	if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+		asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+	else
+		asoc->rwnd = 0;
 
-	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->a_rwnd);
+	pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+		 __func__, asoc, asoc->rwnd, rx_count,
+		 asoc->base.sk->sk_rcvbuf);
 
 	/* Send a window update SACK if the rwnd has increased by at least the
 	 * minimum of the association's PMTU and half of the receive buffer.
 	 * The algorithm used is similar to the one described in
 	 * Section 4.2.3.3 of RFC 1122.
 	 */
-	if (sctp_peer_needs_update(asoc)) {
+	if (update_peer && sctp_peer_needs_update(asoc)) {
 		asoc->a_rwnd = asoc->rwnd;
 
 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 	}
 }
 
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
-	int rx_count;
-	int over = 0;
-
-	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
-		pr_debug("%s: association:%p has asoc->rwnd:%u, "
-			 "asoc->rwnd_over:%u!\n", __func__, asoc,
-			 asoc->rwnd, asoc->rwnd_over);
-
-	if (asoc->ep->rcvbuf_policy)
-		rx_count = atomic_read(&asoc->rmem_alloc);
-	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
-	/* If we've reached or overflowed our receive buffer, announce
-	 * a 0 rwnd if rwnd would still be positive. Store the
-	 * the potential pressure overflow so that the window can be restored
-	 * back to original value.
-	 */
-	if (rx_count >= asoc->base.sk->sk_rcvbuf)
-		over = 1;
-
-	if (asoc->rwnd >= len) {
-		asoc->rwnd -= len;
-		if (over) {
-			asoc->rwnd_press += asoc->rwnd;
-			asoc->rwnd = 0;
-		}
-	} else {
-		asoc->rwnd_over = len - asoc->rwnd;
-		asoc->rwnd = 0;
-	}
-
-	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->rwnd_press);
-}
-
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
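For reference, below is a minimal standalone sketch of the buffer-based window calculation that the new sctp_assoc_rwnd_update() performs: the advertised rwnd is approximated as half of the free receive-buffer space, clamped at zero. The helper name rwnd_from_rcvbuf() is hypothetical and not part of the patch; it only mirrors the arithmetic shown in the hunk above.

	/* Illustrative sketch only (not in the patch): compute rwnd as half of
	 * the remaining receive-buffer space, or 0 if the buffer is full or
	 * overcommitted. sk_rcvbuf is the socket receive-buffer limit and
	 * rx_count the memory currently charged to received data.
	 */
	static unsigned int rwnd_from_rcvbuf(int sk_rcvbuf, int rx_count)
	{
		int free_space = sk_rcvbuf - rx_count;

		return free_space > 0 ? (unsigned int)free_space >> 1 : 0;
	}

For example, with a 256 KiB sk_rcvbuf and 64 KiB of data already charged, this yields an advertised window of 96 KiB. The new update_peer argument then gates whether sctp_peer_needs_update() is consulted at all before a window-update SACK is queued, replacing the old rwnd_over/rwnd_press bookkeeping removed in the second hunk.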