author    Mauro Carvalho Chehab <mchehab@s-opensource.com>    2017-11-14 10:47:01 -0500
committer Mauro Carvalho Chehab <mchehab@s-opensource.com>    2017-11-14 10:47:01 -0500
commit    f2ecc3d0787e05d9145722feed01d4a11ab6bec1 (patch)
tree      8b952af40dad67d728f1e421efc640907c523982 /mm
parent    b1cb7372fa822af6c06c8045963571d13ad6348b (diff)
parent    c14dd9d5f8beda9d8c621683b4e7d6cb5cd3cda7 (diff)
Merge tag 'staging-4.15-rc1' into v4l_for_linus
There are some conflicts between staging and media trees, as reported by
Stephen Rothwell <sfr@canb.auug.org.au>. So, merge from staging.

* tag 'staging-4.15-rc1': (775 commits)
  staging: lustre: add SPDX identifiers to all lustre files
  staging: greybus: Remove redundant license text
  staging: greybus: add SPDX identifiers to all greybus driver files
  staging: ccree: simplify ioread/iowrite
  staging: ccree: simplify registers access
  staging: ccree: simplify error handling logic
  staging: ccree: remove dead code
  staging: ccree: handle limiting of DMA masks
  staging: ccree: copy IV to DMAable memory
  staging: fbtft: remove redundant initialization of buf
  staging: sm750fb: Fix parameter mistake in poke32
  staging: wilc1000: Fix bssid buffer offset in Txq
  staging: fbtft: fb_ssd1331: fix mirrored display
  staging: android: Fix checkpatch.pl error
  staging: greybus: loopback: convert loopback to use generic async operations
  staging: greybus: operation: add private data with get/set accessors
  staging: greybus: loopback: Fix iteration count on async path
  staging: greybus: loopback: Hold per-connection mutex across operations
  staging: greybus/loopback: use ktime_get() for time intervals
  staging: fsl-dpaa2/eth: Extra headroom in RX buffers
  ...

Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  15
-rw-r--r--  mm/percpu.c      15
2 files changed, 10 insertions(+), 20 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5f3a62887cf..661f046ad318 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
 	if (!mem_cgroup_sockets_enabled)
 		return;
 
-	/*
-	 * Socket cloning can throw us here with sk_memcg already
-	 * filled. It won't however, necessarily happen from
-	 * process context. So the test for root memcg given
-	 * the current task's memcg won't help us in this case.
-	 *
-	 * Respecting the original socket's memcg is a better
-	 * decision in this case.
-	 */
-	if (sk->sk_memcg) {
-		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
-		css_get(&sk->sk_memcg->css);
-		return;
-	}
-
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(current);
 	if (memcg == root_mem_cgroup)
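For context, the function body that survives this deletion charges the socket to the allocating task's memcg. The sketch below reconstructs it from the context lines above plus the v4.14-era source; the tcpmem_active check is recalled from that source, not shown in this hunk, so treat it as an assumption:

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	/* Assumption from the v4.14-era source: legacy hierarchies only
	 * charge sockets when TCP memory accounting is active. */
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

The deleted shortcut handled cloned sockets arriving with sk_memcg already set; its removal implies callers now reach this function only from process context, on sockets with no memcg attached yet.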
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * @gfp: allocation flags
  *
  * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	int slot, off, cpu, ret;
 	unsigned long flags;
 	void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
 		     !is_power_of_2(align))) {
-		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 		     size, align);
 		return NULL;
 	}
@@ -1482,7 +1485,7 @@ fail_unlock:
 fail:
 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
 
-	if (!is_atomic && warn_limit) {
+	if (!is_atomic && do_warn && warn_limit) {
 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
 			size, align, is_atomic, err);
 		dump_stack();
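Taken together, is_atomic and do_warn gate the two warning sites above: the WARN() on an illegal request and the pr_warn()/dump_stack() on a failed one. A standalone illustration (not part of the diff) of how common gfp values map onto the two flags:

	/* Standalone sketch, not kernel source: evaluate the two gates
	 * for typical gfp values. */
	bool is_atomic, do_warn;

	/* GFP_ATOMIC lacks the direct-reclaim/IO/FS bits of GFP_KERNEL,
	 * so the allocation may not sleep, but it still warns. */
	is_atomic = (GFP_ATOMIC & GFP_KERNEL) != GFP_KERNEL;		/* true */
	do_warn   = !(GFP_ATOMIC & __GFP_NOWARN);			/* true */

	/* Adding __GFP_NOWARN silences both warning sites. */
	is_atomic = ((GFP_KERNEL | __GFP_NOWARN) & GFP_KERNEL) != GFP_KERNEL;	/* false */
	do_warn   = !((GFP_KERNEL | __GFP_NOWARN) & __GFP_NOWARN);		/* false */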
@@ -1507,7 +1510,9 @@ fail:
  *
  * Allocate zero-filled percpu area of @size bytes aligned at @align. If
  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
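A minimal caller-side sketch of the behavior documented above: combining an atomic gfp with __GFP_NOWARN lets a caller probe for per-CPU memory and fall back quietly on failure. The variable name is hypothetical; alloc_percpu_gfp(), this_cpu_inc() and free_percpu() are the standard percpu interfaces:

	/* Hypothetical caller: opportunistically allocate a per-CPU u64
	 * counter without triggering any warning splat on failure. */
	u64 __percpu *hits = alloc_percpu_gfp(u64, GFP_ATOMIC | __GFP_NOWARN);

	if (!hits)
		return -ENOMEM;	/* silent: no WARN(), pr_warn() or dump_stack() */

	this_cpu_inc(*hits);	/* use the zero-filled per-CPU area */
	...
	free_percpu(hits);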