author     Linus Torvalds <torvalds@linux-foundation.org>   2016-05-17 16:26:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-05-17 16:26:30 -0700
commit     a7fd20d1c476af4563e66865213474a2f9f473a4 (patch)
tree       fb1399e2f82842450245fb058a8fb23c52865f43 /fs
parent     b80fed9595513384424cd141923c9161c4b5021b (diff)
parent     917fa5353da05e8a0045b8acacba8d50400d5b12 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support SPI based w5100 devices, from Akinobu Mita.
   2) Partial Segmentation Offload, from Alexander Duyck.
   3) Add GMAC4 support to stmmac driver, from Alexandre TORGUE.
   4) Allow cls_flower stats offload, from Amir Vadai.
   5) Implement bpf blinding, from Daniel Borkmann.
   6) Optimize _ASYNC_ bit twiddling on sockets, unless the socket is actually using FASYNC these atomics are superfluous.  From Eric Dumazet.
   7) Run TCP more preemptibly, also from Eric Dumazet.
   8) Support LED blinking, EEPROM dumps, and rxvlan offloading in mlx5e driver, from Gal Pressman.
   9) Allow creating ppp devices via rtnetlink, from Guillaume Nault.
  10) Improve BPF usage documentation, from Jesper Dangaard Brouer.
  11) Support tunneling offloads in qed, from Manish Chopra.
  12) aRFS offloading in mlx5e, from Maor Gottlieb.
  13) Add RFS and RPS support to SCTP protocol, from Marcelo Ricardo Leitner.
  14) Add MSG_EOR support to TCP, this allows controlling packet coalescing on application record boundaries for more accurate socket timestamp sampling.  From Martin KaFai Lau.
  15) Fix alignment of 64-bit netlink attributes across the board, from Nicolas Dichtel.
  16) Per-vlan stats in bridging, from Nikolay Aleksandrov.
  17) Several conversions of drivers to ethtool ksettings, from Philippe Reynes.
  18) Checksum neutral ILA in ipv6, from Tom Herbert.
  19) Factorize all of the various marvell dsa drivers into one, from Vivien Didelot
  20) Add VF support to qed driver, from Yuval Mintz"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1649 commits)
  Revert "phy dp83867: Fix compilation with CONFIG_OF_MDIO=m"
  Revert "phy dp83867: Make rgmii parameters optional"
  r8169: default to 64-bit DMA on recent PCIe chips
  phy dp83867: Make rgmii parameters optional
  phy dp83867: Fix compilation with CONFIG_OF_MDIO=m
  bpf: arm64: remove callee-save registers use for tmp registers
  asix: Fix offset calculation in asix_rx_fixup() causing slow transmissions
  switchdev: pass pointer to fib_info instead of copy
  net_sched: close another race condition in tcf_mirred_release()
  tipc: fix nametable publication field in nl compat
  drivers: net: Don't print unpopulated net_device name
  qed: add support for dcbx.
  ravb: Add missing free_irq() calls to ravb_close()
  qed: Remove a stray tab
  net: ethernet: fec-mpc52xx: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fec-mpc52xx: use phydev from struct net_device
  bpf, doc: fix typo on bpf_asm descriptions
  stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set
  net: ethernet: fs-enet: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fs-enet: use phydev from struct net_device
  ...
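A side note on highlight 14 above: MSG_EOR on TCP marks an application record boundary so a later write is not coalesced into the same skb, which keeps per-record timestamp samples meaningful. A rough userspace sketch, not taken from the series (socket setup omitted, send_record is an illustrative name):

#include <sys/types.h>
#include <sys/socket.h>

/* Send one application record; MSG_EOR tells TCP not to merge the next
 * write into this packet, so record boundaries survive coalescing. */
static ssize_t send_record(int fd, const void *buf, size_t len)
{
        return send(fd, buf, len, MSG_EOR);
}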
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/rxrpc.c      30
-rw-r--r--  fs/cifs/connect.c    4
-rw-r--r--  fs/gfs2/glock.c      4
-rw-r--r--  fs/quota/netlink.c  12
4 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index b50642870a43..63cd9f939f19 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -65,6 +65,12 @@ static void afs_async_workfn(struct work_struct *work)
call->async_workfn(call);
}
+static int afs_wait_atomic_t(atomic_t *p)
+{
+ schedule();
+ return 0;
+}
+
/*
* open an RxRPC socket and bind it to be a server for callback notifications
* - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -126,13 +132,16 @@ void afs_close_socket(void)
{
_enter("");
+ wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
+ TASK_UNINTERRUPTIBLE);
+ _debug("no outstanding calls");
+
sock_release(afs_socket);
_debug("dework");
destroy_workqueue(afs_async_calls);
ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
- ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
_leave("");
}
@@ -178,8 +187,6 @@ static void afs_free_call(struct afs_call *call)
{
_debug("DONE %p{%s} [%d]",
call, call->type->name, atomic_read(&afs_outstanding_calls));
- if (atomic_dec_return(&afs_outstanding_calls) == -1)
- BUG();
ASSERTCMP(call->rxcall, ==, NULL);
ASSERT(!work_pending(&call->async_work));
@@ -188,6 +195,9 @@ static void afs_free_call(struct afs_call *call)
kfree(call->request);
kfree(call);
+
+ if (atomic_dec_and_test(&afs_outstanding_calls))
+ wake_up_atomic_t(&afs_outstanding_calls);
}
/*
@@ -420,9 +430,11 @@ error_kill_call:
}
/*
- * handles intercepted messages that were arriving in the socket's Rx queue
- * - called with the socket receive queue lock held to ensure message ordering
- * - called with softirqs disabled
+ * Handles intercepted messages that were arriving in the socket's Rx queue.
+ *
+ * Called from the AF_RXRPC call processor in waitqueue process context. For
+ * each call, it is guaranteed this will be called in order of packet to be
+ * delivered.
*/
static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
struct sk_buff *skb)
@@ -513,6 +525,12 @@ static void afs_deliver_to_call(struct afs_call *call)
call->state = AFS_CALL_ABORTED;
_debug("Rcv ABORT %u -> %d", abort_code, call->error);
break;
+ case RXRPC_SKB_MARK_LOCAL_ABORT:
+ abort_code = rxrpc_kernel_get_abort_code(skb);
+ call->error = call->type->abort_to_error(abort_code);
+ call->state = AFS_CALL_ABORTED;
+ _debug("Loc ABORT %u -> %d", abort_code, call->error);
+ break;
case RXRPC_SKB_MARK_NET_ERROR:
call->error = -rxrpc_kernel_get_error_number(skb);
call->state = AFS_CALL_ERROR;
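The fs/afs/rxrpc.c hunks above switch afs_close_socket() from asserting that the call count is already zero to actually waiting for it to drain, using the kernel's atomic_t wait/wake helpers of that era. A minimal sketch of the pattern, with illustrative names other than the wait/wake API itself:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static atomic_t outstanding = ATOMIC_INIT(0);

/* Action callback for wait_on_atomic_t(): sleep, then let the wait core
 * recheck whether the counter has reached zero. */
static int outstanding_wait_fn(atomic_t *p)
{
	schedule();
	return 0;
}

static void put_object(void)
{
	/* Only the final decrement wakes the waiter; earlier wake-ups would
	 * just recheck the counter and go back to sleep. */
	if (atomic_dec_and_test(&outstanding))
		wake_up_atomic_t(&outstanding);
}

static void drain_objects(void)
{
	wait_on_atomic_t(&outstanding, outstanding_wait_fn,
			 TASK_UNINTERRUPTIBLE);
}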
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6f62ac821a84..2e2e0a6242d6 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2918,7 +2918,7 @@ static inline void
cifs_reclassify_socket4(struct socket *sock)
{
struct sock *sk = sock->sk;
- BUG_ON(sock_owned_by_user(sk));
+ BUG_ON(!sock_allow_reclassification(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}
@@ -2927,7 +2927,7 @@ static inline void
cifs_reclassify_socket6(struct socket *sock)
{
struct sock *sk = sock->sk;
- BUG_ON(sock_owned_by_user(sk));
+ BUG_ON(!sock_allow_reclassification(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6539131c52a2..4b73bd101bdc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1913,7 +1913,7 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
+ ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
}
return ret;
}
@@ -1941,7 +1941,7 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
+ ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
}
return ret;
}
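The fs/gfs2/glock.c hunks only add a GFP_KERNEL argument: in this cycle the rhashtable walker gained a gfp_t so rhashtable_walk_init() can allocate its iterator state with the caller's chosen flags. A rough sketch of the 4.6-era walker pattern with that three-argument form (the table pointer and the loop body are placeholders, not code from the patch):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/rhashtable.h>

static int dump_table(struct rhashtable *table)
{
	struct rhashtable_iter iter;
	void *obj;
	int ret;

	ret = rhashtable_walk_init(table, &iter, GFP_KERNEL);
	if (ret)
		return ret;

	ret = rhashtable_walk_start(&iter);
	if (ret == -EAGAIN)
		ret = 0;	/* a concurrent resize only perturbs the walk */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;	/* ERR_PTR(-EAGAIN): resize marker, skip */
		/* inspect obj here */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return ret;
}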
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
index d07a2f91d858..8b252673d454 100644
--- a/fs/quota/netlink.c
+++ b/fs/quota/netlink.c
@@ -47,7 +47,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
void *msg_head;
int ret;
int msg_size = 4 * nla_total_size(sizeof(u32)) +
- 2 * nla_total_size(sizeof(u64));
+ 2 * nla_total_size_64bit(sizeof(u64));
/* We have to allocate using GFP_NOFS as we are called from a
* filesystem performing write and thus further recursion into
@@ -68,8 +68,9 @@ void quota_send_warning(struct kqid qid, dev_t dev,
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
if (ret)
goto attr_err_out;
- ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
- from_kqid_munged(&init_user_ns, qid));
+ ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
+ from_kqid_munged(&init_user_ns, qid),
+ QUOTA_NL_A_PAD);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
@@ -81,8 +82,9 @@ void quota_send_warning(struct kqid qid, dev_t dev,
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
if (ret)
goto attr_err_out;
- ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
- from_kuid_munged(&init_user_ns, current_uid()));
+ ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
+ from_kuid_munged(&init_user_ns, current_uid()),
+ QUOTA_NL_A_PAD);
if (ret)
goto attr_err_out;
genlmsg_end(skb, msg_head);
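The fs/quota/netlink.c change is an instance of highlight 15: 64-bit netlink attributes are now sized with nla_total_size_64bit() and emitted with nla_put_u64_64bit(), which inserts a dedicated pad attribute so the 8-byte payload is naturally aligned for all receivers. A condensed sketch of the idiom, reusing the quota attribute names from the hunk above (the helper names are made up):

#include <net/netlink.h>
#include <linux/quota.h>

/* Account for one aligned 64-bit attribute when sizing the message. */
static inline int excess_id_size(void)
{
	return nla_total_size_64bit(sizeof(u64));
}

/* Emit the attribute; QUOTA_NL_A_PAD is the attribute type used for any
 * alignment padding nla_put_u64_64bit() inserts before the value. */
static int put_excess_id(struct sk_buff *skb, u64 id)
{
	return nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID, id, QUOTA_NL_A_PAD);
}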