Diffstat (limited to 'fs/smb/server/connection.c')
-rw-r--r--	fs/smb/server/connection.c	79
1 file changed, 56 insertions(+), 23 deletions(-)
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index e6a72f75ab94..b6b4f1286b9c 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -19,7 +19,7 @@ static DEFINE_MUTEX(init_lock);
 
 static struct ksmbd_conn_ops default_conn_ops;
 
-LIST_HEAD(conn_list);
+DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
 DECLARE_RWSEM(conn_list_lock);
 
 /**
@@ -33,14 +33,16 @@ DECLARE_RWSEM(conn_list_lock);
 void ksmbd_conn_free(struct ksmbd_conn *conn)
 {
 	down_write(&conn_list_lock);
-	list_del(&conn->conns_list);
+	hash_del(&conn->hlist);
 	up_write(&conn_list_lock);
 
 	xa_destroy(&conn->sessions);
 	kvfree(conn->request_buf);
 	kfree(conn->preauth_info);
-	if (atomic_dec_and_test(&conn->refcnt))
+	if (atomic_dec_and_test(&conn->refcnt)) {
+		conn->transport->ops->free_transport(conn->transport);
 		kfree(conn);
+	}
 }
 
 /**
@@ -52,7 +54,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
 {
 	struct ksmbd_conn *conn;
 
-	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
+	conn = kzalloc(sizeof(struct ksmbd_conn), KSMBD_DEFAULT_GFP);
 	if (!conn)
 		return NULL;
 
@@ -70,13 +72,11 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
 	atomic_set(&conn->req_running, 0);
 	atomic_set(&conn->r_count, 0);
 	atomic_set(&conn->refcnt, 1);
-	atomic_set(&conn->mux_smb_requests, 0);
 	conn->total_credits = 1;
 	conn->outstanding_credits = 0;
 
 	init_waitqueue_head(&conn->req_running_q);
 	init_waitqueue_head(&conn->r_count_q);
-	INIT_LIST_HEAD(&conn->conns_list);
 	INIT_LIST_HEAD(&conn->requests);
 	INIT_LIST_HEAD(&conn->async_requests);
 	spin_lock_init(&conn->request_lock);
@@ -89,19 +89,17 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
 	init_rwsem(&conn->session_lock);
 
-	down_write(&conn_list_lock);
-	list_add(&conn->conns_list, &conn_list);
-	up_write(&conn_list_lock);
 	return conn;
 }
 
 bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
 {
 	struct ksmbd_conn *t;
+	int bkt;
 	bool ret = false;
 
 	down_read(&conn_list_lock);
-	list_for_each_entry(t, &conn_list, conns_list) {
+	hash_for_each(conn_list, bkt, t, hlist) {
 		if (memcmp(t->ClientGUID, c->ClientGUID,
 			   SMB2_CLIENT_GUID_SIZE))
 			continue;
 
@@ -120,8 +118,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
 	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
 		requests_queue = &conn->requests;
 
+	atomic_inc(&conn->req_running);
 	if (requests_queue) {
-		atomic_inc(&conn->req_running);
 		spin_lock(&conn->request_lock);
 		list_add_tail(&work->request_entry, requests_queue);
 		spin_unlock(&conn->request_lock);
@@ -132,11 +130,14 @@ void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
 
+	atomic_dec(&conn->req_running);
+	if (waitqueue_active(&conn->req_running_q))
+		wake_up(&conn->req_running_q);
+
 	if (list_empty(&work->request_entry) &&
 	    list_empty(&work->async_request_entry))
 		return;
 
-	atomic_dec(&conn->req_running);
 	spin_lock(&conn->request_lock);
 	list_del_init(&work->request_entry);
 	spin_unlock(&conn->request_lock);
@@ -159,9 +160,10 @@ void ksmbd_conn_unlock(struct ksmbd_conn *conn)
 void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
 {
 	struct ksmbd_conn *conn;
+	int bkt;
 
 	down_read(&conn_list_lock);
-	list_for_each_entry(conn, &conn_list, conns_list) {
+	hash_for_each(conn_list, bkt, conn, hlist) {
 		if (conn->binding || xa_load(&conn->sessions, sess_id))
 			WRITE_ONCE(conn->status, status);
 	}
@@ -177,14 +179,14 @@ int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
 {
 	struct ksmbd_conn *conn;
 	int rc, retry_count = 0, max_timeout = 120;
-	int rcount = 1;
+	int rcount = 1, bkt;
 
 retry_idle:
 	if (retry_count >= max_timeout)
 		return -EIO;
 
 	down_read(&conn_list_lock);
-	list_for_each_entry(conn, &conn_list, conns_list) {
+	hash_for_each(conn_list, bkt, conn, hlist) {
 		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
 			if (conn == curr_conn)
 				rcount = 2;
@@ -239,7 +241,7 @@ int ksmbd_conn_write(struct ksmbd_work *work)
 
 int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
 			 void *buf, unsigned int buflen,
-			 struct smb2_buffer_desc_v1 *desc,
+			 struct smbdirect_buffer_descriptor_v1 *desc,
 			 unsigned int desc_len)
 {
 	int ret = -EINVAL;
@@ -253,7 +255,7 @@ int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
 			  void *buf, unsigned int buflen,
-			  struct smb2_buffer_desc_v1 *desc,
+			  struct smbdirect_buffer_descriptor_v1 *desc,
 			  unsigned int desc_len)
 {
 	int ret = -EINVAL;
 
@@ -308,7 +310,7 @@ int ksmbd_conn_handler_loop(void *p)
 {
 	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
 	struct ksmbd_transport *t = conn->transport;
-	unsigned int pdu_size, max_allowed_pdu_size;
+	unsigned int pdu_size, max_allowed_pdu_size, max_req;
 	char hdr_buf[4] = {0,};
 	int size;
 
@@ -318,6 +320,7 @@ int ksmbd_conn_handler_loop(void *p)
 	if (t->ops->prepare && t->ops->prepare(t))
 		goto out;
 
+	max_req = server_conf.max_inflight_req;
 	conn->last_active = jiffies;
 	set_freezable();
 	while (ksmbd_conn_alive(conn)) {
@@ -327,6 +330,13 @@ int ksmbd_conn_handler_loop(void *p)
 		kvfree(conn->request_buf);
 		conn->request_buf = NULL;
 
+recheck:
+		if (atomic_read(&conn->req_running) + 1 > max_req) {
+			wait_event_interruptible(conn->req_running_q,
+				atomic_read(&conn->req_running) < max_req);
+			goto recheck;
+		}
+
 		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
 		if (size != sizeof(hdr_buf))
 			break;
@@ -359,7 +369,7 @@ int ksmbd_conn_handler_loop(void *p)
 		/* 4 for rfc1002 length field */
 		/* 1 for implied bcc[0] */
 		size = pdu_size + 4 + 1;
-		conn->request_buf = kvmalloc(size, GFP_KERNEL);
+		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
 		if (!conn->request_buf)
 			break;
 
@@ -404,6 +414,7 @@ int ksmbd_conn_handler_loop(void *p)
 out:
 	ksmbd_conn_set_releasing(conn);
 	/* Wait till all reference dropped to the Server object*/
+	ksmbd_debug(CONN, "Wait for all pending requests(%d)\n", atomic_read(&conn->r_count));
 	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
 
 	if (IS_ENABLED(CONFIG_UNICODE))
@@ -422,6 +433,26 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
 	default_conn_ops.terminate_fn = ops->terminate_fn;
 }
 
+void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
+{
+	atomic_inc(&conn->r_count);
+}
+
+void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
+{
+	/*
+	 * Checking waitqueue to dropping pending requests on
+	 * disconnection. waitqueue_active is safe because it
+	 * uses atomic operation for condition.
+	 */
+	atomic_inc(&conn->refcnt);
+	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+		wake_up(&conn->r_count_q);
+
+	if (atomic_dec_and_test(&conn->refcnt))
+		kfree(conn);
+}
+
 int ksmbd_conn_transport_init(void)
 {
 	int ret;
@@ -447,10 +478,11 @@ static void stop_sessions(void)
 {
 	struct ksmbd_conn *conn;
 	struct ksmbd_transport *t;
+	int bkt;
 
 again:
 	down_read(&conn_list_lock);
-	list_for_each_entry(conn, &conn_list, conns_list) {
+	hash_for_each(conn_list, bkt, conn, hlist) {
 		t = conn->transport;
 		ksmbd_conn_set_exiting(conn);
 		if (t->ops->shutdown) {
@@ -461,8 +493,8 @@ again:
 	}
 	up_read(&conn_list_lock);
 
-	if (!list_empty(&conn_list)) {
-		schedule_timeout_interruptible(HZ / 10);	/* 100ms */
+	if (!hash_empty(conn_list)) {
+		msleep(100);
 		goto again;
 	}
 }
@@ -471,7 +503,8 @@ void ksmbd_conn_transport_destroy(void)
 {
 	mutex_lock(&init_lock);
 	ksmbd_tcp_destroy();
-	ksmbd_rdma_destroy();
+	ksmbd_rdma_stop_listening();
 	stop_sessions();
+	ksmbd_rdma_destroy();
 	mutex_unlock(&init_lock);
 }
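The structural core of the patch is the move from a global linked list to a fixed-size hash table for connection tracking. Below is a minimal, self-contained sketch of that pattern using <linux/hashtable.h>; the bucket count, struct layout, and the keyed-lookup helper are illustrative assumptions, not code from the patch. Note that the patch itself still scans every bucket with hash_for_each(), because its lookups match on ClientGUID or session id rather than on the hash key.

/* Sketch of the list-to-hashtable migration; names are hypothetical. */
#include <linux/hashtable.h>
#include <linux/rwsem.h>
#include <linux/types.h>

#define EXAMPLE_HASH_BITS 6	/* 2^6 = 64 buckets; ksmbd uses CONN_HASH_BITS */

static DEFINE_HASHTABLE(example_table, EXAMPLE_HASH_BITS);
static DECLARE_RWSEM(example_table_lock);

struct example_conn {
	struct hlist_node hlist;	/* replaces the old struct list_head member */
	u64 id;				/* hypothetical key; the patch's key choice is not shown */
};

/* Insert under the write lock; the key selects the bucket. */
static void example_insert(struct example_conn *c)
{
	down_write(&example_table_lock);
	hash_add(example_table, &c->hlist, c->id);
	up_write(&example_table_lock);
}

/* hash_del() needs only the node, mirroring hash_del(&conn->hlist) above. */
static void example_remove(struct example_conn *c)
{
	down_write(&example_table_lock);
	hash_del(&c->hlist);
	up_write(&example_table_lock);
}

/* A keyed lookup touches a single bucket... */
static struct example_conn *example_find(u64 id)
{
	struct example_conn *c;

	down_read(&example_table_lock);
	hash_for_each_possible(example_table, c, hlist, id) {
		if (c->id == id) {
			up_read(&example_table_lock);
			return c;
		}
	}
	up_read(&example_table_lock);
	return NULL;
}

/* ...while a full scan, as in ksmbd_conn_lookup_dialect(), visits every bucket. */
static bool example_scan_nonempty(void)
{
	struct example_conn *c;
	bool found = false;
	int bkt;

	down_read(&example_table_lock);
	hash_for_each(example_table, bkt, c, hlist)
		found = true;
	up_read(&example_table_lock);
	return found;
}

Because the GUID and session-id comparisons cannot reuse the hash key, the existing conn_list_lock rwsem stays in place and the iterations remain full walks; the table presumably buys cheaper insert/remove and leaves room for keyed lookups later.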

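The other behavioral change worth illustrating is the inflight-request cap: the receive loop now refuses to read another PDU while conn->req_running has reached server_conf.max_inflight_req, and ksmbd_conn_try_dequeue_request() wakes it as each request retires. Here is a standalone sketch of the same throttle with illustrative names (MAX_INFLIGHT, inflight, inflight_wq are assumptions, not ksmbd identifiers):

/* Sketch of the inflight throttle added to ksmbd_conn_handler_loop(). */
#include <linux/atomic.h>
#include <linux/wait.h>

#define MAX_INFLIGHT 32				/* stands in for server_conf.max_inflight_req */

static atomic_t inflight = ATOMIC_INIT(0);	/* stands in for conn->req_running */
static DECLARE_WAIT_QUEUE_HEAD(inflight_wq);	/* stands in for conn->req_running_q */

/* Called by the receive loop before reading the next PDU. */
static void throttle_before_read(void)
{
recheck:
	/* Block while the connection is already at its request budget. */
	if (atomic_read(&inflight) + 1 > MAX_INFLIGHT) {
		wait_event_interruptible(inflight_wq,
					 atomic_read(&inflight) < MAX_INFLIGHT);
		/* Re-test: a signal or a racing enqueue may have woken us early. */
		goto recheck;
	}
}

/* Mirror ksmbd_conn_enqueue_request() / ksmbd_conn_try_dequeue_request(). */
static void request_enqueued(void)
{
	atomic_inc(&inflight);
}

static void request_retired(void)
{
	atomic_dec(&inflight);
	/* waitqueue_active() is a cheap atomic check; only wake a throttled reader. */
	if (waitqueue_active(&inflight_wq))
		wake_up(&inflight_wq);
}

A related detail in the patch is the temporary refcount taken inside ksmbd_conn_r_count_dec(): by bumping conn->refcnt around the decrement-and-wake sequence, the connection (and the waitqueue embedded in it) cannot be freed between atomic_dec_return() and wake_up(), closing a wake-after-free window.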