path: root/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */

#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/hashtable.h>

#include "mapping.h"

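/* Grace period before a removed mapping entry is actually freed */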
#define MAPPING_GRACE_PERIOD 2000

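/*
 * A mapping context translates fixed-size data blobs into compact u32 ids
 * and back.  Two indexes are kept: a hashtable keyed by a jhash of the
 * data, which lets mapping_add() reuse an existing id for identical data,
 * and an xarray keyed by the allocated id, which serves the id-based
 * lookups in mapping_find() and mapping_remove().
 */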
struct mapping_ctx {
	struct xarray xarray;
	DECLARE_HASHTABLE(ht, 8);
	struct mutex lock; /* Guards hashtable and xarray */
	unsigned long max_id;
	size_t data_size;
	bool delayed_removal;
	struct delayed_work dwork;
	struct list_head pending_list;
	spinlock_t pending_list_lock; /* Guards pending list */
};

struct mapping_item {
	struct rcu_head rcu;
	struct list_head list;
	unsigned long timeout;
	struct hlist_node node;
	int cnt;
	u32 id;
	char data[];
};

int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
{
	struct mapping_item *mi;
	int err = -ENOMEM;
	u32 hash_key;

	mutex_lock(&ctx->lock);

	hash_key = jhash(data, ctx->data_size, 0);
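	/* Reuse the existing item (and its id) if equal data was already mapped */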
	hash_for_each_possible(ctx->ht, mi, node, hash_key) {
		if (!memcmp(data, mi->data, ctx->data_size))
			goto attach;
	}

	mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
	if (!mi)
		goto err_alloc;

	memcpy(mi->data, data, ctx->data_size);
	hash_add(ctx->ht, &mi->node, hash_key);

	err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
		       GFP_KERNEL);
	if (err)
		goto err_assign;
attach:
	++mi->cnt;
	*id = mi->id;

	mutex_unlock(&ctx->lock);

	return 0;

err_assign:
	hash_del(&mi->node);
	kfree(mi);
err_alloc:
	mutex_unlock(&ctx->lock);

	return err;
}

static void mapping_remove_and_free(struct mapping_ctx *ctx,
				    struct mapping_item *mi)
{
	xa_erase(&ctx->xarray, mi->id);
	kfree_rcu(mi, rcu);
}

static void mapping_free_item(struct mapping_ctx *ctx,
			      struct mapping_item *mi)
{
	if (!ctx->delayed_removal) {
		mapping_remove_and_free(ctx, mi);
		return;
	}

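	/*
	 * Delayed removal: park the item on the pending list and let the
	 * work handler free it once the grace period has expired.
	 */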
	mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);

	spin_lock(&ctx->pending_list_lock);
	list_add_tail(&mi->list, &ctx->pending_list);
	spin_unlock(&ctx->pending_list_lock);

	schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
}

int mapping_remove(struct mapping_ctx *ctx, u32 id)
{
	unsigned long index = id;
	struct mapping_item *mi;
	int err = -ENOENT;

	mutex_lock(&ctx->lock);
	mi = xa_load(&ctx->xarray, index);
	if (!mi)
		goto out;
	err = 0;

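	/* Shared mappings are only freed once the last user removes them */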
	if (--mi->cnt > 0)
		goto out;

	hash_del(&mi->node);
	mapping_free_item(ctx, mi);
out:
	mutex_unlock(&ctx->lock);

	return err;
}

int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
{
	unsigned long index = id;
	struct mapping_item *mi;
	int err = -ENOENT;

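	/*
	 * Lockless id lookup: items are freed with kfree_rcu(), so holding
	 * the RCU read lock keeps @mi valid while its data is copied out.
	 */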
	rcu_read_lock();
	mi = xa_load(&ctx->xarray, index);
	if (!mi)
		goto err_find;

	memcpy(data, mi->data, ctx->data_size);
	err = 0;

err_find:
	rcu_read_unlock();
	return err;
}

static void
mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
{
	struct mapping_item *mi;

	list_for_each_entry(mi, list, list)
		mapping_remove_and_free(ctx, mi);
}

static void mapping_work_handler(struct work_struct *work)
{
	unsigned long min_timeout = 0, now = jiffies;
	struct mapping_item *mi, *next;
	LIST_HEAD(pending_items);
	struct mapping_ctx *ctx;

	ctx = container_of(work, struct mapping_ctx, dwork.work);

	spin_lock(&ctx->pending_list_lock);
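	/*
	 * Collect items whose grace period has expired; remember the
	 * earliest timeout among those that still have to wait.
	 */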
	list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
		if (time_after(now, mi->timeout))
			list_move(&mi->list, &pending_items);
		else if (!min_timeout ||
			 time_before(mi->timeout, min_timeout))
			min_timeout = mi->timeout;
	}
	spin_unlock(&ctx->pending_list_lock);

	mapping_remove_and_free_list(ctx, &pending_items);

	if (min_timeout)
		schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
}

static void mapping_flush_work(struct mapping_ctx *ctx)
{
	if (!ctx->delayed_removal)
		return;

	cancel_delayed_work_sync(&ctx->dwork);
	mapping_remove_and_free_list(ctx, &ctx->pending_list);
}

struct mapping_ctx *
mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
{
	struct mapping_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_id = max_id ? max_id : UINT_MAX;
	ctx->data_size = data_size;

	if (delayed_removal) {
		INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
		INIT_LIST_HEAD(&ctx->pending_list);
		spin_lock_init(&ctx->pending_list_lock);
		ctx->delayed_removal = true;
	}

	mutex_init(&ctx->lock);
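	/* XA_FLAGS_ALLOC1 starts id allocation at 1, so id 0 is never handed out */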
	xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);

	return ctx;
}

void mapping_destroy(struct mapping_ctx *ctx)
{
	mapping_flush_work(ctx);
	xa_destroy(&ctx->xarray);
	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
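
/*
 * Illustrative usage sketch.  Not part of the driver: the caller, the
 * "struct tunnel_key" type and the values below are hypothetical and only
 * show how the API above fits together.  A max_id of 0 allows ids up to
 * UINT_MAX; passing true enables delayed (grace-period) removal.
 *
 *	struct tunnel_key key = { ... };
 *	struct mapping_ctx *ctx;
 *	u32 id;
 *
 *	ctx = mapping_create(sizeof(key), 0, true);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	mapping_add(ctx, &key, &id);	adding equal data again returns the same id
 *	mapping_find(ctx, id, &key);	copies the stored data back into key
 *	mapping_remove(ctx, id);	the entry is freed once its last user is gone
 *	mapping_destroy(ctx);
 */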