/*
 * Copyright (c) 2019  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "memtank.h"
#include <inttypes.h>

#define CHUNK_OBJ_LT_NUM	4

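/*
 * Aggregated memchunk usage stats for dump output:
 * "empty" chunks have no free objects left, "full" chunks have all
 * objects free, partially used chunks are bucketed into a coarse
 * histogram by their in-use object count.
 */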
struct mchunk_stat {
	uint32_t nb_empty;
	uint32_t nb_full;
	struct {
		uint32_t nb_chunk;
		uint32_t nb_obj;
		struct {
			uint32_t val;
			uint32_t num;
		} chunk_obj_lt[CHUNK_OBJ_LT_NUM];
	} used;
};

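/* stats for the tank's free[] object cache and its backing memchunks */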
struct mfree_stat {
	uint32_t nb_chunk;
	struct mchunk_stat chunk;
};

#define	MTANK_LOG(lvl, fmt, args...)	RTE_LOG(lvl, USER1, fmt, ##args)

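/* print collected memchunk stats in the dump's nested key=value format */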
static void
mchunk_stat_dump(FILE *f, const struct mchunk_stat *st)
{
	uint32_t i;

	fprintf(f, "\t\tstat={\n");
	fprintf(f, "\t\t\tnb_empty=%u,\n", st->nb_empty);
	fprintf(f, "\t\t\tnb_full=%u,\n", st->nb_full);
	fprintf(f, "\t\t\tused={\n");
	fprintf(f, "\t\t\t\tnb_chunk=%u,\n", st->used.nb_chunk);
	fprintf(f, "\t\t\t\tnb_obj=%u,\n", st->used.nb_obj);

	for (i = 0; i != RTE_DIM(st->used.chunk_obj_lt); i++) {
		if (st->used.chunk_obj_lt[i].num != 0)
			fprintf(f, "\t\t\t\tnb_chunk_obj_lt_%u=%u,\n",
				st->used.chunk_obj_lt[i].val,
				st->used.chunk_obj_lt[i].num);
	}

	fprintf(f, "\t\t\t},\n");
	fprintf(f, "\t\t},\n");
}

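/*
 * Reset the stats and set the histogram bucket upper bounds to
 * 1/4, 2/4, 3/4 and 4/4 of the per-chunk object count.
 */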
static void
mchunk_stat_init(struct mchunk_stat *st, uint32_t nb_obj_chunk)
{
	uint32_t i;

	memset(st, 0, sizeof(*st));
	for (i = 0; i != RTE_DIM(st->used.chunk_obj_lt); i++) {
		st->used.chunk_obj_lt[i].val = (i + 1) * nb_obj_chunk /
			RTE_DIM(st->used.chunk_obj_lt);
	}
}

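/*
 * Account one memchunk: no free objects means "empty", all objects
 * free means "full", anything in between goes into the used-chunk
 * histogram, bucketed by the number of objects in use.
 */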
static void
mchunk_stat_collect(struct mchunk_stat *st, const struct memchunk *ch)
{
	uint32_t i, n;

	n = ch->nb_total - ch->nb_free;

	if (ch->nb_free == 0)
		st->nb_empty++;
	else if (n == 0)
		st->nb_full++;
	else {
		st->used.nb_chunk++;
		st->used.nb_obj += n;

		for (i = 0; i != RTE_DIM(st->used.chunk_obj_lt); i++) {
			if (n < st->used.chunk_obj_lt[i].val) {
				st->used.chunk_obj_lt[i].num++;
				break;
			}
		}
	}
}

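/*
 * Walk one chunk list (MC_FULL or MC_USED) under its lock and,
 * depending on the flags, dump per-chunk metadata and/or
 * accumulated stats.
 */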
static void
mchunk_list_dump(FILE *f, struct memtank *mt, uint32_t idx, uint32_t flags)
{
	struct mchunk_list *ls;
	const struct memchunk *ch;
	struct mchunk_stat mcs;

	ls = &mt->chl[idx];
	mchunk_stat_init(&mcs, mt->prm.nb_obj_chunk);

	rte_spinlock_lock(&ls->lock);

	for (ch = TAILQ_FIRST(&ls->chunk); ch != NULL;
			ch = TAILQ_NEXT(ch, link)) {

		/* collect chunk stats */
		if (flags & TLE_MTANK_DUMP_CHUNK_STAT)
			mchunk_stat_collect(&mcs, ch);

		/* dump chunk metadata */
		if (flags & TLE_MTANK_DUMP_CHUNK) {
			fprintf(f, "\t\tmemchunk@%p={\n", ch);
			fprintf(f, "\t\t\traw=%p,\n", ch->raw);
			fprintf(f, "\t\t\tnb_total=%u,\n", ch->nb_total);
			fprintf(f, "\t\t\tnb_free=%u,\n", ch->nb_free);
			fprintf(f, "\t\t},\n");
		}
	}

	rte_spinlock_unlock(&ls->lock);

	/* print chunk stats */
	if (flags & TLE_MTANK_DUMP_CHUNK_STAT)
		mchunk_stat_dump(f, &mcs);
}

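/* reset stats for the free[] object cache */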
static void
mfree_stat_init(struct mfree_stat *st, uint32_t nb_obj_chunk)
{
	st->nb_chunk = 0;
	mchunk_stat_init(&st->chunk, nb_obj_chunk);
}

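/* qsort() comparator for an array of pointer-sized integers */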
static int
ptr_cmp(const void *p1, const void *p2)
{
	const intptr_t *v1, *v2;

	v1 = p1;
	v2 = p2;
	/* compare via relational operators: a plain subtraction could
	 * truncate/overflow the pointer-sized difference into int */
	return (v1[0] > v2[0]) - (v1[0] < v2[0]);
}

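/*
 * Map each object in the tank's free[] cache back to its parent
 * memchunk, then collect stats over the distinct chunks found.
 */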
static void
mfree_stat_collect(struct mfree_stat *st, struct memtank *mt)
{
	uint32_t i, j, n, sz;
	uintptr_t *p;
	const struct memobj *mo;

	sz = mt->obj_size;

	p = malloc(mt->pub.max_free * sizeof(*p));
	if (p == NULL)
		return;

	/**
	 * Grab the free lock and keep it while we analyze the related
	 * memchunks, to make sure none of them is freed until we are
	 * finished.
	 */
	rte_spinlock_lock(&mt->pub.lock);

	/* collect chunks for all objects in free[] */
	n = mt->pub.nb_free;
	memcpy(p, mt->pub.free, n * sizeof(*p));
	for (i = 0; i != n; i++) {
		mo = obj_pub_full(p[i], sz);
		p[i] = (uintptr_t)mo->chunk;
	}

	/* sort chunk pointers */
	qsort(p, n, sizeof(*p), ptr_cmp);

	/* collect stats for each chunk, skipping duplicates */
	for (i = 0; i != n; i = j) {

		st->nb_chunk++;
		mchunk_stat_collect(&st->chunk, (const struct memchunk *)p[i]);
		for (j = i + 1; j != n && p[i] == p[j]; j++)
			;
	}

	rte_spinlock_unlock(&mt->pub.lock);
	free(p);
}

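/* print stats collected over the free[] object cache */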
static void
mfree_stat_dump(FILE *f, const struct mfree_stat *st)
{
	fprintf(f, "\tfree_stat={\n");
	fprintf(f, "\t\tnb_chunk=%u,\n", st->nb_chunk);
	mchunk_stat_dump(f, &st->chunk);
	fprintf(f, "\t},\n");
}

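/*
 * Dump memtank state into the given stream in human readable form.
 * A minimal usage sketch, combining TLE_MTANK_DUMP_* flags as needed:
 *
 *	tle_memtank_dump(stdout, t,
 *		TLE_MTANK_DUMP_FREE_STAT | TLE_MTANK_DUMP_CHUNK_STAT);
 */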
void
tle_memtank_dump(FILE *f, const struct tle_memtank *t, uint32_t flags)
{
	struct memtank *mt;

	if (f == NULL || t == NULL)
		return;

	mt = tank_pub_full(t);

	fprintf(f, "tle_memtank@%p={\n", t);
	fprintf(f, "\tmin_free=%u,\n", t->min_free);
	fprintf(f, "\tmax_free=%u,\n", t->max_free);
	fprintf(f, "\tnb_free=%u,\n", t->nb_free);
	fprintf(f, "\tchunk_size=%zu,\n", mt->chunk_size);
	fprintf(f, "\tobj_size=%u,\n", mt->obj_size);
	fprintf(f, "\tmax_chunk=%u,\n", mt->max_chunk);
	fprintf(f, "\tflags=%#x,\n", mt->flags);
	fprintf(f, "\tnb_chunks=%u,\n", rte_atomic32_read(&mt->nb_chunks));

	if (flags & TLE_MTANK_DUMP_FREE_STAT) {
		struct mfree_stat mfs;
		mfree_stat_init(&mfs, mt->prm.nb_obj_chunk);
		mfree_stat_collect(&mfs, mt);
		mfree_stat_dump(f, &mfs);
	}

	if (flags & (TLE_MTANK_DUMP_CHUNK | TLE_MTANK_DUMP_CHUNK_STAT)) {

		fprintf(f, "\t[FULL]={\n");
		mchunk_list_dump(f, mt, MC_FULL, flags);
		fprintf(f, "\t},\n");

		fprintf(f, "\t[USED]={\n");
		mchunk_list_dump(f, mt, MC_USED, flags);
		fprintf(f, "\t},\n");
	}
	fprintf(f, "};\n");
}

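/*
 * Verify a bulk of object pointers: flag NULL and misaligned entries,
 * then check each object's header (red zones, plus the alloc/free
 * counters when TLE_MTANK_OBJ_DBG is enabled and fmsk allows it).
 * Returns 0 on success, negative number of broken objects otherwise.
 */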
static int
mobj_bulk_check(const char *fname, const struct memtank *mt,
	const uintptr_t p[], uint32_t num, uint32_t fmsk)
{
	int32_t ret;
	uintptr_t align;
	uint32_t i, k, sz;
	const struct memobj *mo;

	k = ((mt->flags & TLE_MTANK_OBJ_DBG) != 0) & fmsk;
	sz = mt->obj_size;
	align = mt->prm.obj_align - 1;

	ret = 0;
	for (i = 0; i != num; i++) {

		if (p[i] == (uintptr_t)NULL) {
			ret--;
			MTANK_LOG(ERR,
				"%s(mt=%p, %p[%u]): NULL object\n",
				fname, mt, p, i);
		} else if ((p[i] & align) != 0) {
			ret--;
			MTANK_LOG(ERR,
				"%s(mt=%p, %p[%u]): object %#" PRIxPTR
				" violates expected alignment %#" PRIxPTR "\n",
				fname, mt, p, i, p[i], align);
		} else {
			mo = obj_pub_full(p[i], sz);
			if (memobj_verify(mo, k) != 0) {
				ret--;
				MTANK_LOG(ERR,
					"%s(mt=%p, %p[%u]): "
					"invalid object header @%#" PRIxPTR "={"
					"red_zone1=%#" PRIx64 ","
					"dbg={nb_alloc=%u,nb_free=%u},"
					"red_zone2=%#" PRIx64
					"}\n",
					fname, mt, p, i, p[i],
					mo->red_zone1,
					mo->dbg.nb_alloc, mo->dbg.nb_free,
					mo->red_zone2);
			}
		}
	}

	return ret;
}

/* grab free lock and check objects in free[] */
static int
mfree_check(struct memtank *mt)
{
	int32_t rc;

	rte_spinlock_lock(&mt->pub.lock);
	rc = mobj_bulk_check(__func__, mt, (const uintptr_t *)mt->pub.free,
		mt->pub.nb_free, 1);
	rte_spinlock_unlock(&mt->pub.lock);
	return rc;
}

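/*
 * Verify one memchunk: expected object count, correct list residency
 * (no used objects on the FULL list, at least one on the USED list),
 * raw buffer alignment, and every object in its free[] array.
 */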
static int
mchunk_check(const struct memtank *mt, const struct memchunk *mc, uint32_t tc)
{
	int32_t n, rc;

	rc = 0;
	n = mc->nb_total - mc->nb_free;

	rc -= (mc->nb_total != mt->prm.nb_obj_chunk);
	rc -= (tc == MC_FULL) ? (n != 0) : (n <= 0);
	rc -= (RTE_PTR_ALIGN_CEIL(mc->raw, alignof(*mc)) != mc);

	if (rc != 0)
		MTANK_LOG(ERR, "%s(mt=%p, tc=%u): invalid memchunk @%p={"
			"raw=%p, nb_total=%u, nb_free=%u}\n",
			__func__, mt, tc, mc,
			mc->raw, mc->nb_total, mc->nb_free);

	rc += mobj_bulk_check(__func__, mt, (const uintptr_t *)mc->free,
		mc->nb_free, 0);
	return rc;
}

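/* verify all memchunks on one list under its lock, counting them */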
static int
mchunk_list_check(struct memtank *mt, uint32_t tc, uint32_t *nb_chunk)
{
	int32_t rc;
	uint32_t n;
	struct mchunk_list *ls;
	const struct memchunk *ch;

	ls = &mt->chl[tc];
	rte_spinlock_lock(&ls->lock);

	rc = 0;
	for (n = 0, ch = TAILQ_FIRST(&ls->chunk); ch != NULL;
			ch = TAILQ_NEXT(ch, link), n++)
		rc += mchunk_check(mt, ch, tc);

	rte_spinlock_unlock(&ls->lock);

	*nb_chunk = n;
	return rc;
}

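/*
 * Check consistency of the whole memtank: objects cached in free[],
 * every chunk on the FULL and USED lists, and, when ct == 0 (no
 * concurrent users expected), that the per-list chunk counts add up
 * to nb_chunks. Returns 0 on success, negative error count otherwise.
 */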
int
tle_memtank_sanity_check(const struct tle_memtank *t, int32_t ct)
{
	int32_t rc;
	uint32_t n, nf, nu;
	struct memtank *mt;

	mt = tank_pub_full(t);
	rc = mfree_check(mt);

	nf = 0, nu = 0;
	rc += mchunk_list_check(mt, MC_FULL, &nf);
	rc += mchunk_list_check(mt, MC_USED, &nu);

	/*
	 * If other threads concurrently do alloc/free/grow/shrink,
	 * these numbers may legitimately not match.
	 */
	n = rte_atomic32_read(&mt->nb_chunks);
	if (nf + nu != n && ct == 0) {
		MTANK_LOG(ERR,
			"%s(mt=%p) nb_chunks: expected=%u, full=%u, used=%u\n",
			__func__, mt, n, nf, nu);
		rc--;
	}

	return rc;
}