/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-tp-mempool.c
 *
 * Copyright (C) 2018 Julien Desfossez <[email protected]>
 */

#include <linux/slab.h>
#include <linux/percpu.h>

#include <lttng-tp-mempool.h>

struct lttng_tp_buf_entry {
	int cpu;	/* To make sure we return the entry to the right pool. */
	char buf[LTTNG_TP_MEMPOOL_BUF_SIZE];
	struct list_head list;
};

/*
 * No exclusive access strategy for now, this memory pool is currently only
 * used from a non-preemptible context, and the interrupt tracepoint probes do
 * not use this facility.
 */
struct per_cpu_buf {
	struct list_head free_list; /* Free struct lttng_tp_buf_entry. */
};

static struct per_cpu_buf __percpu *pool; /* Per-cpu buffer. */
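
/*
 * Illustrative usage sketch (not part of the upstream file): the pool is
 * meant to be used from a non-preemptible context such as a tracepoint
 * probe. The probe function below is hypothetical; only
 * lttng_tp_mempool_alloc(), lttng_tp_mempool_free() and
 * LTTNG_TP_MEMPOOL_BUF_SIZE come from this API. Preemption is assumed to
 * already be disabled when the probe runs, so the per-cpu lookup in the
 * allocator is stable.
 *
 *	static void example_probe(void)
 *	{
 *		char *buf;
 *
 *		buf = lttng_tp_mempool_alloc(LTTNG_TP_MEMPOOL_BUF_SIZE);
 *		if (!buf)
 *			return;		// This CPU's free list is exhausted.
 *		// ... fill buf with up to LTTNG_TP_MEMPOOL_BUF_SIZE bytes of
 *		// payload and record it into the trace ...
 *		lttng_tp_mempool_free(buf);
 *	}
 */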
int lttng_tp_mempool_init(void)
{
	int ret, cpu;

	/* The pool is only supposed to be allocated once. */
	if (pool) {
		WARN_ON_ONCE(1);
		ret = -1;
		goto end;
	}

	pool = alloc_percpu(struct per_cpu_buf);
	if (!pool) {
		ret = -ENOMEM;
		goto end;
	}

	/* Initialize each CPU's free list. */
	for_each_possible_cpu(cpu) {
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);

		INIT_LIST_HEAD(&cpu_buf->free_list);
	}

	/* Pre-allocate the buffer entries on each CPU's local NUMA node. */
	for_each_possible_cpu(cpu) {
		int i;
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);

		for (i = 0; i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU; i++) {
			struct lttng_tp_buf_entry *entry;

			entry = kzalloc_node(sizeof(struct lttng_tp_buf_entry),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!entry) {
				ret = -ENOMEM;
				goto error_free_pool;
			}
			entry->cpu = cpu;
			list_add_tail(&entry->list, &cpu_buf->free_list);
		}
	}

	ret = 0;
	goto end;

error_free_pool:
	lttng_tp_mempool_destroy();
end:
	return ret;
}
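
/*
 * A minimal sketch of the expected pairing of the init and destroy calls,
 * assuming they are driven from a module's init/exit path; the function
 * names below are hypothetical and not part of this file:
 *
 *	static int __init example_init(void)
 *	{
 *		int ret;
 *
 *		ret = lttng_tp_mempool_init();
 *		if (ret)
 *			return ret;
 *		// ... register tracepoint probes ...
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		// ... unregister probes first, so no buffer is still in use ...
 *		lttng_tp_mempool_destroy();
 *	}
 */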
void lttng_tp_mempool_destroy(void)
{
	int cpu;

	if (!pool) {
		return;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
		struct lttng_tp_buf_entry *entry, *tmp;
		int i = 0;

		list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
			list_del(&entry->list);
			kfree(entry);
			i++;
		}
		/*
		 * Fewer entries on the free list than were allocated means
		 * some buffers were never returned and cannot be freed here.
		 */
		if (i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU) {
			printk(KERN_WARNING "Leak detected in tp-mempool\n");
		}
	}
	free_percpu(pool);
	pool = NULL;
}
void *lttng_tp_mempool_alloc(size_t size)
{
	void *ret;
	struct lttng_tp_buf_entry *entry;
	struct per_cpu_buf *cpu_buf;
	int cpu = smp_processor_id();

	if (size > LTTNG_TP_MEMPOOL_BUF_SIZE) {
		ret = NULL;
		goto end;
	}

	cpu_buf = per_cpu_ptr(pool, cpu);
	if (list_empty(&cpu_buf->free_list)) {
		ret = NULL;
		goto end;
	}

	entry = list_first_entry(&cpu_buf->free_list, struct lttng_tp_buf_entry, list);
	/* Remove the entry from the free list. */
	list_del(&entry->list);
	memset(entry->buf, 0, LTTNG_TP_MEMPOOL_BUF_SIZE);
	ret = (void *) entry->buf;

end:
	return ret;
}
void lttng_tp_mempool_free(void *ptr)
{
	struct lttng_tp_buf_entry *entry;
	struct per_cpu_buf *cpu_buf;

	if (!ptr) {
		goto end;
	}

	entry = container_of(ptr, struct lttng_tp_buf_entry, buf);
	if (!entry) {
		goto end;
	}

	cpu_buf = per_cpu_ptr(pool, entry->cpu);
	if (!cpu_buf) {
		goto end;
	}

	/* Add it to the free list. */
	list_add_tail(&entry->list, &cpu_buf->free_list);

end:
	return;
}
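
/*
 * Note (an aside, not upstream text): each entry records the CPU whose free
 * list it came from, so lttng_tp_mempool_free() always hands the buffer back
 * to its owning per-cpu list via entry->cpu. There is no locking here, so the
 * expectation stated at the top of the file still applies: allocation and
 * free happen from a non-preemptible context, typically back to back in the
 * same probe. A hedged sketch with a hypothetical caller:
 *
 *	// inside a probe, preemption disabled
 *	char *buf = lttng_tp_mempool_alloc(64);
 *
 *	if (buf) {
 *		// ... use up to 64 of the zeroed bytes ...
 *		lttng_tp_mempool_free(buf);
 *	}
 */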