path: root/kernel/irq/affinity.c

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	while (cpus_per_vec > 0) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but bail out if the node mask is empty */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}
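
/*
 * Illustrative sketch only, not part of this file's call graph: feed a
 * hand-built node mask through irq_spread_init_one() and dump the result.
 * The function name and CPU numbers are assumptions for the example; on a
 * machine where CPUs 0 and 2 are hyperthread siblings, asking for two CPUs
 * per vector picks both of them before touching CPU 1.
 */
static void __maybe_unused irq_spread_init_one_example(void)
{
	cpumask_var_t nmsk, irqmsk;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return;
	if (!zalloc_cpumask_var(&irqmsk, GFP_KERNEL)) {
		free_cpumask_var(nmsk);
		return;
	}

	/* Pretend the node owns CPUs 0-3 */
	cpumask_set_cpu(0, nmsk);
	cpumask_set_cpu(1, nmsk);
	cpumask_set_cpu(2, nmsk);
	cpumask_set_cpu(3, nmsk);

	/* Consume two CPUs for one vector, siblings first */
	irq_spread_init_one(irqmsk, nmsk, 2);
	pr_info("vector mask: %*pbl, remaining: %*pbl\n",
		cpumask_pr_args(irqmsk), cpumask_pr_args(nmsk));

	free_cpumask_var(irqmsk);
	free_cpumask_var(nmsk);
}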

static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_online_node(n) {
		if (cpumask_intersects(mask, cpumask_of_node(n))) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}
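
/*
 * Illustrative sketch only, not called anywhere in the kernel: count the
 * NUMA nodes covered by the online CPUs and dump the resulting node mask.
 * On a single-node machine this reports one node with bit 0 set. The
 * function name is an assumption for the example.
 */
static void __maybe_unused get_nodes_in_cpumask_example(void)
{
	nodemask_t nodemsk = NODE_MASK_NONE;
	int nodes;

	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
	pr_info("%d node(s) in online mask: %*pbl\n",
		nodes, nodemask_pr_args(&nodemsk));
}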

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @affinity:		The affinity mask to spread. If NULL, cpu_online_mask
 *			is used
 * @nvec:		The number of vectors
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
					  int nvec)
{
	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct cpumask *masks;
	cpumask_var_t nmsk;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	masks = kcalloc(nvec, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto out;

	/* Stabilize the cpumasks */
	get_online_cpus();
	/* If the supplied affinity mask is NULL, use cpu online mask */
	if (!affinity)
		affinity = cpu_online_mask;

	nodes = get_nodes_in_cpumask(affinity, &nodemsk);

	/*
	 * If the number of nodes in the mask is less than or equal to the
	 * number of vectors, we just spread the vectors across the nodes.
	 */
	if (nvec <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_copy(masks + curvec, cpumask_of_node(n));
			if (++curvec == nvec)
				break;
		}
		goto outonl;
	}

	/* Spread the vectors per node */
	vecs_per_node = nvec / nodes;
	/* Account for rounding errors */
	extra_vecs = nvec - (nodes * vecs_per_node);

	for_each_node_mask(n, nodemsk) {
		int ncpus, v, vecs_to_assign = vecs_per_node;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, affinity, cpumask_of_node(n));

		/* Calculate the number of cpus per vector */
		ncpus = cpumask_weight(nmsk);

		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate for rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				if (!--extra_vecs)
					vecs_per_node++;
			}
			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
		}

		if (curvec >= nvec)
			break;
	}

outonl:
	put_online_cpus();
out:
	free_cpumask_var(nmsk);
	return masks;
}
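
/*
 * Illustrative sketch only: how a hypothetical multiqueue driver might
 * spread its queue interrupt vectors across the online CPUs. A real
 * caller would feed the masks into its PCI/MSI-X setup rather than
 * printing them; the function name here is an assumption for the example.
 */
static void __maybe_unused example_spread_queue_vectors(int nr_queues)
{
	struct cpumask *masks;
	int i;

	masks = irq_create_affinity_masks(NULL, nr_queues);
	if (!masks)
		return;

	for (i = 0; i < nr_queues; i++)
		pr_info("queue %d -> CPUs %*pbl\n", i,
			cpumask_pr_args(&masks[i]));

	kfree(masks);
}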

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors for a given affinity mask
 * @affinity:		The affinity mask to spread. If NULL, cpu_online_mask
 *			is used
 * @maxvec:		The maximum number of vectors available
 *
 * Returns the number of CPUs in the affinity mask, clamped to @maxvec.
 */
int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
{
	int cpus, ret;

	/* Stabilize the cpumasks */
	get_online_cpus();
	/* If the supplied affinity mask is NULL, use cpu online mask */
	if (!affinity)
		affinity = cpu_online_mask;

	cpus = cpumask_weight(affinity);
	ret = min(cpus, maxvec);

	put_online_cpus();
	return ret;
}
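
/*
 * Illustrative sketch only: the intended pairing of the two helpers above.
 * A hypothetical driver first clamps its desired vector count to what the
 * affinity mask can usefully absorb, then builds one mask per vector. The
 * function name and calling convention are assumptions for the example.
 */
static int __maybe_unused example_clamp_and_spread(int wanted_vecs,
						   struct cpumask **out_masks)
{
	int nvec = irq_calc_affinity_vectors(NULL, wanted_vecs);

	*out_masks = irq_create_affinity_masks(NULL, nvec);
	if (!*out_masks)
		return -ENOMEM;
	return nvec;
}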

static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;
	return cpu;
}

/*
 * Take a map of online CPUs and the number of available interrupt vectors
 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
 * so that they are distributed as evenly as possible around the CPUs.  If
 * more vectors than CPUs are available, we map one to each CPU; otherwise
 * we map one to the first sibling of each core.
 *
 * If there are more vectors than CPUs we will still only have one bit
 * set per CPU, but the interrupt code will keep on assigning vectors from
 * the start of the bitmap until it runs out of vectors.
 */
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
	struct cpumask *affinity_mask;
	unsigned int max_vecs = *nr_vecs;

	if (max_vecs == 1)
		return NULL;

	affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!affinity_mask) {
		*nr_vecs = 1;
		return NULL;
	}

	get_online_cpus();
	if (max_vecs >= num_online_cpus()) {
		cpumask_copy(affinity_mask, cpu_online_mask);
		*nr_vecs = num_online_cpus();
	} else {
		unsigned int vecs = 0, cpu;

		for_each_online_cpu(cpu) {
			if (cpu == get_first_sibling(cpu)) {
				cpumask_set_cpu(cpu, affinity_mask);
				vecs++;
			}

			if (--max_vecs == 0)
				break;
		}
		*nr_vecs = vecs;
	}
	put_online_cpus();

	return affinity_mask;
}
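
/*
 * Illustrative sketch only: the single-mask interface above is the older
 * API. @nr_vecs goes in as the number of vectors the caller could support
 * and comes back as the number that will actually be used; the function
 * name and the value 8 are assumptions for the example.
 */
static void __maybe_unused example_single_mask(void)
{
	unsigned int nr_vecs = 8;	/* assumed driver maximum */
	struct cpumask *mask = irq_create_affinity_mask(&nr_vecs);

	if (mask)
		pr_info("using %u vector(s), mask %*pbl\n",
			nr_vecs, cpumask_pr_args(mask));
	else
		pr_info("single vector fallback (%u)\n", nr_vecs);

	kfree(mask);
}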