include/net/fq_impl.h
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into the includer */

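/* Charge the removal of @skb from @flow against the per-tin, per-flow
 * and per-fq backlog and memory counters.
 */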
static void fq_adjust_removal(struct fq *fq,
                              struct fq_flow *flow,
                              struct sk_buff *skb)
{
        struct fq_tin *tin = flow->tin;

        tin->backlog_bytes -= skb->len;
        tin->backlog_packets--;
        flow->backlog -= skb->len;
        fq->backlog--;
        fq->memory_usage -= skb->truesize;
}

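/* Drop @flow from the backlog list once it is empty, or re-sort it so
 * the list stays ordered by descending per-flow backlog.
 */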
static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
{
        struct fq_flow *i;

        if (flow->backlog == 0) {
                list_del_init(&flow->backlogchain);
        } else {
                i = flow;

                list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
                        if (i->backlog < flow->backlog)
                                break;

                list_move_tail(&flow->backlogchain,
                               &i->backlogchain);
        }
}

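/* Remove the next skb from @flow's queue and update all accounting.
 * Returns NULL when the flow has nothing queued.
 */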
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
                                       struct fq_flow *flow)
{
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

        skb = __skb_dequeue(&flow->queue);
        if (!skb)
                return NULL;

        fq_adjust_removal(fq, flow, skb);
        fq_rejigger_backlog(fq, flow);

        return skb;
}

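/* Deficit round-robin scheduler for one tin: new flows are serviced
 * before old ones, a flow whose deficit is exhausted is refilled and
 * moved to the old list, and a flow that turns out to be empty is
 * either recycled onto old_flows or detached from the tin.
 */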
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
                                      struct fq_tin *tin,
                                      fq_tin_dequeue_t dequeue_func)
{
        struct fq_flow *flow;
        struct list_head *head;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

begin:
        head = &tin->new_flows;
        if (list_empty(head)) {
                head = &tin->old_flows;
                if (list_empty(head))
                        return NULL;
        }

        flow = list_first_entry(head, struct fq_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += fq->quantum;
                list_move_tail(&flow->flowchain,
                               &tin->old_flows);
                goto begin;
        }

        skb = dequeue_func(fq, tin, flow);
        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &tin->new_flows) &&
                    !list_empty(&tin->old_flows)) {
                        list_move_tail(&flow->flowchain, &tin->old_flows);
                } else {
                        list_del_init(&flow->flowchain);
                        flow->tin = NULL;
                }
                goto begin;
        }

        flow->deficit -= skb->len;
        tin->tx_bytes += skb->len;
        tin->tx_packets++;

        return skb;
}

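/* Map an skb to a flow index using the perturbed flow hash. */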
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
        u32 hash = skb_get_hash_perturb(skb, fq->perturbation);

        return reciprocal_scale(hash, fq->flows_cnt);
}

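/* Look up the flow for @idx.  If the bucket is already in use by a
 * different tin (a hash collision), fall back to the caller-provided
 * default flow instead.
 */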
static struct fq_flow *fq_flow_classify(struct fq *fq,
                                        struct fq_tin *tin, u32 idx,
                                        struct sk_buff *skb,
                                        fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        flow = &fq->flows[idx];
        if (flow->tin && flow->tin != tin) {
                flow = get_default_func(fq, tin, idx, skb);
                tin->collisions++;
                fq->collisions++;
        }

        if (!flow->tin)
                tin->flows++;

        return flow;
}

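/* Link @flow into the backlog list if needed and re-sort it after its
 * backlog has grown, keeping the list ordered by descending backlog.
 */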
static void fq_recalc_backlog(struct fq *fq,
                              struct fq_tin *tin,
                              struct fq_flow *flow)
{
        struct fq_flow *i;

        if (list_empty(&flow->backlogchain))
                list_add_tail(&flow->backlogchain, &fq->backlogs);

        i = flow;
        list_for_each_entry_continue_reverse(i, &fq->backlogs,
                                             backlogchain)
                if (i->backlog > flow->backlog)
                        break;

        list_move(&flow->backlogchain, &i->backlogchain);
}

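/* Enqueue @skb on the flow selected by @idx.  If the packet or memory
 * limit is exceeded, drop packets from the flow with the largest
 * backlog until both limits are met again.
 */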
static void fq_tin_enqueue(struct fq *fq,
                           struct fq_tin *tin, u32 idx,
                           struct sk_buff *skb,
                           fq_skb_free_t free_func,
                           fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;
        bool oom;

        lockdep_assert_held(&fq->lock);

        flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);

        flow->tin = tin;
        flow->backlog += skb->len;
        tin->backlog_bytes += skb->len;
        tin->backlog_packets++;
        fq->memory_usage += skb->truesize;
        fq->backlog++;

        fq_recalc_backlog(fq, tin, flow);

        if (list_empty(&flow->flowchain)) {
                flow->deficit = fq->quantum;
                list_add_tail(&flow->flowchain,
                              &tin->new_flows);
        }

        __skb_queue_tail(&flow->queue, skb);
        oom = (fq->memory_usage > fq->memory_limit);
        while (fq->backlog > fq->limit || oom) {
                flow = list_first_entry_or_null(&fq->backlogs,
                                                struct fq_flow,
                                                backlogchain);
                if (!flow)
                        return;

                skb = fq_flow_dequeue(fq, flow);
                if (!skb)
                        return;

                free_func(fq, flow->tin, flow, skb);

                flow->tin->overlimit++;
                fq->overlimit++;
                if (oom) {
                        fq->overmemory++;
                        oom = (fq->memory_usage > fq->memory_limit);
                }
        }
}

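/* Remove and free every skb queued on @flow for which @filter_func
 * returns true.
 */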
static void fq_flow_filter(struct fq *fq,
                           struct fq_flow *flow,
                           fq_skb_filter_t filter_func,
                           void *filter_data,
                           fq_skb_free_t free_func)
{
        struct fq_tin *tin = flow->tin;
        struct sk_buff *skb, *tmp;

        lockdep_assert_held(&fq->lock);

        skb_queue_walk_safe(&flow->queue, skb, tmp) {
                if (!filter_func(fq, tin, flow, skb, filter_data))
                        continue;

                __skb_unlink(skb, &flow->queue);
                fq_adjust_removal(fq, flow, skb);
                free_func(fq, tin, flow, skb);
        }

        fq_rejigger_backlog(fq, flow);
}

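/* Apply fq_flow_filter() to every flow currently linked to @tin. */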
static void fq_tin_filter(struct fq *fq,
                          struct fq_tin *tin,
                          fq_skb_filter_t filter_func,
                          void *filter_data,
                          fq_skb_free_t free_func)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        list_for_each_entry(flow, &tin->new_flows, flowchain)
                fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
        list_for_each_entry(flow, &tin->old_flows, flowchain)
                fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

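/* Drain and free everything queued on @flow and unlink it from the
 * flow and backlog lists.
 */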
static void fq_flow_reset(struct fq *fq,
                          struct fq_flow *flow,
                          fq_skb_free_t free_func)
{
        struct sk_buff *skb;

        while ((skb = fq_flow_dequeue(fq, flow)))
                free_func(fq, flow->tin, flow, skb);

        if (!list_empty(&flow->flowchain))
                list_del_init(&flow->flowchain);

        if (!list_empty(&flow->backlogchain))
                list_del_init(&flow->backlogchain);

        flow->tin = NULL;

        WARN_ON_ONCE(flow->backlog);
}

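/* Reset every flow still attached to @tin; the tin must account no
 * backlog afterwards.
 */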
static void fq_tin_reset(struct fq *fq,
                         struct fq_tin *tin,
                         fq_skb_free_t free_func)
{
        struct list_head *head;
        struct fq_flow *flow;

        for (;;) {
                head = &tin->new_flows;
                if (list_empty(head)) {
                        head = &tin->old_flows;
                        if (list_empty(head))
                                break;
                }

                flow = list_first_entry(head, struct fq_flow, flowchain);
                fq_flow_reset(fq, flow, free_func);
        }

        WARN_ON_ONCE(tin->backlog_bytes);
        WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
        INIT_LIST_HEAD(&flow->flowchain);
        INIT_LIST_HEAD(&flow->backlogchain);
        __skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
        INIT_LIST_HEAD(&tin->new_flows);
        INIT_LIST_HEAD(&tin->old_flows);
}

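/* Initialize @fq with @flows_cnt hash buckets and the default quantum,
 * packet limit and memory limit.  Returns -ENOMEM if the flow array
 * cannot be allocated.
 */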
static int fq_init(struct fq *fq, int flows_cnt)
{
        int i;

        memset(fq, 0, sizeof(fq[0]));
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
        fq->perturbation = prandom_u32();
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */

        fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
        if (!fq->flows)
                return -ENOMEM;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_init(&fq->flows[i]);

        return 0;
}

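/* Free all queued packets and release the flow array. */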
static void fq_reset(struct fq *fq,
                     fq_skb_free_t free_func)
{
        int i;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_reset(fq, &fq->flows[i], free_func);

        kfree(fq->flows);
        fq->flows = NULL;
}

#endif
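
/*
 * Usage sketch (illustrative only, not part of this header): an
 * includer such as a wireless driver supplies the callbacks typedef'd
 * in net/fq.h and calls the embedded functions under fq->lock.  The
 * callback and variable names below (my_dequeue, my_free,
 * my_get_default, my_fq, tin) are hypothetical examples.
 *
 *      struct fq my_fq;
 *      struct fq_tin tin;
 *
 *      fq_init(&my_fq, 4096);
 *      fq_tin_init(&tin);
 *
 *      spin_lock_bh(&my_fq.lock);
 *      idx = fq_flow_idx(&my_fq, skb);
 *      fq_tin_enqueue(&my_fq, &tin, idx, skb, my_free, my_get_default);
 *      skb = fq_tin_dequeue(&my_fq, &tin, my_dequeue);
 *      spin_unlock_bh(&my_fq.lock);
 *
 *      fq_reset(&my_fq, my_free);
 */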