net_buffer_tuner.bpf.c
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (c) 2023, Oracle and/or its affiliates.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <bpftune/bpftune.bpf.h>
#include "net_buffer_tuner.h"
#ifndef NET_RX_DROP
#define NET_RX_DROP 1
#endif
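
/* Globals shared with the bpftune userspace tuner via the BPF skeleton;
 * netdev_max_backlog is expected to be initialized by userspace to the
 * current net.core.netdev_max_backlog sysctl value.
 */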
__u64 drop_count = 0;
__u64 drop_interval_start = 0;
__u64 flow_limit_cpu_bitmap = 0;
int netdev_max_backlog = 0;
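
/* When built without BPFTUNE_LEGACY we attach via fexit to
 * enqueue_to_backlog() and receive the target cpu and return value as
 * arguments; legacy kernels only support a kretprobe, which sees just
 * the return value.
 */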
#ifdef BPFTUNE_LEGACY
SEC("kretprobe/enqueue_to_backlog")
int BPF_KRETPROBE(bpftune_enqueue_to_backlog, int ret)
#else
SEC("fexit/enqueue_to_backlog")
int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu,
	     unsigned int *qtail, int ret)
#endif
{
	int max_backlog = netdev_max_backlog;
	struct bpftune_event event = { 0 };
	long old[3], new[3];
	__u64 time, cpubit;

	/* a high-frequency event so bail early if we can... */
	if (ret != NET_RX_DROP)
		return 0;
	drop_count++;
	/* only sample a subset of drops to reduce overhead. */
	if ((drop_count % 4) != 0)
		return 0;
	/* if we drop more than 1/16 of the backlog queue size per minute,
	 * increase the backlog queue size.  This means as the queue size
	 * increases, the likelihood of hitting that limit decreases.
	 */
	time = bpf_ktime_get_ns();
	if (!drop_interval_start || (time - drop_interval_start) > MINUTE) {
		drop_count = 1;
		drop_interval_start = time;
	}
	if (drop_count < (max_backlog >> 4))
		return 0;
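
	/* drop threshold was reached; ask the userspace tuner to grow
	 * net.core.netdev_max_backlog via a sysctl event.
	 */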
	old[0] = max_backlog;
	new[0] = BPFTUNE_GROW_BY_DELTA(max_backlog);
	send_net_sysctl_event(NULL, NETDEV_MAX_BACKLOG_INCREASE,
			      NETDEV_MAX_BACKLOG, old, new, &event);
#ifdef BPFTUNE_LEGACY
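	/* the kretprobe attachment does not receive the cpu argument, so
	 * look it up here instead.
	 */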
	int cpu = bpf_get_smp_processor_id();
#endif
	/* ensure flow limits prioritize small flows on this cpu */
	if (cpu < 64) {
		cpubit = (__u64)1 << cpu;

		if (!(flow_limit_cpu_bitmap & cpubit)) {
			old[0] = flow_limit_cpu_bitmap;
			new[0] = flow_limit_cpu_bitmap | cpubit;
			if (!send_net_sysctl_event(NULL, FLOW_LIMIT_CPU_SET,
						   FLOW_LIMIT_CPU_BITMAP,
						   old, new, &event))
				flow_limit_cpu_bitmap = new[0];
		}
	}
	return 0;
}