DECL_FUNC_RET(inet_csk_accept);

-static int sock_dns_event_handle(struct sock *sk,
-                                 struct msghdr *msg,
-                                 enum ebpf_event_type evt_type,
-                                 size_t size)
-{
-    if (!sk) {
-        return 0;
-    }
-
-    if (ebpf_events_is_trusted_pid())
-        return 0;
-
-    struct ebpf_dns_event *event = bpf_ringbuf_reserve(&ringbuf, sizeof(*event), 0);
-    if (!event)
-        return 0;
-
-    // fill in socket and process metadata
-    if (ebpf_sock_info__fill(&event->net, sk)) {
-        goto out;
-    }
-
-    struct task_struct *task = (struct task_struct *)bpf_get_current_task();
-    ebpf_pid_info__fill(&event->pids, task);
-    bpf_get_current_comm(event->comm, TASK_COMM_LEN);
-    event->hdr.ts = bpf_ktime_get_ns();
-
-    // filter out non-DNS packets
-    if (event->net.dport != 53 && event->net.sport != 53) {
-        goto out;
-    }
-
-    // Deal with the iov_iter type. Newer kernels added a ubuf type to the
-    // iov_iter union, which post-dates our vmlinux; they also added ITER_UBUF
-    // as the first value in the iter_type enum, which makes checking the type
-    // a tad hard (see the sketch below). In theory we can read from both
-    // types as long as we're careful.
-
-    struct iov_iter *from = &msg->msg_iter;
-
-    u64 nr_segs = get_iovec_nr_segs_or_max(from);
-    u64 iovec_size = BPF_CORE_READ(from, count);
-
-    const struct iovec *iov;
-    if (FIELD_OFFSET(iov_iter, __iov))
-        iov = (const struct iovec *)((char *)from + FIELD_OFFSET(iov_iter, __iov));
-    else if (bpf_core_field_exists(from->iov))
-        iov = BPF_CORE_READ(from, iov);
-    else {
-        bpf_printk("unknown offset in iovec structure, bug?");
-        goto out;
-    }
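-
-    // For illustration, a minimal sketch of the load-time check mentioned
-    // above, assuming a vmlinux.h new enough to define ITER_UBUF and the
-    // iter_type field (ours is not, hence the offset probing above):
-    //
-    //   if (bpf_core_enum_value_exists(enum iov_iter_type, ITER_UBUF) &&
-    //       BPF_CORE_READ(from, iter_type) == ITER_UBUF) {
-    //       // ubuf is a single contiguous buffer, not an iovec array
-    //   }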
-
-    if (nr_segs == 1) {
-        // Actually read in the raw packet data. Use the return value of
-        // recvmsg/the count value of sendmsg instead of the iovec count;
-        // the count of the iovec in udp_recvmsg is the size of the buffer,
-        // not the number of bytes read.
-        void *base = BPF_CORE_READ(iov, iov_base);
-        event->pkts[0].len = size;
-        // make the verifier happy; we can't have an out-of-bounds write
-        if (size > MAX_DNS_PACKET) {
-            bpf_printk("size of packet (%d) exceeds max packet size (%d), skipping", size,
-                       MAX_DNS_PACKET);
-            goto out;
-        }
-        // TODO: this will fail on recvmsg calls where the peek flag has been
-        // set. Changes to the udp_recvmsg function call in 5.18 make it a bit
-        // annoying to get the flags argument portably, so let it fail instead
-        // of manually skipping peek calls.
-        long readok = bpf_probe_read(event->pkts[0].pkt, size, base);
-        if (readok != 0) {
-            bpf_printk("invalid read from iovec structure: %d", readok);
-            goto out;
-        }
-    } else {
-        // We have multiple segments. We can't rely on the size value from the
-        // function, so revert to the iovec sizes to read into the buffer.
-        // In practice I haven't seen a DNS packet with more than one iovec
-        // segment; a UDP DNS packet is limited to 512 bytes, so I'm not sure
-        // this is possible.
-        for (int seg = 0; seg < nr_segs; seg++) {
-            if (seg >= MAX_NR_SEGS)
-                goto out;
-
-            struct iovec *cur_iov = (struct iovec *)&iov[seg];
-            void *base = BPF_CORE_READ(cur_iov, iov_base);
-            size_t bufsize = BPF_CORE_READ(cur_iov, iov_len);
-            event->pkts[seg].len = bufsize;
-            if (bufsize > sizeof(event->pkts[seg].pkt)) {
-                goto out;
-            }
-            bpf_probe_read(event->pkts[seg].pkt, bufsize, base);
-        }
-    }
-
-    event->hdr.type = EBPF_EVENT_NETWORK_DNS_PKT;
-    event->udp_evt = evt_type;
-    bpf_ringbuf_submit(event, 0);
-    return 0;
-
-out:
-    bpf_ringbuf_discard(event, 0);
-    return 0;
-}
-
static int sock_object_handle(struct sock *sk, enum ebpf_event_type evt_type)
{
    if (!sk)
@@ -148,9 +44,7 @@ static int sock_object_handle(struct sock *sk, enum ebpf_event_type evt_type)
}

/*
- =============================== TEST CODE ===============================
-
- Testing alternate code. This section will not be merged, or will be cleaned up.
+ =============================== DNS probes ===============================
*/

static int handle_consume(struct sk_buff *skb, int len, enum ebpf_event_type evt_type)
@@ -302,65 +196,6 @@ int BPF_KRETPROBE(kretprobe__skb_consume_udp, int ret)
    return handle_consume(kctx.skb, ret, EBPF_EVENT_NETWORK_UDP_RECVMSG);
}

-/*
- =============================== DNS probes ===============================
- */
-
-SEC("kprobe/udp_sendmsg")
-int BPF_KPROBE(kprobe__udp_sendmsg, struct sock *sk, struct msghdr *msg, size_t size)
-{
-    return sock_dns_event_handle(sk, msg, EBPF_EVENT_NETWORK_UDP_SENDMSG, size);
-}
-
-// We can't get the arguments from a kretprobe, so instead save off the pointer
-// in the kprobe, then fetch the pointer from a context map in the kretprobe.
-// SEC("kprobe/udp_recvmsg")
-// int BPF_KPROBE(kprobe__udp_recvmsg, struct sock *sk, struct msghdr *msg)
-// {
-//     struct udp_ctx kctx;
-
-//     // I suspect that using the PID_TID isn't the most reliable way to map
-//     // the sockets/iters; not sure what else we could use that's accessible
-//     // from the kretprobe, though.
-//     u64 pid_tid = bpf_get_current_pid_tgid();
-
-//     long iter_err = bpf_probe_read(&kctx.hdr, sizeof(kctx.hdr), &msg);
-//     if (iter_err != 0) {
-//         bpf_printk("error reading msg_iter in udp_recvmsg: %d", iter_err);
-//         return 0;
-//     }
-
-//     long sk_err = bpf_probe_read(&kctx.sk, sizeof(kctx.sk), &sk);
-//     if (sk_err != 0) {
-//         bpf_printk("error reading sock in udp_recvmsg: %d", sk_err);
-//         return 0;
-//     }
-
-//     long update_err = bpf_map_update_elem(&pkt_ctx, &pid_tid, &kctx, BPF_ANY);
-//     if (update_err != 0) {
-//         bpf_printk("error updating context map in udp_recvmsg: %d", update_err);
-//         return 0;
-//     }
-
-//     return 0;
-// }
-
-// SEC("kretprobe/udp_recvmsg")
-// int BPF_KRETPROBE(kretprobe__udp_recvmsg, int ret)
-// {
-//     u64 pid_tid = bpf_get_current_pid_tgid();
-//     void *vctx = bpf_map_lookup_elem(&pkt_ctx, &pid_tid);
-
-//     struct udp_ctx kctx;
-//     long read_err = bpf_probe_read(&kctx, sizeof(kctx), vctx);
-//     if (read_err != 0) {
-//         bpf_printk("error reading back context in udp_recvmsg: %d", read_err);
-//     }
-
-//     return sock_dns_event_handle(kctx.sk, kctx.hdr, EBPF_EVENT_NETWORK_UDP_RECVMSG, ret);
-// }
-
/*
=============================== TCP probes ===============================
*/