Mastering pwru: Install, Use, and Understand Cilium’s eBPF Packet Tracing Tool
pwru, Cilium’s eBPF‑based packet tracing utility, offers fine‑grained network diagnostics; this guide covers kernel requirements, installation steps, command‑line options, real‑world iptables drop detection examples, and an in‑depth look at its eBPF map configuration and perf‑event output mechanism.
pwru is a network packet tracing tool released by Cilium, built on eBPF, providing fine‑grained packet diagnostics.
Installation
Requirements
pwru requires a Linux kernel newer than 5.5; the --output-skb option needs kernel 5.9 or later and the following kernel configurations enabled:
CONFIG_DEBUG_INFO_BTF=y (available since kernel 5.3)
CONFIG_KPROBES=y
CONFIG_PERF_EVENTS=y
CONFIG_BPF=y
CONFIG_BPF_SYSCALL=y
Usage
Command‑line options
Usage of ./pwru:
--filter-dst-ip string filter destination IP addr
--filter-dst-port uint16 filter destination port
--filter-func string filter kernel functions to be probed by name (exact match, supports RE2 regular expression)
--filter-mark uint32 filter skb mark
--filter-netns uint32 filter netns inode
--filter-proto string filter L4 protocol (tcp, udp, icmp)
--filter-src-ip string filter source IP addr
--filter-src-port uint16 filter source port
--output-limit-lines uint exit the program after the number of events has been received/printed
--output-meta print skb metadata
--output-relative-timestamp print relative timestamp per skb
--output-skb print skb
--output-stack print stack
--output-tuple print L4 tuple
Case Demonstration
The following example shows how pwru quickly identifies that a packet was dropped by an iptables rule:
Before adding the iptables rule:
After adding the rule:
iptables -t filter -I OUTPUT 1 -m tcp --proto tcp --dst 1.1.1.1/32 -j DROP
The trace shows a change in the nf_hook_slow function, where the packet is marked as NF_DROP and kfree_skb is called.
/*
 * nf_hook_slow - run every netfilter hook registered for one chain
 * (kernel excerpt quoted by the article; see net/netfilter/core.c).
 *
 * @skb:   packet being processed
 * @state: netfilter hook state for this traversal
 * @e:     array of hook entries for the chain
 * @s:     index of the first entry to evaluate
 *
 * Returns 1 when the packet passed every hook, a negative errno when a
 * hook dropped it, and 0 for any other verdict (presumably NF_STOLEN,
 * where the hook takes ownership of the skb).
 */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
const struct nf_hook_entries *e, unsigned int s)
{
unsigned int verdict;
int ret;
/* Walk the remaining hook entries, starting at index s. */
for (; s < e->num_hook_entries; s++) {
verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
break; /* this hook accepted; loop on to the next entry */
case NF_DROP:
/* This is the path pwru surfaces: the skb is freed right here,
 * which is why the trace shows kfree_skb after nf_hook_slow. */
kfree_skb(skb);
ret = NF_DROP_GETERR(verdict);
if (ret == 0)
ret = -EPERM; /* default error when the hook encoded none */
return ret;
case NF_QUEUE:
ret = nf_queue(skb, state, s, verdict);
if (ret == 1)
continue; /* packet was not queued; try the next hook entry */
return ret;
default:
return 0;
}
}
return 1; /* every hook accepted the packet */
}Implementation Details
pwru registers several kprobes that load eBPF code; the parameters passed to pwru update an eBPF map, which controls the filtering criteria.
// FilterCfg is the user-space image of the eBPF-side `struct config`:
// it is written as raw bytes into the configuration map, so field order,
// sizes and the trailing pad must stay in sync with the C definition.
type FilterCfg struct {
	FilterMark uint32 // value matched against the skb mark (--filter-mark)
	// Filter L3
	FilterIPv6  uint8    // presumably selects IPv6 vs IPv4 interpretation of the addresses below — confirm against the C filter code
	FilterSrcIP [16]byte // source IP filter; 16 bytes fits an IPv6 address
	FilterDstIP [16]byte // destination IP filter
	// Filter L4
	FilterProto   uint8  // IPPROTO_* value derived from --filter-proto
	FilterSrcPort uint16 // stored in network byte order (see ConfigBPFMap)
	FilterDstPort uint16 // stored in network byte order
	// Output toggles: non-zero enables the matching --output-* flag.
	OutputRelativeTS uint8
	OutputMeta       uint8
	OutputTuple      uint8
	OutputSkb        uint8
	OutputStack      uint8
	Pad              byte // explicit padding mirroring the packed C struct
}The map is populated based on command‑line flags:
// ConfigBPFMap translates the parsed command-line flags into a FilterCfg
// value and writes it at key 0 of the eBPF configuration map, where the
// kprobe programs look it up on every packet.
func ConfigBPFMap(flags *Flags, cfgMap *ebpf.Map) {
	cfg := FilterCfg{
		FilterMark: flags.FilterMark,
	}
	// Ports inside the skb are in network byte order, so convert once here
	// rather than on every lookup in the eBPF program.
	if flags.FilterSrcPort > 0 {
		cfg.FilterSrcPort = byteorder.HostToNetwork16(flags.FilterSrcPort)
	}
	if flags.FilterDstPort > 0 {
		cfg.FilterDstPort = byteorder.HostToNetwork16(flags.FilterDstPort)
	}
	switch strings.ToLower(flags.FilterProto) {
	case "":
		// No --filter-proto given: leave FilterProto at zero (no L4 filter).
	case "tcp":
		cfg.FilterProto = syscall.IPPROTO_TCP
	case "udp":
		cfg.FilterProto = syscall.IPPROTO_UDP
	case "icmp":
		cfg.FilterProto = syscall.IPPROTO_ICMP
	case "icmp6":
		cfg.FilterProto = syscall.IPPROTO_ICMPV6
	default:
		// Previously an unknown protocol was silently ignored, tracing
		// without the filter the user asked for. Fail loudly instead.
		log.Fatalf("Unsupported protocol for --filter-proto: %q", flags.FilterProto)
	}
	if err := cfgMap.Update(uint32(0), cfg, 0); err != nil {
		log.Fatalf("Failed to set filter map: %v", err)
	}
}During packet processing the eBPF program looks up this configuration and applies the filter:
/*
 * Filter/output configuration pushed from user space (the Go FilterCfg).
 * Declared packed so its byte layout matches the Go struct exactly; keep
 * both definitions in sync when adding fields.
 */
struct config {
u32 mark;             /* skb mark filter */
u8 ipv6;              /* address-family selector for saddr/daddr — confirm against filter() */
union addr saddr;     /* source address filter */
union addr daddr;     /* destination address filter */
u8 l4_proto;          /* IPPROTO_* filter */
u16 sport;            /* source port, network byte order */
u16 dport;            /* destination port, network byte order */
u8 output_timestamp;  /* --output-relative-timestamp */
u8 output_meta;       /* --output-meta */
u8 output_tuple;      /* --output-tuple */
u8 output_skb;        /* --output-skb */
u8 output_stack;      /* --output-stack */
u8 pad;               /* explicit padding byte, mirrored on the Go side */
} __attribute__((packed));
/*
 * Common kprobe body: filter the skb against the user-supplied config,
 * fill in an event record, and push it to user space through the perf
 * ring buffer. Always returns 0.
 */
static __always_inline int
handle_everything(struct sk_buff *skb, struct pt_regs *ctx) {
struct event_t event = {};
u32 index = 0;
/* The single config entry lives at key 0 of cfg_map. */
struct config *cfg = bpf_map_lookup_elem(&cfg_map, &index);
if (cfg) {
if (!filter(skb, cfg))
return 0; /* packet does not match the filters: emit nothing */
set_output(ctx, skb, &event, cfg);
}
event.pid = bpf_get_current_pid_tgid();
event.addr = PT_REGS_IP(ctx); /* address of the probed kernel function */
event.skb_addr = (u64) skb;   /* skb pointer — presumably used to correlate events per packet */
event.ts = bpf_ktime_get_ns();
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
return 0;
}On the user side, pwru reads the perf events and prints them:
// Read events emitted by the eBPF programs from the perf ring buffer
// and print them until the reader is closed or the context is cancelled.
rd, err := perf.NewReader(events, os.Getpagesize())
if err != nil {
	log.Fatalf("Creating perf event reader: %s", err)
}
defer rd.Close()

var event pwru.Event
for {
	record, err := rd.Read()
	if err != nil {
		if perf.IsClosed(err) {
			// The reader was closed from elsewhere: shut down cleanly.
			return
		}
		log.Printf("Reading from perf event reader: %s", err)
		// record is not valid after a failed Read; the original fell
		// through and tried to decode it. Skip to the next read.
		continue
	}
	if record.LostSamples != 0 {
		log.Printf("Perf event ring buffer full, dropped %d samples", record.LostSamples)
		continue
	}
	if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event); err != nil {
		log.Printf("Parsing perf event: %s", err)
		continue
	}
	output.Print(&event)

	// Stop once the context is cancelled. Note: a bare `break` inside
	// `select` (as in the original) only exits the select statement,
	// not the enclosing for loop, so return explicitly.
	select {
	case <-ctx.Done():
		return
	default:
	}
}This article is reproduced from Houmin’s blog; original link: https://url.hi-linux.com/et8wH.
Open Source Linux
Focused on sharing Linux/Unix content, covering fundamentals, system development, network programming, automation/operations, cloud computing, and related professional knowledge.
How this landed with the community
Was this worth your time?
0 Comments
Thoughtful readers leave field notes, pushback, and hard-won operational detail here.
