diff --git a/calico-vpp-agent/capture/capture_server.go b/calico-vpp-agent/capture/capture_server.go new file mode 100644 index 00000000..f7ea9b0e --- /dev/null +++ b/calico-vpp-agent/capture/capture_server.go @@ -0,0 +1,668 @@ +// Copyright (C) 2025 Cisco Systems Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capture + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "strconv" + "sync" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/projectcalico/vpp-dataplane/v3/config" + "github.com/projectcalico/vpp-dataplane/v3/vpplink" +) + +// CaptureType represents the type of packet capture +type CaptureType string + +const ( + CaptureTypeTrace CaptureType = "trace" + CaptureTypePcap CaptureType = "pcap" + CaptureTypeDispatch CaptureType = "dispatch" +) + +// CaptureStatus represents the current status of a capture +type CaptureStatus string + +const ( + CaptureStatusIdle CaptureStatus = "idle" + CaptureStatusRunning CaptureStatus = "running" + CaptureStatusStopping CaptureStatus = "stopping" +) + +// BPFFilter contains BPF filter parameters +type BPFFilter struct { + SrcIP string `json:"srcIP,omitempty"` + DstIP string `json:"dstIP,omitempty"` + SrcPort int `json:"srcPort,omitempty"` + DstPort int `json:"dstPort,omitempty"` + Protocol string `json:"protocol,omitempty"` +} + +// CaptureRequest represents a request to start a capture +type CaptureRequest struct { + Type CaptureType 
`json:"type"` + Count int `json:"count"` + Timeout int `json:"timeout"` + InterfaceType string `json:"interfaceType,omitempty"` + Interface string `json:"interface,omitempty"` + Filter *BPFFilter `json:"filter,omitempty"` +} + +// CaptureResponse represents the response to a capture request +type CaptureResponse struct { + Success bool `json:"success"` + Message string `json:"message"` + Status CaptureStatus `json:"status"` + FilePath string `json:"filePath,omitempty"` + Error string `json:"error,omitempty"` +} + +// StatusResponse represents the status of the capture server +type StatusResponse struct { + Status CaptureStatus `json:"status"` + CaptureType CaptureType `json:"captureType,omitempty"` + RemainingMs int64 `json:"remainingMs,omitempty"` + FilePath string `json:"filePath,omitempty"` +} + +// CaptureServer handles packet capture requests with mutex for single-instance execution +type CaptureServer struct { + log *logrus.Entry + vpp *vpplink.VppLink + httpServer *http.Server + + // Mutex to ensure only one capture runs at a time + captureMutex sync.Mutex + + // Current capture state + status CaptureStatus + captureType CaptureType + startTime time.Time + timeout int + stopChan chan struct{} + filePath string +} + +// NewCaptureServer creates a new CaptureServer +func NewCaptureServer(vpp *vpplink.VppLink, log *logrus.Entry) *CaptureServer { + port := *config.GetCalicoVppInitialConfig().CaptureServerPort + + mux := http.NewServeMux() + server := &CaptureServer{ + log: log, + vpp: vpp, + status: CaptureStatusIdle, + httpServer: &http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: mux, + }, + } + + // Register HTTP handlers + mux.HandleFunc("/api/status", server.handleStatus) + mux.HandleFunc("/api/trace", server.handleTrace) + mux.HandleFunc("/api/pcap", server.handlePcap) + mux.HandleFunc("/api/dispatch", server.handleDispatch) + mux.HandleFunc("/api/stop", server.handleStop) + + return server +} + +// ServeCapture starts the capture HTTP server +func 
(s *CaptureServer) ServeCapture(t *tomb.Tomb) error { + s.log.Infof("Starting capture server on port %d", *config.GetCalicoVppInitialConfig().CaptureServerPort) + + go func() { + err := s.httpServer.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + s.log.Errorf("Capture HTTP server error: %v", err) + } + }() + + <-t.Dying() + s.log.Warn("Capture server shutting down") + + // Stop any running capture + if s.status == CaptureStatusRunning && s.stopChan != nil { + close(s.stopChan) + } + + err := s.httpServer.Shutdown(context.Background()) + if err != nil { + return fmt.Errorf("could not shutdown capture HTTP server: %w", err) + } + + return nil +} + +// handleStatus returns the current status of the capture server +func (s *CaptureServer) handleStatus(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + s.captureMutex.Lock() + defer s.captureMutex.Unlock() + + response := StatusResponse{ + Status: s.status, + CaptureType: s.captureType, + FilePath: s.filePath, + } + + if s.status == CaptureStatusRunning { + elapsed := time.Since(s.startTime) + remaining := max(0, (time.Duration(s.timeout)*time.Second - elapsed)) + response.RemainingMs = remaining.Milliseconds() + } + + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(response) + if err != nil { + s.log.Warnf("Failed to encode status response: %v", err) + } +} + +// handleTrace handles trace capture requests +func (s *CaptureServer) handleTrace(w http.ResponseWriter, r *http.Request) { + s.handleCapture(w, r, CaptureTypeTrace) +} + +// handlePcap handles PCAP capture requests +func (s *CaptureServer) handlePcap(w http.ResponseWriter, r *http.Request) { + s.handleCapture(w, r, CaptureTypePcap) +} + +// handleDispatch handles dispatch trace requests +func (s *CaptureServer) handleDispatch(w http.ResponseWriter, r *http.Request) { + s.handleCapture(w, r, 
CaptureTypeDispatch) +} + +// parseQueryParams extracts capture parameters from query string +func (s *CaptureServer) parseQueryParams(r *http.Request) CaptureRequest { + query := r.URL.Query() + + req := CaptureRequest{ + Count: 1000, // default + Timeout: 30, // default + InterfaceType: query.Get("interfaceType"), + Interface: query.Get("interface"), + } + + countStr := query.Get("count") + if countStr != "" { + count, err := strconv.Atoi(countStr) + if err == nil && count > 0 { + req.Count = count + } + } + + timeoutStr := query.Get("timeout") + if timeoutStr != "" { + timeout, err := strconv.Atoi(timeoutStr) + if err == nil && timeout > 0 { + req.Timeout = timeout + } + } + + // Parse BPF filter parameters + srcIP := query.Get("srcIP") + dstIP := query.Get("dstIP") + protocol := query.Get("protocol") + srcPort := 0 + dstPort := 0 + + srcPortStr := query.Get("srcPort") + if srcPortStr != "" { + port, err := strconv.Atoi(srcPortStr) + if err == nil && port > 0 && port < 65536 { + srcPort = port + } + } + dstPortStr := query.Get("dstPort") + if dstPortStr != "" { + port, err := strconv.Atoi(dstPortStr) + if err == nil && port > 0 && port < 65536 { + dstPort = port + } + } + + if srcIP != "" || dstIP != "" || protocol != "" || srcPort > 0 || dstPort > 0 { + req.Filter = &BPFFilter{ + SrcIP: srcIP, + DstIP: dstIP, + Protocol: protocol, + SrcPort: srcPort, + DstPort: dstPort, + } + } + + return req +} + +// handleCapture is the common handler for all capture types +func (s *CaptureServer) handleCapture(w http.ResponseWriter, r *http.Request, captureType CaptureType) { + if r.Method != http.MethodGet && r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + // Try to acquire the mutex (non-blocking) + if !s.captureMutex.TryLock() { + response := CaptureResponse{ + Success: false, + Message: "Another capture is already running", + Status: CaptureStatusRunning, + Error: "capture_in_progress", + } + 
w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusConflict) + err := json.NewEncoder(w).Encode(response) + if err != nil { + s.log.Warnf("Failed to encode capture conflict response: %v", err) + } + return + } + defer s.captureMutex.Unlock() + + // Parse request from query parameters + req := s.parseQueryParams(r) + req.Type = captureType + + // Start capture + s.status = CaptureStatusRunning + s.captureType = captureType + s.startTime = time.Now() + s.timeout = req.Timeout + s.stopChan = make(chan struct{}) + + s.log.Infof("Starting %s capture: count=%d, timeout=%ds", captureType, req.Count, req.Timeout) + + // Run capture in goroutine + go s.runCapture(req) + + response := CaptureResponse{ + Success: true, + Message: fmt.Sprintf("%s capture started", captureType), + Status: CaptureStatusRunning, + } + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(response) + if err != nil { + s.log.Warnf("Failed to encode capture start response: %v", err) + } +} + +// handleStop stops the current capture +func (s *CaptureServer) handleStop(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet && r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + s.captureMutex.Lock() + defer s.captureMutex.Unlock() + + if s.status != CaptureStatusRunning { + response := CaptureResponse{ + Success: false, + Message: "No capture is running", + Status: s.status, + } + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(response) + if err != nil { + s.log.Warnf("Failed to encode stop response: %v", err) + } + return + } + + // Signal stop + if s.stopChan != nil { + close(s.stopChan) + } + s.status = CaptureStatusStopping + + response := CaptureResponse{ + Success: true, + Message: "Stop signal sent", + Status: CaptureStatusStopping, + } + w.Header().Set("Content-Type", "application/json") + err := 
json.NewEncoder(w).Encode(response) + if err != nil { + s.log.Warnf("Failed to encode stop success response: %v", err) + } +} + +// runCapture runs the actual capture using vpplink APIs +func (s *CaptureServer) runCapture(req CaptureRequest) { + defer func() { + s.captureMutex.Lock() + s.status = CaptureStatusIdle + // NOTE: We intentionally DO NOT clear captureType and filePath here + // They are preserved so that 'capture stop' can still download the file + // after capture has auto-stopped (timeout/packet count reached) + // They will be cleared when a new capture starts + s.stopChan = nil + s.captureMutex.Unlock() + }() + + // Set up timeout + timeoutChan := time.After(time.Duration(req.Timeout) * time.Second) + + // Build and apply BPF filter if specified + if req.Filter != nil { + bpfExpr := s.buildBPFFilter(req.Filter) + if bpfExpr != "" { + isPcap := req.Type == CaptureTypePcap || req.Type == CaptureTypeDispatch + err := s.applyBPFFilter(bpfExpr, isPcap) + if err != nil { + s.log.Warnf("Failed to apply BPF filter: %v", err) + } else { + defer func() { + err := s.clearBPFFilter(isPcap) + if err != nil { + s.log.Warnf("Failed to clear BPF filter: %v", err) + } + }() + } + } + } + + // Start the appropriate capture + var err error + switch req.Type { + case CaptureTypeTrace: + err = s.startTrace(req) + case CaptureTypePcap: + err = s.startPcap(req) + case CaptureTypeDispatch: + err = s.startDispatch(req) + } + + if err != nil { + s.log.Errorf("Failed to start %s capture: %v", req.Type, err) + return + } + + // Wait for completion or stop signal + select { + case <-timeoutChan: + s.log.Infof("%s capture completed (timeout)", req.Type) + case <-s.stopChan: + s.log.Infof("%s capture stopped by user", req.Type) + } + + // Stop the capture + switch req.Type { + case CaptureTypeTrace: + s.stopTrace() + case CaptureTypePcap: + s.stopPcap() + case CaptureTypeDispatch: + s.stopDispatch() + } + + s.cleanupAfterCapture(req) +} + +// buildBPFFilter builds a BPF filter 
expression from the filter parameters +func (s *CaptureServer) buildBPFFilter(filter *BPFFilter) string { + if filter == nil { + return "" + } + + var parts []string + + if filter.Protocol != "" { + parts = append(parts, filter.Protocol) + } + if filter.SrcIP != "" { + parts = append(parts, fmt.Sprintf("src host %s", filter.SrcIP)) + } + if filter.DstIP != "" { + parts = append(parts, fmt.Sprintf("dst host %s", filter.DstIP)) + } + if filter.SrcPort > 0 { + parts = append(parts, fmt.Sprintf("src port %d", filter.SrcPort)) + } + if filter.DstPort > 0 { + parts = append(parts, fmt.Sprintf("dst port %d", filter.DstPort)) + } + + if len(parts) == 0 { + return "" + } + + result := parts[0] + for i := 1; i < len(parts); i++ { + result += " and " + parts[i] + } + return result +} + +// applyBPFFilter applies a BPF filter using vpplink +func (s *CaptureServer) applyBPFFilter(expression string, isPcap bool) error { + s.log.Infof("Applying BPF filter: %s", expression) + + // Add the BPF filter expression + err := s.vpp.BpfAdd(expression) + if err != nil { + return fmt.Errorf("failed to set BPF filter: %w", err) + } + + // Enable BPF filtering function + err = s.vpp.SetBpfFunction(isPcap) + if err != nil { + return fmt.Errorf("failed to enable BPF filter function: %w", err) + } + + return nil +} + +// clearBPFFilter clears the BPF filter +func (s *CaptureServer) clearBPFFilter(isPcap bool) error { + s.log.Info("Clearing BPF filter") + + // Clear the filter function + err := s.vpp.UnsetBpfFunction(isPcap) + if err != nil { + return fmt.Errorf("failed to clear BPF filter function: %w", err) + } + + // Clear the BPF filter expression + err = s.vpp.BpfDel() + if err != nil { + return fmt.Errorf("failed to clear BPF filter: %w", err) + } + + return nil +} + +// startTrace starts a packet trace +func (s *CaptureServer) startTrace(req CaptureRequest) error { + s.log.Infof("Starting trace capture on interface type: %s", req.InterfaceType) + + // Clear existing traces + err := 
s.vpp.TraceClear() + if err != nil { + s.log.Warnf("Failed to clear existing traces: %v", err) + } + + // Get the input node index for the interface type + inputNode := "virtio-input" // default + if req.InterfaceType != "" { + inputNode = s.mapInterfaceTypeToInputNode(req.InterfaceType) + } + + nodeIndex, err := s.vpp.GetNodeIndex(inputNode) + if err != nil { + return fmt.Errorf("failed to get node index for %s: %w", inputNode, err) + } + + // Start trace capture + useFilter := req.Filter != nil && s.buildBPFFilter(req.Filter) != "" + err = s.vpp.TraceCapture(nodeIndex, uint32(req.Count), useFilter) + if err != nil { + return fmt.Errorf("failed to start trace capture: %w", err) + } + + s.filePath = "/var/run/vpp/trace.txt" + return nil +} + +// stopTrace stops the trace and saves output to /var/run/vpp/trace.txt (shared volume) +func (s *CaptureServer) stopTrace() { + s.log.Info("Stopping trace capture and dumping output") + + // Dump the trace output using binary API + traceOutput, err := s.vpp.TraceDump() + if err != nil { + s.log.Warnf("Failed to dump trace output: %v", err) + return + } + + // Save trace output to shared volume /var/run/vpp (accessible from VPP container) + if traceOutput != "" { + err = os.WriteFile("/var/run/vpp/trace.txt", []byte(traceOutput), 0644) + if err != nil { + s.log.Warnf("Failed to save trace output to /var/run/vpp/trace.txt: %v", err) + } else { + s.log.Infof("Trace output saved to /var/run/vpp/trace.txt (%d bytes)", len(traceOutput)) + } + } else { + s.log.Info("No trace output captured") + } + + // Clear the trace buffer + err = s.vpp.TraceClear() + if err != nil { + s.log.Warnf("Failed to clear trace buffer: %v", err) + } +} + +// startPcap starts a PCAP capture +func (s *CaptureServer) startPcap(req CaptureRequest) error { + s.log.Infof("Starting PCAP capture on interface: %s", req.Interface) + + useFilter := req.Filter != nil && s.buildBPFFilter(req.Filter) != "" + + // Start PCAP capture using vpplink + // Use swIfIndex 
0xFFFFFFFF for "any" interface + swIfIndex := uint32(0xFFFFFFFF) + err := s.vpp.PcapTraceOn("/tmp/trace.pcap", uint32(req.Count), 0, swIfIndex, true, true, false, useFilter, false, false) + if err != nil { + return fmt.Errorf("failed to start PCAP capture: %w", err) + } + + s.filePath = "/tmp/trace.pcap" + return nil +} + +// stopPcap stops the PCAP capture +func (s *CaptureServer) stopPcap() { + s.log.Info("Stopping PCAP capture") + + err := s.vpp.PcapTraceOff() + if err != nil { + s.log.Warnf("Failed to stop PCAP capture: %v", err) + } +} + +// startDispatch starts a dispatch trace +func (s *CaptureServer) startDispatch(req CaptureRequest) error { + s.log.Infof("Starting dispatch trace on interface type: %s", req.InterfaceType) + + useFilter := req.Filter != nil && s.buildBPFFilter(req.Filter) != "" + + // Start dispatch trace using vpplink + err := s.vpp.PcapDispatchTraceOn(uint32(req.Count), "/tmp/dispatch.pcap", useFilter) + if err != nil { + return fmt.Errorf("failed to start dispatch trace: %w", err) + } + + s.filePath = "/tmp/dispatch.pcap" + return nil +} + +// stopDispatch stops the dispatch trace +func (s *CaptureServer) stopDispatch() { + s.log.Info("Stopping dispatch trace") + + err := s.vpp.PcapDispatchTraceOff() + if err != nil { + s.log.Warnf("Failed to stop dispatch trace: %v", err) + } +} + +// cleanupAfterCapture reverts capture-specific settings so a new capture can start cleanly +func (s *CaptureServer) cleanupAfterCapture(req CaptureRequest) { + switch req.Type { + case CaptureTypeTrace: + err := s.vpp.TraceSetDefaultFunction() + if err != nil { + s.log.Warnf("Failed to reset trace filter function: %v", err) + } + case CaptureTypePcap, CaptureTypeDispatch: + err := s.vpp.PcapSetDefaultFunction() + if err != nil { + s.log.Warnf("Failed to reset pcap filter function: %v", err) + } + } + + if req.Filter != nil { + err := s.vpp.BpfDel() + if err != nil { + s.log.Warnf("Failed to clear BPF filter: %v", err) + } + } +} + +// 
mapInterfaceTypeToInputNode maps interface type to VPP input node +func (s *CaptureServer) mapInterfaceTypeToInputNode(interfaceType string) string { + switch interfaceType { + case "memif": + return "memif-input" + case "af_packet": + return "af-packet-input" + case "af_xdp": + return "af_xdp-input" + case "avf": + return "avf-input" + case "vmxnet3": + return "vmxnet3-input" + case "virtio", "tuntap", "": + return "virtio-input" + case "rdma": + return "rdma-input" + case "dpdk": + return "dpdk-input" + case "vcl": + return "session-queue" + default: + return "virtio-input" + } +} diff --git a/calico-vpp-agent/cmd/calico_vpp_dataplane.go b/calico-vpp-agent/cmd/calico_vpp_dataplane.go index aa6170f7..3209f5d6 100644 --- a/calico-vpp-agent/cmd/calico_vpp_dataplane.go +++ b/calico-vpp-agent/cmd/calico_vpp_dataplane.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/capture" "github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/cni" "github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/common" "github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/connectivity" @@ -162,6 +163,7 @@ func main() { } connectivityServer := connectivity.NewConnectivityServer(vpp, felixServer, clientv3, log.WithFields(logrus.Fields{"subcomponent": "connectivity"})) cniServer := cni.NewCNIServer(vpp, felixServer, log.WithFields(logrus.Fields{"component": "cni"})) + captureServer := capture.NewCaptureServer(vpp, log.WithFields(logrus.Fields{"component": "capture"})) /* Pubsub should now be registered */ @@ -263,6 +265,7 @@ func main() { Go(routingServer.ServeRouting) Go(serviceServer.ServeService) Go(cniServer.ServeCNI) + Go(captureServer.ServeCapture) // watch LocalSID if SRv6 is enabled if *config.GetCalicoVppFeatureGates().SRv6Enabled { diff --git a/cmd/calicovppctl/main.go b/cmd/calicovppctl/main.go index 372083a8..c69257da 100755 --- a/cmd/calicovppctl/main.go +++ 
b/cmd/calicovppctl/main.go @@ -21,9 +21,11 @@ import ( "compress/gzip" "context" "encoding/json" + "errors" "flag" "fmt" "io" + "net" "os" "os/exec" "os/signal" @@ -56,7 +58,6 @@ const ( bashCmd = "bash" vppctlPath = "/usr/bin/vppctl" vppSockPath = "/var/run/vpp/cli.sock" - sudoCmd = "sudo" dockerCmd = "docker" // Command templates @@ -66,8 +67,40 @@ const ( // Kubernetes client timeout kubeClientTimeout = 15 * time.Second + + // Capture lock file path (inside VPP pod) + captureLockFile = "/tmp/calicovppctl.lock" + + // Capture server port + captureServerPort = 9999 ) +// StatusResponse represents the status response from the capture server +type StatusResponse struct { + Status string `json:"status"` + CaptureType string `json:"captureType,omitempty"` + FilePath string `json:"filePath,omitempty"` + RemainingMs int64 `json:"remainingMs,omitempty"` +} + +// CaptureResponse represents the response from capture operations +type CaptureResponse struct { + Success bool `json:"success"` + Message string `json:"message"` + Status string `json:"status"` + FilePath string `json:"filePath,omitempty"` + Error string `json:"error,omitempty"` +} + +// CaptureLockInfo represents the information stored in the lock file +type CaptureLockInfo struct { + NodeName string `json:"node_name"` + Operation string `json:"operation"` + StartedAt time.Time `json:"started_at"` + Hostname string `json:"hostname"` + BPFActive bool `json:"bpf_active"` +} + type KubeClient struct { clientset *kubernetes.Clientset timeout time.Duration @@ -571,16 +604,24 @@ func printHelp() { fmt.Println("calicovppctl vppctl [-node NODENAME] [VPP_COMMANDS...] 
- Get a vppctl shell or run VPP commands on a specific node") fmt.Println("calicovppctl log [-f] [-component vpp|agent|felix] [-node NODENAME] - Get the logs of vpp (dataplane) or agent (controlplane) or felix daemon") fmt.Println("calicovppctl clear - Clear vpp internal stats") + fmt.Println("calicovppctl capture clear [-node NODENAME] - Clear all active captures and BPF filters (forced cleanup)") + fmt.Println("calicovppctl capture status [-node NODENAME] - Get current capture status via capture server") + fmt.Println("calicovppctl capture stop [-node NODENAME] [-output FILE] - Stop capture (if running) and download the capture file") fmt.Println("calicovppctl export - Create an archive with vpp & k8 system state for debugging") fmt.Println("calicovppctl exportnode [-node NODENAME] - Create an archive with vpp & k8 system state for a specific node") fmt.Println("calicovppctl gdb - Attach gdb to the running vpp on the current machine") fmt.Println("calicovppctl sh [-component vpp|agent] [-node NODENAME] - Get a shell in vpp (dataplane) or agent (controlplane) container") - fmt.Println("calicovppctl trace [-node NODENAME] - Setup VPP packet tracing") + fmt.Println("calicovppctl trace [-node NODENAME] [-use-api] - Setup VPP packet tracing") fmt.Println(" Optional params: [-count N] [-timeout SEC] [-interface phy|af_xdp|af_packet|avf|vmxnet3|virtio|rdma|dpdk|memif|vcl]") - fmt.Println("calicovppctl pcap [-node NODENAME] - Setup VPP PCAP tracing") + fmt.Println(" Filter params: [-srcip IP] [-dstip IP] [-srcport PORT] [-dstport PORT] [-protocol tcp|udp|icmp]") + fmt.Println("calicovppctl pcap [-node NODENAME] [-use-api] - Setup VPP PCAP tracing") fmt.Println(" Optional params: [-count N] [-timeout SEC] [-interface INTERFACE_NAME|any(default)] [-output FILE.pcap]") - fmt.Println("calicovppctl dispatch [-node NODENAME] - Setup VPP dispatch tracing") + fmt.Println(" Filter params: [-srcip IP] [-dstip IP] [-srcport PORT] [-dstport PORT] [-protocol tcp|udp|icmp]") + 
fmt.Println("calicovppctl dispatch [-node NODENAME] [-use-api] - Setup VPP dispatch tracing") fmt.Println(" Optional params: [-count N] [-timeout SEC] [-interface phy|af_xdp|af_packet|avf|vmxnet3|virtio|rdma|dpdk|memif|vcl] [-output FILE.pcap]") + fmt.Println(" Filter params: [-srcip IP] [-dstip IP] [-srcport PORT] [-dstport PORT] [-protocol tcp|udp|icmp]") + fmt.Println() + fmt.Println("Note: Use -use-api flag to use HTTP capture server (binary API) instead of vppctl commands") fmt.Println() } @@ -595,6 +636,14 @@ func main() { timeout = flag.Int("timeout", 30, "Timeout in seconds for trace/pcap/dispatch commands (default: 30)") interfaceType = flag.String("interface", "", "interface types for trace/dispatch; interface names for pcap. See help for supported types") output = flag.String("output", "", "Output file for pcap/dispatch commands") + // BPF filter flags + srcIP = flag.String("srcip", "", "Source IP address filter (10.0.0.20, 2a04:baba::20, etc.)") + dstIP = flag.String("dstip", "", "Destination IP address filter (169.254.0.1, fe80:face::1, etc.)") + srcPort = flag.Int("srcport", 0, "Source port filter (68, 546, etc.)") + dstPort = flag.Int("dstport", 0, "Destination port filter (67, 547, 80, 443, etc.)") + protocol = flag.String("protocol", "", "Protocol filter (icmp, tcp, udp)") + // API mode flag + useAPI = flag.Bool("use-api", false, "Use HTTP API via capture server instead of vppctl commands") ) // Custom usage function @@ -617,7 +666,7 @@ func main() { // Check if this is a known command if !commandFound && !strings.HasPrefix(arg, "-") { switch arg { - case "vppctl", "log", "clear", "export", "exportnode", "gdb", "sh", "trace", "pcap", "dispatch": + case "vppctl", "log", "clear", "export", "exportnode", "gdb", "sh", "trace", "pcap", "dispatch", "capture": command = arg commandFound = true commandArgs = args[i+1:] @@ -659,6 +708,14 @@ func main() { interfacePtr := flagSet.String("interface", "", "Interface: types (memif,tuntap,vcl) for trace/dispatch; 
interface names for pcap") outputPtr := flagSet.String("output", "", "Output file for pcap/dispatch commands") helpPtr := flagSet.Bool("help", false, "Show help message") + // BPF filter flags + srcIPPtr := flagSet.String("srcip", "", "Source IP address filter") + dstIPPtr := flagSet.String("dstip", "", "Destination IP address filter") + srcPortPtr := flagSet.Int("srcport", 0, "Source port filter") + dstPortPtr := flagSet.Int("dstport", 0, "Destination port filter") + protocolPtr := flagSet.String("protocol", "", "Protocol filter (tcp, udp, icmp)") + // API mode flag + useAPIPtr := flagSet.Bool("use-api", false, "Use HTTP API via capture server") // Parse all remaining arguments for flags var finalCommandArgs []string @@ -705,6 +762,37 @@ func main() { *followPtr = true case "-help", "--help", "-h": *helpPtr = true + case "-srcip", "--srcip": + if i+1 < len(allArgs) { + *srcIPPtr = allArgs[i+1] + i++ + } + case "-dstip", "--dstip": + if i+1 < len(allArgs) { + *dstIPPtr = allArgs[i+1] + i++ + } + case "-srcport", "--srcport": + if i+1 < len(allArgs) { + if portVal, err := strconv.Atoi(allArgs[i+1]); err == nil { + *srcPortPtr = portVal + } + i++ + } + case "-dstport", "--dstport": + if i+1 < len(allArgs) { + if portVal, err := strconv.Atoi(allArgs[i+1]); err == nil { + *dstPortPtr = portVal + } + i++ + } + case "-protocol", "--protocol", "-proto": + if i+1 < len(allArgs) { + *protocolPtr = allArgs[i+1] + i++ + } + case "-use-api", "--use-api": + *useAPIPtr = true } } else { // This is not a flag, add to final command args @@ -721,6 +809,12 @@ func main() { *interfaceType = *interfacePtr *output = *outputPtr *help = *helpPtr + *srcIP = *srcIPPtr + *dstIP = *dstIPPtr + *srcPort = *srcPortPtr + *dstPort = *dstPortPtr + *protocol = *protocolPtr + *useAPI = *useAPIPtr // Show help if requested if *help { @@ -862,9 +956,16 @@ func main() { handleError(fmt.Errorf("node name is required for trace command. 
Use -node flag"), "") } - err := traceCommand(k, *nodeName, *count, *timeout, *interfaceType) - if err != nil { - handleError(err, "Trace failed") + if *useAPI { + err := traceCommandAPI(k, *nodeName, *count, *timeout, *interfaceType, *srcIP, *dstIP, *protocol, *srcPort, *dstPort) + if err != nil { + handleError(err, "Trace (API) failed") + } + } else { + err := traceCommand(k, *nodeName, *count, *timeout, *interfaceType, *srcIP, *dstIP, *protocol, *srcPort, *dstPort) + if err != nil { + handleError(err, "Trace failed") + } } case "pcap": @@ -872,9 +973,16 @@ func main() { handleError(fmt.Errorf("node name is required for pcap command. Use -node flag"), "") } - err := pcapCommand(k, *nodeName, *count, *timeout, *interfaceType, *output) - if err != nil { - handleError(err, "PCAP failed") + if *useAPI { + err := pcapCommandAPI(k, *nodeName, *count, *timeout, *interfaceType, *output, *srcIP, *dstIP, *protocol, *srcPort, *dstPort) + if err != nil { + handleError(err, "PCAP (API) failed") + } + } else { + err := pcapCommand(k, *nodeName, *count, *timeout, *interfaceType, *output, *srcIP, *dstIP, *protocol, *srcPort, *dstPort) + if err != nil { + handleError(err, "PCAP failed") + } } case "dispatch": @@ -882,9 +990,56 @@ func main() { handleError(fmt.Errorf("node name is required for dispatch command. 
Use -node flag"), "") } - err := dispatchCommand(k, *nodeName, *count, *timeout, *interfaceType, *output) - if err != nil { - handleError(err, "Dispatch failed") + if *useAPI { + err := dispatchCommandAPI(k, *nodeName, *count, *timeout, *interfaceType, *output, *srcIP, *dstIP, *protocol, *srcPort, *dstPort) + if err != nil { + handleError(err, "Dispatch (API) failed") + } + } else { + err := dispatchCommand(k, *nodeName, *count, *timeout, *interfaceType, *output, *srcIP, *dstIP, *protocol, *srcPort, *dstPort) + if err != nil { + handleError(err, "Dispatch failed") + } + } + + case "capture": + if len(commandArgs) == 0 { + handleError(fmt.Errorf("capture command requires a subcommand. Use 'capture clear', 'capture status', or 'capture stop'"), "") + } + + switch commandArgs[0] { + case "clear": + if *nodeName == "" { + handleError(fmt.Errorf("node name is required for capture clear command. Use -node flag"), "") + } + + err := captureCleanupCommand(k, *nodeName) + if err != nil { + handleError(err, "Capture cleanup failed") + } + + case "status": + if *nodeName == "" { + handleError(fmt.Errorf("node name is required for capture status command. Use -node flag"), "") + } + + err := captureStatusCommand(k, *nodeName) + if err != nil { + handleError(err, "Capture status failed") + } + + case "stop": + if *nodeName == "" { + handleError(fmt.Errorf("node name is required for capture stop command. Use -node flag"), "") + } + + err := captureStopCommand(k, *nodeName, *output) + if err != nil { + handleError(err, "Capture stop failed") + } + + default: + handleError(fmt.Errorf("unknown capture subcommand: %s. 
Use 'capture clear', 'capture status', or 'capture stop'", commandArgs[0]), "") } default: @@ -1189,6 +1344,23 @@ func compressAndSaveRemoteFile(k *KubeClient, nodeName, remoteFile, localFile st return fmt.Errorf("could not find calico-vpp-node pod on node '%s': %v", nodeName, err) } + // Check if remote file exists and has content; remove it if empty + checkCmd := fmt.Sprintf(`test -s %q || { rm -f %q; exit 3; }`, remoteFile, remoteFile) + _, err = k.execInPod(namespace, podName, container, "sh", "-c", checkCmd) + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) && exitErr.ExitCode() == 3 { + fmt.Println() + printColored("red", "No packets were captured with the specified filter.") + printColored("grey", "This could mean:") + printColored("grey", " - No matching traffic occurred during the capture period") + printColored("grey", " - The filter expression may be too restrictive") + printColored("grey", " - Try running without filters to verify if there is traffic") + return nil + } + return fmt.Errorf("failed to check remote file: %v", err) + } + printColored("green", fmt.Sprintf("Compressing and downloading file from node '%s'", nodeName)) printColored("grey", fmt.Sprintf("Pod: %s", podName)) printColored("grey", fmt.Sprintf("Remote file: %s", remoteFile)) @@ -1317,12 +1489,397 @@ func mapInterfaceTypeToVppInputNode(k *KubeClient, interfaceType string) (string } } -func traceCommand(k *KubeClient, nodeName string, count int, timeout int, interfaceType string) error { +// The following functions are used with the --use-api flag when calicovppctl uses the HTTP capture server (binary API) + +// callCaptureAPI calls the capture server API via kubectl exec curl +func (k *KubeClient) callCaptureAPI(nodeName, endpoint string, queryParams map[string]string) (string, error) { + // Find the agent pod on the specified node + podName, err := k.findNodePod(nodeName, defaultPod, defaultNamespace) + if err != nil { + return "", fmt.Errorf("could not 
find calico-vpp-node pod on node '%s': %v", nodeName, err) + } + + // Build URL with query parameters + url := fmt.Sprintf("http://localhost:%d/api/%s", captureServerPort, endpoint) + if len(queryParams) > 0 { + var params []string + for key, value := range queryParams { + if value != "" { + params = append(params, fmt.Sprintf("%s=%s", key, value)) + } + } + if len(params) > 0 { + url += "?" + strings.Join(params, "&") + } + } + + // Execute curl via kubectl exec in the agent container + curlCmd := fmt.Sprintf("curl -s '%s'", url) + output, err := k.execInPod(defaultNamespace, podName, defaultContainerAgt, "sh", "-c", curlCmd) + if err != nil { + return "", fmt.Errorf("failed to call capture API: %v", err) + } + + return output, nil +} + +// getCaptureStatus gets the current status from the capture server +func (k *KubeClient) getCaptureStatus(nodeName string) (*StatusResponse, error) { + output, err := k.callCaptureAPI(nodeName, "status", nil) + if err != nil { + return nil, err + } + + var status StatusResponse + err = json.Unmarshal([]byte(output), &status) + if err != nil { + return nil, fmt.Errorf("failed to parse status response: %v", err) + } + + return &status, nil +} + +// startCaptureAPI starts a capture via the capture server API +func (k *KubeClient) startCaptureAPI(nodeName, captureType string, count, timeout int, interfaceType, srcIP, dstIP, protocol string, srcPort, dstPort int) (*CaptureResponse, error) { + params := map[string]string{ + "count": fmt.Sprintf("%d", count), + "timeout": fmt.Sprintf("%d", timeout), + } + + if interfaceType != "" { + params["interfaceType"] = interfaceType + } + if srcIP != "" { + params["srcIP"] = srcIP + } + if dstIP != "" { + params["dstIP"] = dstIP + } + if protocol != "" { + params["protocol"] = protocol + } + if srcPort > 0 { + params["srcPort"] = fmt.Sprintf("%d", srcPort) + } + if dstPort > 0 { + params["dstPort"] = fmt.Sprintf("%d", dstPort) + } + + output, err := k.callCaptureAPI(nodeName, captureType, params) 
+ if err != nil { + return nil, err + } + + var response CaptureResponse + err = json.Unmarshal([]byte(output), &response) + if err != nil { + return nil, fmt.Errorf("failed to parse capture response: %v", err) + } + + return &response, nil +} + +// stopCaptureAPI stops the current capture via the capture server API +func (k *KubeClient) stopCaptureAPI(nodeName string) (*CaptureResponse, error) { + output, err := k.callCaptureAPI(nodeName, "stop", nil) + if err != nil { + return nil, err + } + + var response CaptureResponse + err = json.Unmarshal([]byte(output), &response) + if err != nil { + return nil, fmt.Errorf("failed to parse stop response: %v", err) + } + + return &response, nil +} + +func printBpfFilterParams(srcIP, dstIP, protocol string, srcPort, dstPort int) { + printColored("grey", "BPF Filter parameters:") + if srcIP != "" { + printColored("grey", fmt.Sprintf(" Source IP: %s", srcIP)) + } + if dstIP != "" { + printColored("grey", fmt.Sprintf(" Destination IP: %s", dstIP)) + } + if protocol != "" { + printColored("grey", fmt.Sprintf(" Protocol: %s", protocol)) + } + if srcPort > 0 { + printColored("grey", fmt.Sprintf(" Source Port: %d", srcPort)) + } + if dstPort > 0 { + printColored("grey", fmt.Sprintf(" Destination Port: %d", dstPort)) + } + fmt.Println() +} + +// traceCommandAPI runs trace using the capture server API +func traceCommandAPI(k *KubeClient, nodeName string, count int, timeout int, interfaceType, srcIP, dstIP, protocol string, srcPort, dstPort int) error { + validatedNode, err := validateNodeName(k, nodeName) + if err != nil { + return err + } + + printColored("green", fmt.Sprintf("Starting packet trace via API on node '%s'", validatedNode)) + printColored("grey", fmt.Sprintf("Packet count: %d", count)) + printColored("grey", fmt.Sprintf("Timeout: %d seconds", timeout)) + if interfaceType != "" { + printColored("grey", fmt.Sprintf("Interface type: %s", interfaceType)) + } + if srcIP != "" || dstIP != "" || protocol != "" || srcPort > 0 
|| dstPort > 0 { + printBpfFilterParams(srcIP, dstIP, protocol, srcPort, dstPort) + } + + // Start capture via API + response, err := k.startCaptureAPI(validatedNode, "trace", count, timeout, interfaceType, srcIP, dstIP, protocol, srcPort, dstPort) + if err != nil { + return fmt.Errorf("failed to start trace: %v", err) + } + + if !response.Success { + return fmt.Errorf("trace failed: %s (error: %s)", response.Message, response.Error) + } + + printColored("green", response.Message) + fmt.Println() + printColored("blue", fmt.Sprintf("Capture will auto-stop after %d seconds or when %d packets are captured", timeout, count)) + printColored("grey", "Use 'calicovppctl capture status -node ' to check capture progress") + printColored("grey", "Use 'calicovppctl capture stop -node ' to stop and download the capture file") + + return nil +} + +// pcapCommandAPI runs pcap using the capture server API +func pcapCommandAPI(k *KubeClient, nodeName string, count int, timeout int, interfaceType, outputFile, srcIP, dstIP, protocol string, srcPort, dstPort int) error { + validatedNode, err := validateNodeName(k, nodeName) + if err != nil { + return err + } + + printColored("green", fmt.Sprintf("Starting PCAP capture via API on node '%s'", validatedNode)) + printColored("grey", fmt.Sprintf("Packet count: %d", count)) + printColored("grey", fmt.Sprintf("Timeout: %d seconds", timeout)) + if interfaceType != "" { + printColored("grey", fmt.Sprintf("Interface: %s", interfaceType)) + } + if srcIP != "" || dstIP != "" || protocol != "" || srcPort > 0 || dstPort > 0 { + printBpfFilterParams(srcIP, dstIP, protocol, srcPort, dstPort) + } + + // Start capture via API + response, err := k.startCaptureAPI(validatedNode, "pcap", count, timeout, interfaceType, srcIP, dstIP, protocol, srcPort, dstPort) + if err != nil { + return fmt.Errorf("failed to start pcap: %v", err) + } + + if !response.Success { + return fmt.Errorf("pcap failed: %s (error: %s)", response.Message, response.Error) + } + + 
printColored("green", response.Message) + fmt.Println() + printColored("blue", fmt.Sprintf("Capture will auto-stop after %d seconds or when %d packets are captured", timeout, count)) + printColored("grey", "Use 'calicovppctl capture status -node ' to check capture progress") + printColored("grey", "Use 'calicovppctl capture stop -node ' to stop and download the capture file") + + return nil +} + +// dispatchCommandAPI runs dispatch using the capture server API +func dispatchCommandAPI(k *KubeClient, nodeName string, count int, timeout int, interfaceType, outputFile, srcIP, dstIP, protocol string, srcPort, dstPort int) error { validatedNode, err := validateNodeName(k, nodeName) if err != nil { return err } + printColored("green", fmt.Sprintf("Starting dispatch trace via API on node '%s'", validatedNode)) + printColored("grey", fmt.Sprintf("Packet count: %d", count)) + printColored("grey", fmt.Sprintf("Timeout: %d seconds", timeout)) + if interfaceType != "" { + printColored("grey", fmt.Sprintf("Interface type: %s", interfaceType)) + } + if srcIP != "" || dstIP != "" || protocol != "" || srcPort > 0 || dstPort > 0 { + printBpfFilterParams(srcIP, dstIP, protocol, srcPort, dstPort) + } + + // Start capture via API + response, err := k.startCaptureAPI(validatedNode, "dispatch", count, timeout, interfaceType, srcIP, dstIP, protocol, srcPort, dstPort) + if err != nil { + return fmt.Errorf("failed to start dispatch: %v", err) + } + + if !response.Success { + return fmt.Errorf("dispatch failed: %s (error: %s)", response.Message, response.Error) + } + + printColored("green", response.Message) + fmt.Println() + printColored("blue", fmt.Sprintf("Capture will auto-stop after %d seconds or when %d packets are captured", timeout, count)) + printColored("grey", "Use 'calicovppctl capture status -node ' to check capture progress") + printColored("grey", "Use 'calicovppctl capture stop -node ' to stop and download the capture file") + + return nil +} + +// captureStatusCommand shows 
the current capture status via API +func captureStatusCommand(k *KubeClient, nodeName string) error { + validatedNode, err := validateNodeName(k, nodeName) + if err != nil { + return err + } + + status, err := k.getCaptureStatus(validatedNode) + if err != nil { + return fmt.Errorf("failed to get status: %v", err) + } + + printColored("green", fmt.Sprintf("Capture status on node '%s':", validatedNode)) + printColored("grey", fmt.Sprintf(" Status: %s", status.Status)) + if status.CaptureType != "" { + printColored("grey", fmt.Sprintf(" Capture Type: %s", status.CaptureType)) + } + if status.FilePath != "" { + printColored("grey", fmt.Sprintf(" File Path: %s", status.FilePath)) + } + if status.RemainingMs > 0 { + printColored("grey", fmt.Sprintf(" Time Remaining: %.1f seconds", float64(status.RemainingMs)/1000)) + } + + return nil +} + +// captureStopCommand stops the current capture (if running) and downloads the capture file +// Handles multiple scenarios: +// 1) Capture is still running -> stop it, then download file +// 2) Capture already stopped (timeout/packet count reached) -> just download file +// 3) No capture was started or file already downloaded -> error message +func captureStopCommand(k *KubeClient, nodeName string, outputFile string) error { + validatedNode, err := validateNodeName(k, nodeName) + if err != nil { + return err + } + + // First, get current status to understand the state + status, err := k.getCaptureStatus(validatedNode) + if err != nil { + return fmt.Errorf("failed to get capture status: %v", err) + } + + captureType := status.CaptureType + filePath := status.FilePath + + // If capture is running, stop it first + switch status.Status { + case "running": + printColored("blue", fmt.Sprintf("Stopping active %s capture on node '%s'...", captureType, validatedNode)) + response, err := k.stopCaptureAPI(validatedNode) + if err != nil { + return fmt.Errorf("failed to stop capture: %v", err) + } + if !response.Success { + printColored("red", 
fmt.Sprintf("Warning: Stop returned: %s", response.Message)) + } else { + printColored("green", "Capture stopped successfully") + } + // Use file path from stop response if available + if response.FilePath != "" { + filePath = response.FilePath + } + case "idle": + // Capture is not running - check if there's a file path from previous capture + if filePath == "" && captureType == "" { + return fmt.Errorf("no capture is running and no previous capture file found on node '%s'.\n"+ + "Either no capture was started, or the file was already downloaded and cleaned up.\n"+ + "Start a new capture with 'calicovppctl trace/pcap/dispatch -node %s --use-api'", validatedNode, validatedNode) + } + printColored("blue", fmt.Sprintf("Capture already stopped on node '%s', downloading file...", validatedNode)) + } + + // Determine remote and local file paths based on capture type + var remoteFile, localFile string + switch captureType { + case "trace": + remoteFile = "/var/run/vpp/trace.txt" + localFile = "./trace.txt.gz" + case "pcap": + remoteFile = "/tmp/trace.pcap" + localFile = "./trace.pcap.gz" + case "dispatch": + remoteFile = "/tmp/dispatch.pcap" + localFile = "./dispatch.pcap.gz" + default: + // Try to infer from filePath if captureType is empty + if filePath != "" { + remoteFile = filePath + localFile = "./" + filepath.Base(filePath) + ".gz" + } else { + return fmt.Errorf("unknown capture type and no file path available") + } + } + + // Override local file if user specified output + if outputFile != "" { + localFile = outputFile + if !strings.HasSuffix(localFile, ".gz") { + localFile = localFile + ".gz" + } + } + + // Download the file + err = compressAndSaveRemoteFile(k, validatedNode, remoteFile, localFile) + // Check if file doesn't exist (already downloaded or never created) + if err != nil { + return fmt.Errorf("failed to download capture file: %v\n"+ + "The file may have already been downloaded and cleaned up, or the capture produced no output", err) + } + + return 
nil +} + +// The following functions are used when calicovppctl runs in the legacy mode with vppctl commands via "kubectl exec" + +func traceCommand(k *KubeClient, nodeName string, count int, timeout int, interfaceType, srcIP, dstIP, protocol string, srcPort, dstPort int) error { + validatedNode, err := validateNodeName(k, nodeName) + if err != nil { + return err + } + + // Check for existing capture locks + lockInfo, err := checkCaptureLock(k, validatedNode) + if err != nil { + return fmt.Errorf("failed to check capture lock: %v", err) + } + if lockInfo != nil { + return fmt.Errorf("capture operation already running on node '%s'\n"+ + " Operation: %s\n"+ + " Started by: %s\n"+ + " Started at: %s\n"+ + " BPF filters active: %t\n\n"+ + "Use 'calicovppctl capture clear -node %s' to force cleanup if the previous operation failed", + validatedNode, lockInfo.Operation, lockInfo.Hostname, + lockInfo.StartedAt.Format("2006-01-02 15:04:05"), lockInfo.BPFActive, validatedNode) + } + + // Check if BPF filters will be used + bpfFilter := buildBPFFilter(srcIP, dstIP, protocol, srcPort, dstPort) + useBPF := bpfFilter != "" + + // Create capture lock + err = createCaptureLock(k, validatedNode, "trace", useBPF) + if err != nil { + return fmt.Errorf("failed to create capture lock: %v", err) + } + + // Ensure cleanup on exit + defer func() { + err := removeCaptureLock(k, validatedNode) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to remove capture lock: %v", err)) + } + }() + vppInputNode, _, err := mapInterfaceTypeToVppInputNode(k, interfaceType) if err != nil { return err @@ -1333,6 +1890,25 @@ func traceCommand(k *KubeClient, nodeName string, count int, timeout int, interf printColored("grey", fmt.Sprintf("Timeout: %d seconds", timeout)) printColored("grey", fmt.Sprintf("VPP Input Node: %s", vppInputNode)) printColored("grey", "Output file: ./trace.txt.gz") + + // Apply BPF filter if specified + if bpfFilter != "" { + printColored("grey", fmt.Sprintf("BPF 
Filter: %s", bpfFilter)) + err := applyBPFFilter(k, validatedNode, bpfFilter, false) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to apply BPF filter: %v", err)) + printColored("grey", "Continuing without filter...") + } else { + useBPF = true + defer func() { + printColored("blue", "Clearing BPF filter...") + err := clearBPFFilter(k, validatedNode, false) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to clear BPF filter: %v", err)) + } + }() + } + } fmt.Println() // Clear any existing traces first @@ -1343,8 +1919,12 @@ func traceCommand(k *KubeClient, nodeName string, count int, timeout int, interf // Add trace for specified interface type printColored("blue", "Starting packet trace...") - printColored("grey", fmt.Sprintf("Command: trace add %s %d", vppInputNode, count)) - _, err = k.vppctl(validatedNode, "trace", "add", vppInputNode, fmt.Sprintf("%d", count)) + traceCmd := []string{"trace", "add", vppInputNode, fmt.Sprintf("%d", count)} + if useBPF { + traceCmd = append(traceCmd, "filter") + } + printColored("grey", fmt.Sprintf("Command: %s", strings.Join(traceCmd, " "))) + _, err = k.vppctl(validatedNode, traceCmd...) 
if err != nil { return fmt.Errorf("failed to add trace: %v", err) } @@ -1432,12 +2012,46 @@ func traceCommand(k *KubeClient, nodeName string, count int, timeout int, interf return nil } -func pcapCommand(k *KubeClient, nodeName string, count int, timeout int, interfaceType, outputFile string) error { +func pcapCommand(k *KubeClient, nodeName string, count int, timeout int, interfaceType, outputFile, srcIP, dstIP, protocol string, srcPort, dstPort int) error { validatedNode, err := validateNodeName(k, nodeName) if err != nil { return err } + // Check for existing capture locks + lockInfo, err := checkCaptureLock(k, validatedNode) + if err != nil { + return fmt.Errorf("failed to check capture lock: %v", err) + } + if lockInfo != nil { + return fmt.Errorf("capture operation already running on node '%s'\n"+ + " Operation: %s\n"+ + " Started by: %s\n"+ + " Started at: %s\n"+ + " BPF filters active: %t\n\n"+ + "Use 'calicovppctl capture clear -node %s' to force cleanup if the previous operation failed", + validatedNode, lockInfo.Operation, lockInfo.Hostname, + lockInfo.StartedAt.Format("2006-01-02 15:04:05"), lockInfo.BPFActive, validatedNode) + } + + // Check if BPF filters will be used + bpfFilter := buildBPFFilter(srcIP, dstIP, protocol, srcPort, dstPort) + useBPF := bpfFilter != "" + + // Create capture lock + err = createCaptureLock(k, validatedNode, "pcap", useBPF) + if err != nil { + return fmt.Errorf("failed to create capture lock: %v", err) + } + + // Ensure cleanup on exit + defer func() { + err := removeCaptureLock(k, validatedNode) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to remove capture lock: %v", err)) + } + }() + // First, let's validate that we can access the VPP interfaces interfacesOutput, err := k.vppctl(validatedNode, "show", "interface") if err != nil { @@ -1479,7 +2093,12 @@ func pcapCommand(k *KubeClient, nodeName string, count int, timeout int, interfa printColored("grey", "No interface specified, using 'any' to 
capture on all interfaces") } - pcapCommand := fmt.Sprintf("pcap trace tx rx max %d intfc %s file trace.pcap", count, interfaceFilter) + pcapCommand := []string{ + "pcap", "trace", "tx", "rx", + "max", fmt.Sprintf("%d", count), + "intfc", interfaceFilter, + "file", "trace.pcap", + } printColored("green", fmt.Sprintf("Starting PCAP trace on node '%s'", validatedNode)) printColored("grey", fmt.Sprintf("Packet count: %d", count)) @@ -1488,11 +2107,33 @@ func pcapCommand(k *KubeClient, nodeName string, count int, timeout int, interfa if outputFile != "" { printColored("grey", fmt.Sprintf("Output file: ./%s.gz", outputFile)) } + + // Apply BPF filter if specified + if bpfFilter != "" { + printColored("grey", fmt.Sprintf("BPF Filter: %s", bpfFilter)) + err := applyBPFFilter(k, validatedNode, bpfFilter, true) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to apply BPF filter: %v", err)) + printColored("grey", "Continuing without filter...") + } else { + useBPF = true + defer func() { + printColored("blue", "Clearing BPF filter...") + err := clearBPFFilter(k, validatedNode, true) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to clear BPF filter: %v", err)) + } + }() + } + } fmt.Println() printColored("blue", "Starting PCAP trace...") - printColored("grey", fmt.Sprintf("Command: %s", pcapCommand)) - _, err = k.vppctl(validatedNode, strings.Split(pcapCommand, " ")...) + if useBPF { + pcapCommand = append(pcapCommand, "filter") + } + printColored("grey", fmt.Sprintf("Command: %s", strings.Join(pcapCommand, " "))) + _, err = k.vppctl(validatedNode, pcapCommand...) 
if err != nil { return fmt.Errorf("failed to start PCAP trace: %v", err) } @@ -1569,18 +2210,57 @@ func pcapCommand(k *KubeClient, nodeName string, count int, timeout int, interfa return nil } -func dispatchCommand(k *KubeClient, nodeName string, count int, timeout int, interfaceType, outputFile string) error { +func dispatchCommand(k *KubeClient, nodeName string, count int, timeout int, interfaceType, outputFile, srcIP, dstIP, protocol string, srcPort, dstPort int) error { validatedNode, err := validateNodeName(k, nodeName) if err != nil { return err } + // Check for existing capture locks + lockInfo, err := checkCaptureLock(k, validatedNode) + if err != nil { + return fmt.Errorf("failed to check capture lock: %v", err) + } + if lockInfo != nil { + return fmt.Errorf("capture operation already running on node '%s'\n"+ + " Operation: %s\n"+ + " Started by: %s\n"+ + " Started at: %s\n"+ + " BPF filters active: %t\n\n"+ + "Use 'calicovppctl capture clear -node %s' to force cleanup if the previous operation failed", + validatedNode, lockInfo.Operation, lockInfo.Hostname, + lockInfo.StartedAt.Format("2006-01-02 15:04:05"), lockInfo.BPFActive, validatedNode) + } + + // Check if BPF filters will be used + bpfFilter := buildBPFFilter(srcIP, dstIP, protocol, srcPort, dstPort) + useBPF := bpfFilter != "" + + // Create capture lock + err = createCaptureLock(k, validatedNode, "dispatch", useBPF) + if err != nil { + return fmt.Errorf("failed to create capture lock: %v", err) + } + + // Ensure cleanup on exit + defer func() { + err := removeCaptureLock(k, validatedNode) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to remove capture lock: %v", err)) + } + }() + vppInputNode, _, err := mapInterfaceTypeToVppInputNode(k, interfaceType) if err != nil { return err } - dispatchCommand := fmt.Sprintf("pcap dispatch trace on max %d buffer-trace %s %d", count, vppInputNode, count) + dispatchCmd := []string{ + "pcap", "dispatch", "trace", "on", + "max", 
fmt.Sprintf("%d", count), + "buffer-trace", vppInputNode, fmt.Sprintf("%d", count), + "file", "dispatch.pcap", + } printColored("green", fmt.Sprintf("Starting dispatch trace on node '%s'", validatedNode)) printColored("grey", fmt.Sprintf("Packet count: %d", count)) @@ -1589,11 +2269,33 @@ func dispatchCommand(k *KubeClient, nodeName string, count int, timeout int, int if outputFile != "" { printColored("grey", fmt.Sprintf("Output file: ./%s.gz", outputFile)) } + + // Apply BPF filter if specified + if bpfFilter != "" { + printColored("grey", fmt.Sprintf("BPF Filter: %s", bpfFilter)) + err := applyBPFFilter(k, validatedNode, bpfFilter, true) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to apply BPF filter: %v", err)) + printColored("grey", "Continuing without filter...") + } else { + useBPF = true + defer func() { + printColored("blue", "Clearing BPF filter...") + err := clearBPFFilter(k, validatedNode, true) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to clear BPF filter: %v", err)) + } + }() + } + } fmt.Println() printColored("blue", "Starting dispatch trace...") - printColored("grey", fmt.Sprintf("Command: %s", dispatchCommand)) - _, err = k.vppctl(validatedNode, strings.Split(dispatchCommand, " ")...) + if useBPF { + dispatchCmd = append(dispatchCmd, "filter") + } + printColored("grey", fmt.Sprintf("Command: %s", strings.Join(dispatchCmd, " "))) + _, err = k.vppctl(validatedNode, dispatchCmd...) 
if err != nil { return fmt.Errorf("failed to start dispatch trace: %v", err) } @@ -1706,3 +2408,291 @@ func parseVppInterfaces(output string) []string { return upInterfaces } + +func validateIP(ip string) error { + if ip == "" { + return nil + } + if net.ParseIP(ip) == nil { + return fmt.Errorf("invalid IP address: %s", ip) + } + return nil +} + +// buildBPFFilter constructs a BPF filter expression from the provided parameters +func buildBPFFilter(srcIP, dstIP, protocol string, srcPort, dstPort int) string { + var filters []string + + // Add protocol filter + if protocol != "" { + switch strings.ToLower(protocol) { + case "tcp": + filters = append(filters, "tcp") + case "udp": + filters = append(filters, "udp") + case "icmp": + filters = append(filters, "icmp") + default: + printColored("red", fmt.Sprintf("Warning: Unknown protocol '%s', ignoring", protocol)) + } + } + + // Add IP filters + if srcIP != "" { + err := validateIP(srcIP) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Invalid source IP '%s', ignoring", srcIP)) + } else { + filters = append(filters, fmt.Sprintf("src host %s", srcIP)) + } + } + if dstIP != "" { + err := validateIP(dstIP) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Invalid destination IP '%s', ignoring", dstIP)) + } else { + filters = append(filters, fmt.Sprintf("dst host %s", dstIP)) + } + } + + // Add port filters + if srcPort != 0 { + if srcPort > 0 && srcPort < 65536 { + filters = append(filters, fmt.Sprintf("src port %d", srcPort)) + } else { + printColored("red", fmt.Sprintf("Warning: Invalid source port '%d', ignoring", srcPort)) + } + } + if dstPort != 0 { + if dstPort > 0 && dstPort < 65536 { + filters = append(filters, fmt.Sprintf("dst port %d", dstPort)) + } else { + printColored("red", fmt.Sprintf("Warning: Invalid destination port '%d', ignoring", dstPort)) + } + } + + if len(filters) == 0 { + return "" + } + + return strings.Join(filters, " and ") +} + +func applyBPFFilter(k *KubeClient, 
nodeName, bpfFilter string, isPcap bool) error { + printColored("blue", fmt.Sprintf("Applying BPF filter: %s", bpfFilter)) + + // Set the BPF filter expression ()set bpf trace filter {{}}) + filterArg := fmt.Sprintf("{{%s}}", bpfFilter) + out, err := k.vppctl(nodeName, "set", "bpf", "trace", "filter", filterArg) + if err != nil { + return fmt.Errorf("failed to set BPF filter: %v", err) + } + // Check if pcap_compile failed (VPP returns this in stdout, not as error) + if strings.Contains(out, "Failed pcap_compile") { + return fmt.Errorf("BPF filter compilation failed: %s", out) + } + + // Enable BPF filtering function (set trace/pcap filter function bpf_trace_filter) + if isPcap { + _, err = k.vppctl(nodeName, "set", "pcap", "filter", "function", "bpf_trace_filter") + } else { + _, err = k.vppctl(nodeName, "set", "trace", "filter", "function", "bpf_trace_filter") + } + if err != nil { + return fmt.Errorf("failed to enable BPF filter function: %v", err) + } + + printColored("green", "BPF filter applied successfully") + return nil +} + +// clearBPFFilter removes BPF filters from VPP +func clearBPFFilter(k *KubeClient, nodeName string, isPcap bool) error { + // Remove the BPF filter expression (set bpf trace filter del) + _, err := k.vppctl(nodeName, "set", "bpf", "trace", "filter", "del") + if err != nil { + return fmt.Errorf("failed to remove BPF filter: %v", err) + } + + // Reset to default filter function (set trace/pcap filter function vnet_is_packet_traced) + if isPcap { + _, err = k.vppctl(nodeName, "set", "pcap", "filter", "function", "vnet_is_packet_traced") + } else { + _, err = k.vppctl(nodeName, "set", "trace", "filter", "function", "vnet_is_packet_traced") + } + if err != nil { + return fmt.Errorf("failed to reset filter function: %v", err) + } + + printColored("green", "BPF filter cleared successfully") + return nil +} + +// createCaptureLock creates a lock file for the specified node and operation +func createCaptureLock(k *KubeClient, nodeName, 
operation string, bpfActive bool) error { + // Get hostname + hostname, err := os.Hostname() + if err != nil { + return fmt.Errorf("failed to get hostname: %v", err) + } + + lockInfo := CaptureLockInfo{ + NodeName: nodeName, + Operation: operation, + StartedAt: time.Now(), + Hostname: hostname, + BPFActive: bpfActive, + } + + lockData, err := json.MarshalIndent(lockInfo, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal lock info: %v", err) + } + + // Find the pod on the specified node + podName, err := k.findNodePod(nodeName, defaultPod, defaultNamespace) + if err != nil { + return fmt.Errorf("could not find calico-vpp-node pod on node '%s': %v", nodeName, err) + } + + // Create lock file inside VPP pod using kubectl exec + createCmd := fmt.Sprintf("cat > %s", captureLockFile) + _, err = k.execInPod(defaultNamespace, podName, defaultContainerVpp, "sh", "-c", createCmd) + if err != nil { + return fmt.Errorf("failed to create lock file in VPP pod: %v", err) + } + + // Write the lock data to the file via stdin + cmd := exec.Command(kubectlCmd, "exec", "-i", "-n", defaultNamespace, + "-c", defaultContainerVpp, podName, "--", "sh", "-c", createCmd) + cmd.Stdin = strings.NewReader(string(lockData)) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to write lock file data: %v, output: %s", err, string(output)) + } + + return nil +} + +// checkCaptureLock checks if there's an active capture lock and returns conflict info +func checkCaptureLock(k *KubeClient, nodeName string) (*CaptureLockInfo, error) { + // Find the pod on the specified node + podName, err := k.findNodePod(nodeName, defaultPod, defaultNamespace) + if err != nil { + return nil, fmt.Errorf("could not find calico-vpp-node pod on node '%s': %v", nodeName, err) + } + + // Check if lock file exists in VPP pod + checkCmd := fmt.Sprintf("test -f %s", captureLockFile) + _, err = k.execInPod(defaultNamespace, podName, defaultContainerVpp, "sh", "-c", checkCmd) + if 
err != nil { + // Lock file doesn't exist + return nil, nil + } + + // Read lock file from VPP pod + readCmd := fmt.Sprintf("cat %s", captureLockFile) + lockData, err := k.execInPod(defaultNamespace, podName, defaultContainerVpp, "sh", "-c", readCmd) + if err != nil { + return nil, fmt.Errorf("failed to read lock file from VPP pod: %v", err) + } + + var lockInfo CaptureLockInfo + err = json.Unmarshal([]byte(lockData), &lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to parse lock file: %v", err) + } + + // Check if the lock is for the same node + if lockInfo.NodeName == nodeName { + return &lockInfo, nil + } + + return nil, nil // Lock exists but for different node (should not happen) +} + +// removeCaptureLock removes the capture lock file +func removeCaptureLock(k *KubeClient, nodeName string) error { + // Find the pod on the specified node + podName, err := k.findNodePod(nodeName, defaultPod, defaultNamespace) + if err != nil { + return fmt.Errorf("could not find calico-vpp-node pod on node '%s': %v", nodeName, err) + } + + // Check if lock file exists in VPP pod + checkCmd := fmt.Sprintf("test -f %s", captureLockFile) + _, err = k.execInPod(defaultNamespace, podName, defaultContainerVpp, "sh", "-c", checkCmd) + if err != nil { + // Lock file doesn't exist, nothing to remove + return nil + } + + // Remove lock file from VPP pod + removeCmd := fmt.Sprintf("rm -f %s", captureLockFile) + _, err = k.execInPod(defaultNamespace, podName, defaultContainerVpp, "sh", "-c", removeCmd) + if err != nil { + return fmt.Errorf("failed to remove lock file from VPP pod: %v", err) + } + + return nil +} + +// captureCleanupCommand performs forced cleanup of all capture operations +func captureCleanupCommand(k *KubeClient, nodeName string) error { + validatedNode, err := validateNodeName(k, nodeName) + if err != nil { + return err + } + + printColored("blue", fmt.Sprintf("Starting cleanup on node '%s'", validatedNode)) + + // Stop all active traces + 
printColored("blue", "Clearing traces...") + _, err = k.vppctl(validatedNode, "clear", "trace") + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to clear trace: %v", err)) + } else { + printColored("green", "trace cleared") + } + + // Stop PCAP captures + printColored("blue", "Stopping PCAP captures...") + _, err = k.vppctl(validatedNode, "pcap", "trace", "off") + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to stop PCAP trace: %v", err)) + } else { + printColored("green", "PCAP trace stopped") + } + + // Stop dispatch captures + printColored("blue", "Stopping dispatch captures...") + _, err = k.vppctl(validatedNode, "pcap", "dispatch", "trace", "off") + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to stop dispatch trace: %v", err)) + } else { + printColored("green", "dispatch trace stopped") + } + + // Clear BPF filters for both trace and pcap + printColored("blue", "Clearing BPF filters...") + err = clearBPFFilter(k, validatedNode, false) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to clear trace BPF filter: %v", err)) + } + err = clearBPFFilter(k, validatedNode, true) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to clear PCAP BPF filter: %v", err)) + } + + // Remove lock file + printColored("blue", "Removing lock file...") + err = removeCaptureLock(k, validatedNode) + if err != nil { + printColored("red", fmt.Sprintf("Warning: Failed to remove lock file: %v", err)) + } else { + printColored("green", "lock file removed") + } + + printColored("green", "Cleanup completed") + return nil +} diff --git a/config/config.go b/config/config.go index efbd6a56..1bf15ff2 100644 --- a/config/config.go +++ b/config/config.go @@ -472,6 +472,9 @@ type CalicoVppInitialConfigConfigType struct { //out of agent and vppmanager // HealthCheckPort is the port on which the health check HTTP server listens // Defaults to 9090 HealthCheckPort *uint32 `json:"healthCheckPort"` + 
// CaptureServerPort is the port on which the capture HTTP server listens + // for trace/pcap/dispatch commands. Defaults to 9999 + CaptureServerPort *uint32 `json:"captureServerPort"` } func (cfg *CalicoVppInitialConfigConfigType) Validate() (err error) { @@ -500,6 +503,9 @@ func (cfg *CalicoVppInitialConfigConfigType) Validate() (err error) { cfg.HealthCheckPort = DefaultToPtr( cfg.HealthCheckPort, 9090, ) + cfg.CaptureServerPort = DefaultToPtr( + cfg.CaptureServerPort, 9999, + ) return nil } func (cfg *CalicoVppInitialConfigConfigType) GetDefaultGWs() (gws []net.IP, err error) { diff --git a/vpplink/bpf.go b/vpplink/bpf.go new file mode 100644 index 00000000..8e1eeedb --- /dev/null +++ b/vpplink/bpf.go @@ -0,0 +1,143 @@ +// Copyright (C) 2025 Cisco Systems Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vpplink + +import ( + "fmt" + + "github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/bpf_trace_filter" + interfaces "github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/interface" + "github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/interface_types" +) + +// setPcapFilterFunction sets the filter function for PCAP capture +func (v *VppLink) setPcapFilterFunction(name string) error { + client := interfaces.NewServiceClient(v.GetConnection()) + _, err := client.PcapSetFilterFunction(v.GetContext(), &interfaces.PcapSetFilterFunction{ + FilterFunctionName: name, + }) + if err != nil { + return fmt.Errorf("failed to set pcap filter function: %w", err) + } + return nil +} + +// PcapSetDefaultFunction resets PCAP to use default filtering +func (v *VppLink) PcapSetDefaultFunction() error { + return v.setPcapFilterFunction("vnet_is_packet_traced") +} + +// SetBpfFunction enables BPF filtering for either PCAP or trace +func (v *VppLink) SetBpfFunction(isPcap bool) error { + if isPcap { + return v.setPcapFilterFunction("bpf_trace_filter") + } + return v.setTraceFilterFunction("bpf_trace_filter") +} + +// UnsetBpfFunction disables BPF filtering and reverts to default +func (v *VppLink) UnsetBpfFunction(isPcap bool) error { + if isPcap { + return v.PcapSetDefaultFunction() + } + return v.TraceSetDefaultFunction() +} + +// PcapTraceOn starts PCAP packet capture +func (v *VppLink) PcapTraceOn(filename string, maxPackets, maxBytesPerPacket, swIfIndex uint32, + captureRx, captureTx, captureDrop, useFilter, preallocateData, freeData bool, +) error { + client := interfaces.NewServiceClient(v.GetConnection()) + + _, err := client.PcapTraceOn(v.GetContext(), &interfaces.PcapTraceOn{ + CaptureRx: captureRx, + CaptureTx: captureTx, + CaptureDrop: captureDrop, + Filter: useFilter, + PreallocateData: preallocateData, + FreeData: freeData, + MaxPackets: maxPackets, + MaxBytesPerPacket: maxBytesPerPacket, + SwIfIndex: 
interface_types.InterfaceIndex(swIfIndex), + Filename: filename, + }) + if err != nil { + return fmt.Errorf("failed to start pcap trace on interface: %w", err) + } + return nil +} + +// PcapTraceOff stops PCAP packet capture +func (v *VppLink) PcapTraceOff() error { + client := interfaces.NewServiceClient(v.GetConnection()) + + _, err := client.PcapTraceOff(v.GetContext(), &interfaces.PcapTraceOff{}) + if err != nil { + return fmt.Errorf("failed to stop pcap trace on interface: %w", err) + } + return nil +} + +// PcapDispatchTraceOn starts dispatch trace capture +func (v *VppLink) PcapDispatchTraceOn(maxPackets uint32, filename string, useFilter bool) error { + client := interfaces.NewServiceClient(v.GetConnection()) + + _, err := client.PcapTraceOn(v.GetContext(), &interfaces.PcapTraceOn{ + CaptureRx: true, + CaptureTx: true, + CaptureDrop: false, + Filter: useFilter, + PreallocateData: false, + FreeData: false, + MaxPackets: maxPackets, + MaxBytesPerPacket: 0, + SwIfIndex: interface_types.InterfaceIndex(^uint32(0)), // all interfaces + Filename: filename, + }) + if err != nil { + return fmt.Errorf("failed to start dispatch trace: %w", err) + } + return nil +} + +// PcapDispatchTraceOff stops dispatch trace capture +func (v *VppLink) PcapDispatchTraceOff() error { + return v.PcapTraceOff() +} + +// bpfAddDelExpression adds or deletes a BPF filter expression +func (v *VppLink) bpfAddDelExpression(filter string, isAdd bool) error { + client := bpf_trace_filter.NewServiceClient(v.GetConnection()) + + _, err := client.BpfTraceFilterSetV2(v.GetContext(), &bpf_trace_filter.BpfTraceFilterSetV2{ + IsAdd: isAdd, + Filter: filter, + Optimize: true, + }) + if err != nil { + return fmt.Errorf("failed to update BPF filter: %w", err) + } + return nil +} + +// BpfAdd adds a BPF filter expression +func (v *VppLink) BpfAdd(filter string) error { + return v.bpfAddDelExpression(filter, true) +} + +// BpfDel removes all BPF filter expressions +func (v *VppLink) BpfDel() error { + 
return v.bpfAddDelExpression("", false) +} diff --git a/vpplink/generated/bindings/bpf_trace_filter/bpf_trace_filter.ba.go b/vpplink/generated/bindings/bpf_trace_filter/bpf_trace_filter.ba.go new file mode 100644 index 00000000..849fc94b --- /dev/null +++ b/vpplink/generated/bindings/bpf_trace_filter/bpf_trace_filter.ba.go @@ -0,0 +1,202 @@ +// Code generated by GoVPP's binapi-generator. DO NOT EDIT. + +// Package bpf_trace_filter contains generated bindings for API file bpf_trace_filter.api. +// +// Contents: +// - 4 messages +package bpf_trace_filter + +import ( + api "go.fd.io/govpp/api" + codec "go.fd.io/govpp/codec" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the GoVPP api package it is being compiled against. +// A compilation error at this line likely means your copy of the +// GoVPP api package needs to be updated. +const _ = api.GoVppAPIPackageIsVersion2 + +const ( + APIFile = "bpf_trace_filter" + APIVersion = "0.1.0" + VersionCrc = 0xb682a79a +) + +// /* +// - bpf_trace_filter.api - BPF Trace filter API +// * +// - Copyright (c) 2023 Cisco and/or its affiliates +// - Licensed under the Apache License, Version 2.0 (the "License"); +// - you may not use this file except in compliance with the License. +// - You may obtain a copy of the License at: +// * +// - http://www.apache.org/licenses/LICENSE-2.0 +// * +// - Unless required by applicable law or agreed to in writing, software +// - distributed under the License is distributed on an "AS IS" BASIS, +// - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// - See the License for the specific language governing permissions and +// - limitations under the License. +// +// BpfTraceFilterSet defines message 'bpf_trace_filter_set'. 
+type BpfTraceFilterSet struct { + IsAdd bool `binapi:"bool,name=is_add,default=true" json:"is_add,omitempty"` + Filter string `binapi:"string[],name=filter" json:"filter,omitempty"` +} + +func (m *BpfTraceFilterSet) Reset() { *m = BpfTraceFilterSet{} } +func (*BpfTraceFilterSet) GetMessageName() string { return "bpf_trace_filter_set" } +func (*BpfTraceFilterSet) GetCrcString() string { return "3171346e" } +func (*BpfTraceFilterSet) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *BpfTraceFilterSet) Size() (size int) { + if m == nil { + return 0 + } + size += 1 // m.IsAdd + size += 4 + len(m.Filter) // m.Filter + return size +} +func (m *BpfTraceFilterSet) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeBool(m.IsAdd) + buf.EncodeString(m.Filter, 0) + return buf.Bytes(), nil +} +func (m *BpfTraceFilterSet) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.IsAdd = buf.DecodeBool() + m.Filter = buf.DecodeString(0) + return nil +} + +// BpfTraceFilterSetReply defines message 'bpf_trace_filter_set_reply'. 
+type BpfTraceFilterSetReply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` +} + +func (m *BpfTraceFilterSetReply) Reset() { *m = BpfTraceFilterSetReply{} } +func (*BpfTraceFilterSetReply) GetMessageName() string { return "bpf_trace_filter_set_reply" } +func (*BpfTraceFilterSetReply) GetCrcString() string { return "e8d4e804" } +func (*BpfTraceFilterSetReply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *BpfTraceFilterSetReply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + return size +} +func (m *BpfTraceFilterSetReply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + return buf.Bytes(), nil +} +func (m *BpfTraceFilterSetReply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Retval = buf.DecodeInt32() + return nil +} + +// BpfTraceFilterSetV2 defines message 'bpf_trace_filter_set_v2'. +type BpfTraceFilterSetV2 struct { + IsAdd bool `binapi:"bool,name=is_add,default=true" json:"is_add,omitempty"` + Optimize bool `binapi:"bool,name=optimize,default=true" json:"optimize,omitempty"` + Filter string `binapi:"string[],name=filter" json:"filter,omitempty"` +} + +func (m *BpfTraceFilterSetV2) Reset() { *m = BpfTraceFilterSetV2{} } +func (*BpfTraceFilterSetV2) GetMessageName() string { return "bpf_trace_filter_set_v2" } +func (*BpfTraceFilterSetV2) GetCrcString() string { return "5615acbf" } +func (*BpfTraceFilterSetV2) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *BpfTraceFilterSetV2) Size() (size int) { + if m == nil { + return 0 + } + size += 1 // m.IsAdd + size += 1 // m.Optimize + size += 4 + len(m.Filter) // m.Filter + return size +} +func (m *BpfTraceFilterSetV2) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeBool(m.IsAdd) + buf.EncodeBool(m.Optimize) + 
buf.EncodeString(m.Filter, 0) + return buf.Bytes(), nil +} +func (m *BpfTraceFilterSetV2) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.IsAdd = buf.DecodeBool() + m.Optimize = buf.DecodeBool() + m.Filter = buf.DecodeString(0) + return nil +} + +// BpfTraceFilterSetV2Reply defines message 'bpf_trace_filter_set_v2_reply'. +type BpfTraceFilterSetV2Reply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` +} + +func (m *BpfTraceFilterSetV2Reply) Reset() { *m = BpfTraceFilterSetV2Reply{} } +func (*BpfTraceFilterSetV2Reply) GetMessageName() string { return "bpf_trace_filter_set_v2_reply" } +func (*BpfTraceFilterSetV2Reply) GetCrcString() string { return "e8d4e804" } +func (*BpfTraceFilterSetV2Reply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *BpfTraceFilterSetV2Reply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + return size +} +func (m *BpfTraceFilterSetV2Reply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + return buf.Bytes(), nil +} +func (m *BpfTraceFilterSetV2Reply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Retval = buf.DecodeInt32() + return nil +} + +func init() { file_bpf_trace_filter_binapi_init() } +func file_bpf_trace_filter_binapi_init() { + api.RegisterMessage((*BpfTraceFilterSet)(nil), "bpf_trace_filter_set_3171346e") + api.RegisterMessage((*BpfTraceFilterSetReply)(nil), "bpf_trace_filter_set_reply_e8d4e804") + api.RegisterMessage((*BpfTraceFilterSetV2)(nil), "bpf_trace_filter_set_v2_5615acbf") + api.RegisterMessage((*BpfTraceFilterSetV2Reply)(nil), "bpf_trace_filter_set_v2_reply_e8d4e804") +} + +// Messages returns list of all messages in this module. 
+func AllMessages() []api.Message { + return []api.Message{ + (*BpfTraceFilterSet)(nil), + (*BpfTraceFilterSetReply)(nil), + (*BpfTraceFilterSetV2)(nil), + (*BpfTraceFilterSetV2Reply)(nil), + } +} diff --git a/vpplink/generated/bindings/bpf_trace_filter/bpf_trace_filter_rpc.ba.go b/vpplink/generated/bindings/bpf_trace_filter/bpf_trace_filter_rpc.ba.go new file mode 100644 index 00000000..02f9f7eb --- /dev/null +++ b/vpplink/generated/bindings/bpf_trace_filter/bpf_trace_filter_rpc.ba.go @@ -0,0 +1,41 @@ +// Code generated by GoVPP's binapi-generator. DO NOT EDIT. + +package bpf_trace_filter + +import ( + "context" + + api "go.fd.io/govpp/api" +) + +// RPCService defines RPC service bpf_trace_filter. +type RPCService interface { + BpfTraceFilterSet(ctx context.Context, in *BpfTraceFilterSet) (*BpfTraceFilterSetReply, error) + BpfTraceFilterSetV2(ctx context.Context, in *BpfTraceFilterSetV2) (*BpfTraceFilterSetV2Reply, error) +} + +type serviceClient struct { + conn api.Connection +} + +func NewServiceClient(conn api.Connection) RPCService { + return &serviceClient{conn} +} + +func (c *serviceClient) BpfTraceFilterSet(ctx context.Context, in *BpfTraceFilterSet) (*BpfTraceFilterSetReply, error) { + out := new(BpfTraceFilterSetReply) + err := c.conn.Invoke(ctx, in, out) + if err != nil { + return nil, err + } + return out, api.RetvalToVPPApiError(out.Retval) +} + +func (c *serviceClient) BpfTraceFilterSetV2(ctx context.Context, in *BpfTraceFilterSetV2) (*BpfTraceFilterSetV2Reply, error) { + out := new(BpfTraceFilterSetV2Reply) + err := c.conn.Invoke(ctx, in, out) + if err != nil { + return nil, err + } + return out, api.RetvalToVPPApiError(out.Retval) +} diff --git a/vpplink/generated/bindings/tracedump/tracedump.ba.go b/vpplink/generated/bindings/tracedump/tracedump.ba.go new file mode 100644 index 00000000..36f22ba9 --- /dev/null +++ b/vpplink/generated/bindings/tracedump/tracedump.ba.go @@ -0,0 +1,793 @@ +// Code generated by GoVPP's binapi-generator. DO NOT EDIT. 
+ +// Package tracedump contains generated bindings for API file tracedump.api. +// +// Contents: +// - 1 enum +// - 17 messages +package tracedump + +import ( + "strconv" + + api "go.fd.io/govpp/api" + codec "go.fd.io/govpp/codec" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the GoVPP api package it is being compiled against. +// A compilation error at this line likely means your copy of the +// GoVPP api package needs to be updated. +const _ = api.GoVppAPIPackageIsVersion2 + +const ( + APIFile = "tracedump" + APIVersion = "0.2.0" + VersionCrc = 0x56abf80a +) + +// TraceFilterFlag defines enum 'trace_filter_flag'. +type TraceFilterFlag uint32 + +const ( + TRACE_FF_NONE TraceFilterFlag = 0 + TRACE_FF_INCLUDE_NODE TraceFilterFlag = 1 + TRACE_FF_EXCLUDE_NODE TraceFilterFlag = 2 + TRACE_FF_INCLUDE_CLASSIFIER TraceFilterFlag = 3 + TRACE_FF_EXCLUDE_CLASSIFIER TraceFilterFlag = 4 +) + +var ( + TraceFilterFlag_name = map[uint32]string{ + 0: "TRACE_FF_NONE", + 1: "TRACE_FF_INCLUDE_NODE", + 2: "TRACE_FF_EXCLUDE_NODE", + 3: "TRACE_FF_INCLUDE_CLASSIFIER", + 4: "TRACE_FF_EXCLUDE_CLASSIFIER", + } + TraceFilterFlag_value = map[string]uint32{ + "TRACE_FF_NONE": 0, + "TRACE_FF_INCLUDE_NODE": 1, + "TRACE_FF_EXCLUDE_NODE": 2, + "TRACE_FF_INCLUDE_CLASSIFIER": 3, + "TRACE_FF_EXCLUDE_CLASSIFIER": 4, + } +) + +func (x TraceFilterFlag) String() string { + s, ok := TraceFilterFlag_name[uint32(x)] + if ok { + return s + } + return "TraceFilterFlag(" + strconv.Itoa(int(x)) + ")" +} + +// trace_capture_packets +// - node_index - graph input node whose packets are captured +// - max_packets - maximum number of packets to capture +// - use_filter - if true, apply filters to select/reject packets +// - verbose - if true, set verbose packet capture flag +// - pre_capture_clear - if true, clear buffer before capture begins +// +// TraceCapturePackets defines message 'trace_capture_packets'. 
+type TraceCapturePackets struct { + NodeIndex uint32 `binapi:"u32,name=node_index" json:"node_index,omitempty"` + MaxPackets uint32 `binapi:"u32,name=max_packets" json:"max_packets,omitempty"` + UseFilter bool `binapi:"bool,name=use_filter" json:"use_filter,omitempty"` + Verbose bool `binapi:"bool,name=verbose" json:"verbose,omitempty"` + PreCaptureClear bool `binapi:"bool,name=pre_capture_clear" json:"pre_capture_clear,omitempty"` +} + +func (m *TraceCapturePackets) Reset() { *m = TraceCapturePackets{} } +func (*TraceCapturePackets) GetMessageName() string { return "trace_capture_packets" } +func (*TraceCapturePackets) GetCrcString() string { return "9e791a9b" } +func (*TraceCapturePackets) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceCapturePackets) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.NodeIndex + size += 4 // m.MaxPackets + size += 1 // m.UseFilter + size += 1 // m.Verbose + size += 1 // m.PreCaptureClear + return size +} +func (m *TraceCapturePackets) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeUint32(m.NodeIndex) + buf.EncodeUint32(m.MaxPackets) + buf.EncodeBool(m.UseFilter) + buf.EncodeBool(m.Verbose) + buf.EncodeBool(m.PreCaptureClear) + return buf.Bytes(), nil +} +func (m *TraceCapturePackets) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.NodeIndex = buf.DecodeUint32() + m.MaxPackets = buf.DecodeUint32() + m.UseFilter = buf.DecodeBool() + m.Verbose = buf.DecodeBool() + m.PreCaptureClear = buf.DecodeBool() + return nil +} + +// TraceCapturePacketsReply defines message 'trace_capture_packets_reply'. 
+type TraceCapturePacketsReply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` +} + +func (m *TraceCapturePacketsReply) Reset() { *m = TraceCapturePacketsReply{} } +func (*TraceCapturePacketsReply) GetMessageName() string { return "trace_capture_packets_reply" } +func (*TraceCapturePacketsReply) GetCrcString() string { return "e8d4e804" } +func (*TraceCapturePacketsReply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *TraceCapturePacketsReply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + return size +} +func (m *TraceCapturePacketsReply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + return buf.Bytes(), nil +} +func (m *TraceCapturePacketsReply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Retval = buf.DecodeInt32() + return nil +} + +// trace_clear_cache +// TraceClearCache defines message 'trace_clear_cache'. +type TraceClearCache struct{} + +func (m *TraceClearCache) Reset() { *m = TraceClearCache{} } +func (*TraceClearCache) GetMessageName() string { return "trace_clear_cache" } +func (*TraceClearCache) GetCrcString() string { return "51077d14" } +func (*TraceClearCache) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceClearCache) Size() (size int) { + if m == nil { + return 0 + } + return size +} +func (m *TraceClearCache) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + return buf.Bytes(), nil +} +func (m *TraceClearCache) Unmarshal(b []byte) error { + return nil +} + +// TraceClearCacheReply defines message 'trace_clear_cache_reply'. 
+type TraceClearCacheReply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` +} + +func (m *TraceClearCacheReply) Reset() { *m = TraceClearCacheReply{} } +func (*TraceClearCacheReply) GetMessageName() string { return "trace_clear_cache_reply" } +func (*TraceClearCacheReply) GetCrcString() string { return "e8d4e804" } +func (*TraceClearCacheReply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *TraceClearCacheReply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + return size +} +func (m *TraceClearCacheReply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + return buf.Bytes(), nil +} +func (m *TraceClearCacheReply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Retval = buf.DecodeInt32() + return nil +} + +// trace_clear_capture +// TraceClearCapture defines message 'trace_clear_capture'. +type TraceClearCapture struct{} + +func (m *TraceClearCapture) Reset() { *m = TraceClearCapture{} } +func (*TraceClearCapture) GetMessageName() string { return "trace_clear_capture" } +func (*TraceClearCapture) GetCrcString() string { return "51077d14" } +func (*TraceClearCapture) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceClearCapture) Size() (size int) { + if m == nil { + return 0 + } + return size +} +func (m *TraceClearCapture) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + return buf.Bytes(), nil +} +func (m *TraceClearCapture) Unmarshal(b []byte) error { + return nil +} + +// TraceClearCaptureReply defines message 'trace_clear_capture_reply'. 
+type TraceClearCaptureReply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` +} + +func (m *TraceClearCaptureReply) Reset() { *m = TraceClearCaptureReply{} } +func (*TraceClearCaptureReply) GetMessageName() string { return "trace_clear_capture_reply" } +func (*TraceClearCaptureReply) GetCrcString() string { return "e8d4e804" } +func (*TraceClearCaptureReply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *TraceClearCaptureReply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + return size +} +func (m *TraceClearCaptureReply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + return buf.Bytes(), nil +} +func (m *TraceClearCaptureReply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Retval = buf.DecodeInt32() + return nil +} + +// TraceDetails defines message 'trace_details'. +type TraceDetails struct { + ThreadID uint32 `binapi:"u32,name=thread_id" json:"thread_id,omitempty"` + Position uint32 `binapi:"u32,name=position" json:"position,omitempty"` + MoreThisThread uint8 `binapi:"u8,name=more_this_thread" json:"more_this_thread,omitempty"` + MoreThreads uint8 `binapi:"u8,name=more_threads" json:"more_threads,omitempty"` + Done uint8 `binapi:"u8,name=done" json:"done,omitempty"` + PacketNumber uint32 `binapi:"u32,name=packet_number" json:"packet_number,omitempty"` + TraceData string `binapi:"string[],name=trace_data" json:"trace_data,omitempty"` +} + +func (m *TraceDetails) Reset() { *m = TraceDetails{} } +func (*TraceDetails) GetMessageName() string { return "trace_details" } +func (*TraceDetails) GetCrcString() string { return "1553e9eb" } +func (*TraceDetails) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceDetails) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.ThreadID + size += 4 // m.Position + size += 1 // 
m.MoreThisThread + size += 1 // m.MoreThreads + size += 1 // m.Done + size += 4 // m.PacketNumber + size += 4 + len(m.TraceData) // m.TraceData + return size +} +func (m *TraceDetails) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeUint32(m.ThreadID) + buf.EncodeUint32(m.Position) + buf.EncodeUint8(m.MoreThisThread) + buf.EncodeUint8(m.MoreThreads) + buf.EncodeUint8(m.Done) + buf.EncodeUint32(m.PacketNumber) + buf.EncodeString(m.TraceData, 0) + return buf.Bytes(), nil +} +func (m *TraceDetails) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.ThreadID = buf.DecodeUint32() + m.Position = buf.DecodeUint32() + m.MoreThisThread = buf.DecodeUint8() + m.MoreThreads = buf.DecodeUint8() + m.Done = buf.DecodeUint8() + m.PacketNumber = buf.DecodeUint32() + m.TraceData = buf.DecodeString(0) + return nil +} + +// TraceDump defines message 'trace_dump'. +type TraceDump struct { + ClearCache uint8 `binapi:"u8,name=clear_cache" json:"clear_cache,omitempty"` + ThreadID uint32 `binapi:"u32,name=thread_id" json:"thread_id,omitempty"` + Position uint32 `binapi:"u32,name=position" json:"position,omitempty"` + MaxRecords uint32 `binapi:"u32,name=max_records" json:"max_records,omitempty"` +} + +func (m *TraceDump) Reset() { *m = TraceDump{} } +func (*TraceDump) GetMessageName() string { return "trace_dump" } +func (*TraceDump) GetCrcString() string { return "c7d6681f" } +func (*TraceDump) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceDump) Size() (size int) { + if m == nil { + return 0 + } + size += 1 // m.ClearCache + size += 4 // m.ThreadID + size += 4 // m.Position + size += 4 // m.MaxRecords + return size +} +func (m *TraceDump) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeUint8(m.ClearCache) + buf.EncodeUint32(m.ThreadID) + buf.EncodeUint32(m.Position) + 
buf.EncodeUint32(m.MaxRecords) + return buf.Bytes(), nil +} +func (m *TraceDump) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.ClearCache = buf.DecodeUint8() + m.ThreadID = buf.DecodeUint32() + m.Position = buf.DecodeUint32() + m.MaxRecords = buf.DecodeUint32() + return nil +} + +// TraceDumpReply defines message 'trace_dump_reply'. +type TraceDumpReply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` + LastThreadID uint32 `binapi:"u32,name=last_thread_id" json:"last_thread_id,omitempty"` + LastPosition uint32 `binapi:"u32,name=last_position" json:"last_position,omitempty"` + MoreThisThread uint8 `binapi:"u8,name=more_this_thread" json:"more_this_thread,omitempty"` + MoreThreads uint8 `binapi:"u8,name=more_threads" json:"more_threads,omitempty"` + FlushOnly uint8 `binapi:"u8,name=flush_only" json:"flush_only,omitempty"` + Done uint8 `binapi:"u8,name=done" json:"done,omitempty"` +} + +func (m *TraceDumpReply) Reset() { *m = TraceDumpReply{} } +func (*TraceDumpReply) GetMessageName() string { return "trace_dump_reply" } +func (*TraceDumpReply) GetCrcString() string { return "e0e87f9d" } +func (*TraceDumpReply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *TraceDumpReply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + size += 4 // m.LastThreadID + size += 4 // m.LastPosition + size += 1 // m.MoreThisThread + size += 1 // m.MoreThreads + size += 1 // m.FlushOnly + size += 1 // m.Done + return size +} +func (m *TraceDumpReply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + buf.EncodeUint32(m.LastThreadID) + buf.EncodeUint32(m.LastPosition) + buf.EncodeUint8(m.MoreThisThread) + buf.EncodeUint8(m.MoreThreads) + buf.EncodeUint8(m.FlushOnly) + buf.EncodeUint8(m.Done) + return buf.Bytes(), nil +} +func (m *TraceDumpReply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + 
m.Retval = buf.DecodeInt32() + m.LastThreadID = buf.DecodeUint32() + m.LastPosition = buf.DecodeUint32() + m.MoreThisThread = buf.DecodeUint8() + m.MoreThreads = buf.DecodeUint8() + m.FlushOnly = buf.DecodeUint8() + m.Done = buf.DecodeUint8() + return nil +} + +// TraceFilterFunctionDetails defines message 'trace_filter_function_details'. +type TraceFilterFunctionDetails struct { + Selected bool `binapi:"bool,name=selected" json:"selected,omitempty"` + Name string `binapi:"string[],name=name" json:"name,omitempty"` +} + +func (m *TraceFilterFunctionDetails) Reset() { *m = TraceFilterFunctionDetails{} } +func (*TraceFilterFunctionDetails) GetMessageName() string { return "trace_filter_function_details" } +func (*TraceFilterFunctionDetails) GetCrcString() string { return "28821359" } +func (*TraceFilterFunctionDetails) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *TraceFilterFunctionDetails) Size() (size int) { + if m == nil { + return 0 + } + size += 1 // m.Selected + size += 4 + len(m.Name) // m.Name + return size +} +func (m *TraceFilterFunctionDetails) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeBool(m.Selected) + buf.EncodeString(m.Name, 0) + return buf.Bytes(), nil +} +func (m *TraceFilterFunctionDetails) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Selected = buf.DecodeBool() + m.Name = buf.DecodeString(0) + return nil +} + +// TraceFilterFunctionDump defines message 'trace_filter_function_dump'. 
+type TraceFilterFunctionDump struct{} + +func (m *TraceFilterFunctionDump) Reset() { *m = TraceFilterFunctionDump{} } +func (*TraceFilterFunctionDump) GetMessageName() string { return "trace_filter_function_dump" } +func (*TraceFilterFunctionDump) GetCrcString() string { return "51077d14" } +func (*TraceFilterFunctionDump) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceFilterFunctionDump) Size() (size int) { + if m == nil { + return 0 + } + return size +} +func (m *TraceFilterFunctionDump) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + return buf.Bytes(), nil +} +func (m *TraceFilterFunctionDump) Unmarshal(b []byte) error { + return nil +} + +// TraceSetFilterFunction defines message 'trace_set_filter_function'. +type TraceSetFilterFunction struct { + FilterFunctionName string `binapi:"string[],name=filter_function_name" json:"filter_function_name,omitempty"` +} + +func (m *TraceSetFilterFunction) Reset() { *m = TraceSetFilterFunction{} } +func (*TraceSetFilterFunction) GetMessageName() string { return "trace_set_filter_function" } +func (*TraceSetFilterFunction) GetCrcString() string { return "616abb92" } +func (*TraceSetFilterFunction) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceSetFilterFunction) Size() (size int) { + if m == nil { + return 0 + } + size += 4 + len(m.FilterFunctionName) // m.FilterFunctionName + return size +} +func (m *TraceSetFilterFunction) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeString(m.FilterFunctionName, 0) + return buf.Bytes(), nil +} +func (m *TraceSetFilterFunction) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.FilterFunctionName = buf.DecodeString(0) + return nil +} + +// TraceSetFilterFunctionReply defines message 'trace_set_filter_function_reply'. 
+type TraceSetFilterFunctionReply struct { + Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"` +} + +func (m *TraceSetFilterFunctionReply) Reset() { *m = TraceSetFilterFunctionReply{} } +func (*TraceSetFilterFunctionReply) GetMessageName() string { return "trace_set_filter_function_reply" } +func (*TraceSetFilterFunctionReply) GetCrcString() string { return "e8d4e804" } +func (*TraceSetFilterFunctionReply) GetMessageType() api.MessageType { + return api.ReplyMessage +} + +func (m *TraceSetFilterFunctionReply) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Retval + return size +} +func (m *TraceSetFilterFunctionReply) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeInt32(m.Retval) + return buf.Bytes(), nil +} +func (m *TraceSetFilterFunctionReply) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Retval = buf.DecodeInt32() + return nil +} + +// trace_set_filters +// - flag - One of the trace_filter_flag values +// - node_index = The node-index to include/exclude +// - classifier_table_index = The include/exclude classifier table +// - count = The number of packets to include/exclude +// +// TraceSetFilters defines message 'trace_set_filters'. 
+type TraceSetFilters struct { + Flag TraceFilterFlag `binapi:"trace_filter_flag,name=flag" json:"flag,omitempty"` + Count uint32 `binapi:"u32,name=count" json:"count,omitempty"` + NodeIndex uint32 `binapi:"u32,name=node_index,default=4294967295" json:"node_index,omitempty"` + ClassifierTableIndex uint32 `binapi:"u32,name=classifier_table_index,default=4294967295" json:"classifier_table_index,omitempty"` +} + +func (m *TraceSetFilters) Reset() { *m = TraceSetFilters{} } +func (*TraceSetFilters) GetMessageName() string { return "trace_set_filters" } +func (*TraceSetFilters) GetCrcString() string { return "f522b44a" } +func (*TraceSetFilters) GetMessageType() api.MessageType { + return api.RequestMessage +} + +func (m *TraceSetFilters) Size() (size int) { + if m == nil { + return 0 + } + size += 4 // m.Flag + size += 4 // m.Count + size += 4 // m.NodeIndex + size += 4 // m.ClassifierTableIndex + return size +} +func (m *TraceSetFilters) Marshal(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, m.Size()) + } + buf := codec.NewBuffer(b) + buf.EncodeUint32(uint32(m.Flag)) + buf.EncodeUint32(m.Count) + buf.EncodeUint32(m.NodeIndex) + buf.EncodeUint32(m.ClassifierTableIndex) + return buf.Bytes(), nil +} +func (m *TraceSetFilters) Unmarshal(b []byte) error { + buf := codec.NewBuffer(b) + m.Flag = TraceFilterFlag(buf.DecodeUint32()) + m.Count = buf.DecodeUint32() + m.NodeIndex = buf.DecodeUint32() + m.ClassifierTableIndex = buf.DecodeUint32() + return nil +} + +// TraceSetFiltersReply defines message 'trace_set_filters_reply'. 
+// TraceSetFiltersReply is the status-only reply for 'trace_set_filters';
+// Retval is 0 on success, non-zero on failure.
+type TraceSetFiltersReply struct {
+	Retval int32 `binapi:"i32,name=retval" json:"retval,omitempty"`
+}
+
+func (m *TraceSetFiltersReply) Reset() { *m = TraceSetFiltersReply{} }
+func (*TraceSetFiltersReply) GetMessageName() string { return "trace_set_filters_reply" }
+func (*TraceSetFiltersReply) GetCrcString() string { return "e8d4e804" }
+func (*TraceSetFiltersReply) GetMessageType() api.MessageType {
+	return api.ReplyMessage
+}
+
+// Size returns the encoded wire size of the message in bytes.
+func (m *TraceSetFiltersReply) Size() (size int) {
+	if m == nil {
+		return 0
+	}
+	size += 4 // m.Retval
+	return size
+}
+func (m *TraceSetFiltersReply) Marshal(b []byte) ([]byte, error) {
+	if b == nil {
+		b = make([]byte, m.Size())
+	}
+	buf := codec.NewBuffer(b)
+	buf.EncodeInt32(m.Retval)
+	return buf.Bytes(), nil
+}
+func (m *TraceSetFiltersReply) Unmarshal(b []byte) error {
+	buf := codec.NewBuffer(b)
+	m.Retval = buf.DecodeInt32()
+	return nil
+}
+
+// trace_v2_details
+// - thread_id - thread index from which the packet come from
+// - position - position of the packet in its thread cache
+// - more - true if there is still more packets to dump for this thread
+// - trace_data - string packet data
+//
+// TraceV2Details defines message 'trace_v2_details'.
+// TraceV2Details is one streamed record of 'trace_v2_dump' output; TraceData
+// holds the formatted packet trace text, More is true while further packets
+// remain for ThreadID.
+type TraceV2Details struct {
+	ThreadID uint32 `binapi:"u32,name=thread_id" json:"thread_id,omitempty"`
+	Position uint32 `binapi:"u32,name=position" json:"position,omitempty"`
+	More bool `binapi:"bool,name=more" json:"more,omitempty"`
+	TraceData string `binapi:"string[],name=trace_data" json:"trace_data,omitempty"`
+}
+
+func (m *TraceV2Details) Reset() { *m = TraceV2Details{} }
+func (*TraceV2Details) GetMessageName() string { return "trace_v2_details" }
+func (*TraceV2Details) GetCrcString() string { return "91f87d52" }
+func (*TraceV2Details) GetMessageType() api.MessageType {
+	return api.ReplyMessage
+}
+
+// Size returns the encoded wire size of the message in bytes.
+func (m *TraceV2Details) Size() (size int) {
+	if m == nil {
+		return 0
+	}
+	size += 4 // m.ThreadID
+	size += 4 // m.Position
+	size += 1 // m.More
+	size += 4 + len(m.TraceData) // m.TraceData
+	return size
+}
+func (m *TraceV2Details) Marshal(b []byte) ([]byte, error) {
+	if b == nil {
+		b = make([]byte, m.Size())
+	}
+	buf := codec.NewBuffer(b)
+	buf.EncodeUint32(m.ThreadID)
+	buf.EncodeUint32(m.Position)
+	buf.EncodeBool(m.More)
+	buf.EncodeString(m.TraceData, 0)
+	return buf.Bytes(), nil
+}
+func (m *TraceV2Details) Unmarshal(b []byte) error {
+	buf := codec.NewBuffer(b)
+	m.ThreadID = buf.DecodeUint32()
+	m.Position = buf.DecodeUint32()
+	m.More = buf.DecodeBool()
+	m.TraceData = buf.DecodeString(0)
+	return nil
+}
+
+// trace_v2_dump
+// - thread_id - specific thread to dump from, ~0 to dump from all
+// - position - position of the first packet to dump in the per thread cache, ~0 to only clear the cache
+// - max - maximum of packets to dump from each thread
+// - clear_cache - dispose of any cached data before we begin
+//
+// TraceV2Dump defines message 'trace_v2_dump'.
+// TraceV2Dump requests a paged dump of the trace cache; the 4294967295 (~0)
+// ThreadID default means "all threads".
+type TraceV2Dump struct {
+	ThreadID uint32 `binapi:"u32,name=thread_id,default=4294967295" json:"thread_id,omitempty"`
+	Position uint32 `binapi:"u32,name=position" json:"position,omitempty"`
+	Max uint32 `binapi:"u32,name=max,default=50" json:"max,omitempty"`
+	ClearCache bool `binapi:"bool,name=clear_cache" json:"clear_cache,omitempty"`
+}
+
+func (m *TraceV2Dump) Reset() { *m = TraceV2Dump{} }
+func (*TraceV2Dump) GetMessageName() string { return "trace_v2_dump" }
+func (*TraceV2Dump) GetCrcString() string { return "83f88d8e" }
+func (*TraceV2Dump) GetMessageType() api.MessageType {
+	return api.RequestMessage
+}
+
+// Size returns the encoded wire size of the message in bytes.
+func (m *TraceV2Dump) Size() (size int) {
+	if m == nil {
+		return 0
+	}
+	size += 4 // m.ThreadID
+	size += 4 // m.Position
+	size += 4 // m.Max
+	size += 1 // m.ClearCache
+	return size
+}
+func (m *TraceV2Dump) Marshal(b []byte) ([]byte, error) {
+	if b == nil {
+		b = make([]byte, m.Size())
+	}
+	buf := codec.NewBuffer(b)
+	buf.EncodeUint32(m.ThreadID)
+	buf.EncodeUint32(m.Position)
+	buf.EncodeUint32(m.Max)
+	buf.EncodeBool(m.ClearCache)
+	return buf.Bytes(), nil
+}
+func (m *TraceV2Dump) Unmarshal(b []byte) error {
+	buf := codec.NewBuffer(b)
+	m.ThreadID = buf.DecodeUint32()
+	m.Position = buf.DecodeUint32()
+	m.Max = buf.DecodeUint32()
+	m.ClearCache = buf.DecodeBool()
+	return nil
+}
+
+// Each message is registered under "<name>_<crc>" so govpp can match it
+// against the running VPP's API definitions.
+func init() { file_tracedump_binapi_init() }
+func file_tracedump_binapi_init() {
+	api.RegisterMessage((*TraceCapturePackets)(nil), "trace_capture_packets_9e791a9b")
+	api.RegisterMessage((*TraceCapturePacketsReply)(nil), "trace_capture_packets_reply_e8d4e804")
+	api.RegisterMessage((*TraceClearCache)(nil), "trace_clear_cache_51077d14")
+	api.RegisterMessage((*TraceClearCacheReply)(nil), "trace_clear_cache_reply_e8d4e804")
+	api.RegisterMessage((*TraceClearCapture)(nil), "trace_clear_capture_51077d14")
+	api.RegisterMessage((*TraceClearCaptureReply)(nil), "trace_clear_capture_reply_e8d4e804")
+	api.RegisterMessage((*TraceDetails)(nil), "trace_details_1553e9eb")
+	api.RegisterMessage((*TraceDump)(nil), "trace_dump_c7d6681f")
+	api.RegisterMessage((*TraceDumpReply)(nil), "trace_dump_reply_e0e87f9d")
+	api.RegisterMessage((*TraceFilterFunctionDetails)(nil), "trace_filter_function_details_28821359")
+	api.RegisterMessage((*TraceFilterFunctionDump)(nil), "trace_filter_function_dump_51077d14")
+	api.RegisterMessage((*TraceSetFilterFunction)(nil), "trace_set_filter_function_616abb92")
+	api.RegisterMessage((*TraceSetFilterFunctionReply)(nil), "trace_set_filter_function_reply_e8d4e804")
+	api.RegisterMessage((*TraceSetFilters)(nil), "trace_set_filters_f522b44a")
+	api.RegisterMessage((*TraceSetFiltersReply)(nil), "trace_set_filters_reply_e8d4e804")
+	api.RegisterMessage((*TraceV2Details)(nil), "trace_v2_details_91f87d52")
+	api.RegisterMessage((*TraceV2Dump)(nil), "trace_v2_dump_83f88d8e")
+}
+
+// AllMessages returns the list of all messages in this module.
+func AllMessages() []api.Message {
+	return []api.Message{
+		(*TraceCapturePackets)(nil),
+		(*TraceCapturePacketsReply)(nil),
+		(*TraceClearCache)(nil),
+		(*TraceClearCacheReply)(nil),
+		(*TraceClearCapture)(nil),
+		(*TraceClearCaptureReply)(nil),
+		(*TraceDetails)(nil),
+		(*TraceDump)(nil),
+		(*TraceDumpReply)(nil),
+		(*TraceFilterFunctionDetails)(nil),
+		(*TraceFilterFunctionDump)(nil),
+		(*TraceSetFilterFunction)(nil),
+		(*TraceSetFilterFunctionReply)(nil),
+		(*TraceSetFilters)(nil),
+		(*TraceSetFiltersReply)(nil),
+		(*TraceV2Details)(nil),
+		(*TraceV2Dump)(nil),
+	}
+}
diff --git a/vpplink/generated/bindings/tracedump/tracedump_rpc.ba.go b/vpplink/generated/bindings/tracedump/tracedump_rpc.ba.go
new file mode 100644
index 00000000..dc8ae623
--- /dev/null
+++ b/vpplink/generated/bindings/tracedump/tracedump_rpc.ba.go
@@ -0,0 +1,207 @@
+// Code generated by GoVPP's binapi-generator. DO NOT EDIT.
+
+package tracedump
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	memclnt "github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/memclnt"
+	api "go.fd.io/govpp/api"
+)
+
+// RPCService defines RPC service tracedump.
+type RPCService interface {
+	TraceCapturePackets(ctx context.Context, in *TraceCapturePackets) (*TraceCapturePacketsReply, error)
+	TraceClearCache(ctx context.Context, in *TraceClearCache) (*TraceClearCacheReply, error)
+	TraceClearCapture(ctx context.Context, in *TraceClearCapture) (*TraceClearCaptureReply, error)
+	TraceDump(ctx context.Context, in *TraceDump) (RPCService_TraceDumpClient, error)
+	TraceFilterFunctionDump(ctx context.Context, in *TraceFilterFunctionDump) (RPCService_TraceFilterFunctionDumpClient, error)
+	TraceSetFilterFunction(ctx context.Context, in *TraceSetFilterFunction) (*TraceSetFilterFunctionReply, error)
+	TraceSetFilters(ctx context.Context, in *TraceSetFilters) (*TraceSetFiltersReply, error)
+	TraceV2Dump(ctx context.Context, in *TraceV2Dump) (RPCService_TraceV2DumpClient, error)
+}
+
+type serviceClient struct {
+	conn api.Connection
+}
+
+func NewServiceClient(conn api.Connection) RPCService {
+	return &serviceClient{conn}
+}
+
+func (c *serviceClient) TraceCapturePackets(ctx context.Context, in *TraceCapturePackets) (*TraceCapturePacketsReply, error) {
+	out := new(TraceCapturePacketsReply)
+	err := c.conn.Invoke(ctx, in, out)
+	if err != nil {
+		return nil, err
+	}
+	return out, api.RetvalToVPPApiError(out.Retval)
+}
+
+func (c *serviceClient) TraceClearCache(ctx context.Context, in *TraceClearCache) (*TraceClearCacheReply, error) {
+	out := new(TraceClearCacheReply)
+	err := c.conn.Invoke(ctx, in, out)
+	if err != nil {
+		return nil, err
+	}
+	return out, api.RetvalToVPPApiError(out.Retval)
+}
+
+func (c *serviceClient) TraceClearCapture(ctx context.Context, in *TraceClearCapture) (*TraceClearCaptureReply, error) {
+	out := new(TraceClearCaptureReply)
+	err := c.conn.Invoke(ctx, in, out)
+	if err != nil {
+		return nil, err
+	}
+	return out, api.RetvalToVPPApiError(out.Retval)
+}
+
+func (c *serviceClient) TraceDump(ctx context.Context, in *TraceDump) (RPCService_TraceDumpClient, error) {
+	stream, err := c.conn.NewStream(ctx)
+	if err != nil {
+		return nil, err
+	}
+	x := &serviceClient_TraceDumpClient{stream}
+	if err := x.Stream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type RPCService_TraceDumpClient interface {
+	Recv() (*TraceDetails, *TraceDumpReply, error)
+	api.Stream
+}
+
+type serviceClient_TraceDumpClient struct {
+	api.Stream
+}
+
+// Recv returns either the next TraceDetails record, or — at the end of the
+// dump — the final TraceDumpReply together with io.EOF. A non-zero Retval in
+// the reply is surfaced as a VPP API error instead of io.EOF.
+func (c *serviceClient_TraceDumpClient) Recv() (*TraceDetails, *TraceDumpReply, error) {
+	msg, err := c.Stream.RecvMsg()
+	if err != nil {
+		return nil, nil, err
+	}
+	switch m := msg.(type) {
+	case *TraceDetails:
+		return m, nil, nil
+	case *TraceDumpReply:
+		if err := api.RetvalToVPPApiError(m.Retval); err != nil {
+			c.Stream.Close()
+			return nil, m, err
+		}
+		err = c.Stream.Close()
+		if err != nil {
+			return nil, m, err
+		}
+		return nil, m, io.EOF
+	default:
+		return nil, nil, fmt.Errorf("unexpected message: %T %v", m, m)
+	}
+}
+
+// TraceFilterFunctionDump sends the dump request followed by a ControlPing;
+// the matching ControlPingReply marks end-of-stream in Recv.
+func (c *serviceClient) TraceFilterFunctionDump(ctx context.Context, in *TraceFilterFunctionDump) (RPCService_TraceFilterFunctionDumpClient, error) {
+	stream, err := c.conn.NewStream(ctx)
+	if err != nil {
+		return nil, err
+	}
+	x := &serviceClient_TraceFilterFunctionDumpClient{stream}
+	if err := x.Stream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err = x.Stream.SendMsg(&memclnt.ControlPing{}); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type RPCService_TraceFilterFunctionDumpClient interface {
+	Recv() (*TraceFilterFunctionDetails, error)
+	api.Stream
+}
+
+type serviceClient_TraceFilterFunctionDumpClient struct {
+	api.Stream
+}
+
+// Recv returns the next TraceFilterFunctionDetails; io.EOF signals the end of
+// the dump (marked by the ControlPingReply).
+func (c *serviceClient_TraceFilterFunctionDumpClient) Recv() (*TraceFilterFunctionDetails, error) {
+	msg, err := c.Stream.RecvMsg()
+	if err != nil {
+		return nil, err
+	}
+	switch m := msg.(type) {
+	case *TraceFilterFunctionDetails:
+		return m, nil
+	case *memclnt.ControlPingReply:
+		err = c.Stream.Close()
+		if err != nil {
+			return nil, err
+		}
+		return nil, io.EOF
+	default:
+		return nil, fmt.Errorf("unexpected message: %T %v", m, m)
+	}
+}
+
+func (c *serviceClient) TraceSetFilterFunction(ctx context.Context, in *TraceSetFilterFunction) (*TraceSetFilterFunctionReply, error) {
+	out := new(TraceSetFilterFunctionReply)
+	err := c.conn.Invoke(ctx, in, out)
+	if err != nil {
+		return nil, err
+	}
+	return out, api.RetvalToVPPApiError(out.Retval)
+}
+
+func (c *serviceClient) TraceSetFilters(ctx context.Context, in *TraceSetFilters) (*TraceSetFiltersReply, error) {
+	out := new(TraceSetFiltersReply)
+	err := c.conn.Invoke(ctx, in, out)
+	if err != nil {
+		return nil, err
+	}
+	return out, api.RetvalToVPPApiError(out.Retval)
+}
+
+// TraceV2Dump sends the dump request followed by a ControlPing; the matching
+// ControlPingReply marks end-of-stream in Recv.
+func (c *serviceClient) TraceV2Dump(ctx context.Context, in *TraceV2Dump) (RPCService_TraceV2DumpClient, error) {
+	stream, err := c.conn.NewStream(ctx)
+	if err != nil {
+		return nil, err
+	}
+	x := &serviceClient_TraceV2DumpClient{stream}
+	if err := x.Stream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err = x.Stream.SendMsg(&memclnt.ControlPing{}); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type RPCService_TraceV2DumpClient interface {
+	Recv() (*TraceV2Details, error)
+	api.Stream
+}
+
+type serviceClient_TraceV2DumpClient struct {
+	api.Stream
+}
+
+// Recv returns the next TraceV2Details; io.EOF signals the end of the dump
+// (marked by the ControlPingReply).
+func (c *serviceClient_TraceV2DumpClient) Recv() (*TraceV2Details, error) {
+	msg, err := c.Stream.RecvMsg()
+	if err != nil {
+		return nil, err
+	}
+	switch m := msg.(type) {
+	case *TraceV2Details:
+		return m, nil
+	case *memclnt.ControlPingReply:
+		err = c.Stream.Close()
+		if err != nil {
+			return nil, err
+		}
+		return nil, io.EOF
+	default:
+		return nil, fmt.Errorf("unexpected message: %T %v", m, m)
+	}
+}
diff --git a/vpplink/generated/gen.go b/vpplink/generated/gen.go
index 88b30948..edc212f5 100644
--- a/vpplink/generated/gen.go
+++ b/vpplink/generated/gen.go
@@ -8,4 +8,4 @@ import (
 )
 
 //go:generate go build -buildmode=plugin -o ./.bin/vpplink_plugin.so github.com/calico-vpp/vpplink/pkg
-//go:generate go run go.fd.io/govpp/cmd/binapi-generator --no-version-info --no-source-path-info --gen rpc,./.bin/vpplink_plugin.so -o ./bindings --input $VPP_DIR ikev2 gso arp interface ip ipip ipsec ip_neighbor tapv2 nat44_ed cnat af_packet feature ip6_nd punt vxlan af_xdp vlib virtio avf wireguard npol memif acl abf crypto_sw_scheduler sr rdma vmxnet3 pbl memclnt session vpe urpf classify ip_session_redirect
+//go:generate go run go.fd.io/govpp/cmd/binapi-generator --no-version-info --no-source-path-info --gen rpc,./.bin/vpplink_plugin.so -o ./bindings --input $VPP_DIR ikev2 gso arp interface ip ipip ipsec ip_neighbor tapv2 nat44_ed cnat af_packet feature ip6_nd punt vxlan af_xdp vlib virtio avf wireguard npol memif acl abf crypto_sw_scheduler sr rdma vmxnet3 pbl memclnt session vpe urpf classify ip_session_redirect bpf_trace_filter tracedump
diff --git a/vpplink/generated/generate.log b/vpplink/generated/generate.log
index d7e1ed79..f0d65950 100755
--- a/vpplink/generated/generate.log
+++ b/vpplink/generated/generate.log
@@ -1,10 +1,12 @@
-VPP Version : 25.10.0-8~gd4f438470
+VPP Version : 25.10.0-10~g83a8ed2c9
 Binapi-generator version : v0.11.0
 VPP Base commit : 4f366b5bb misc: Initial changes for stable/2510 branch
 ------------------ Cherry picked commits --------------------
 acl: acl-plugin custom policies
 cnat: [WIP] no k8s maglev from pods
 pbl: Port based balancer
+gerrit:44467/2 bpf_trace_filter: add raw IP packet support
+gerrit:44464/4 dispatch-trace: add filter support for pcap dispatch trace
 gerrit:43952/2 npol: fix test-debug
 gerrit:43710/12 npol: Network Policies plugin
 gerrit:revert:39675/5 Revert "ip-neighbor: do not use sas to determine NS source address"
diff --git a/vpplink/generated/vpp_clone_current.sh b/vpplink/generated/vpp_clone_current.sh
index ee0603a4..beec2122 100755
--- a/vpplink/generated/vpp_clone_current.sh
+++ b/vpplink/generated/vpp_clone_current.sh @@ -119,6 +119,10 @@ git_revert refs/changes/75/39675/5 # ip-neighbor: do not use sas to determine N git_cherry_pick refs/changes/10/43710/12 # 43710: npol: Network Policies plugin | https://gerrit.fd.io/r/c/vpp/+/43710 git_cherry_pick refs/changes/52/43952/2 # 43952: npol: fix test-debug | https://gerrit.fd.io/r/c/vpp/+/43952 +# bpf_trace_filter: add filter support for pcap dispatch trace and raw IP packet support +git_cherry_pick refs/changes/64/44464/4 # 44464: dispatch-trace: add filter support for pcap dispatch trace | https://gerrit.fd.io/r/c/vpp/+/44464 +git_cherry_pick refs/changes/67/44467/2 # 44467: bpf_trace_filter: add raw IP packet support | https://gerrit.fd.io/r/c/vpp/+/44467 + # --------------- private plugins --------------- # Generated with 'git format-patch --zero-commit -o ./patches/ HEAD^^^' git_apply_private 0001-pbl-Port-based-balancer.patch diff --git a/vpplink/tracedump.go b/vpplink/tracedump.go new file mode 100644 index 00000000..b0a9c691 --- /dev/null +++ b/vpplink/tracedump.go @@ -0,0 +1,106 @@ +// Copyright (C) 2025 Cisco Systems Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package vpplink
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/tracedump"
+)
+
+// setTraceFilterFunction configures the named VPP filter function used by the
+// packet tracer to decide which packets to record.
+func (v *VppLink) setTraceFilterFunction(name string) error {
+	client := tracedump.NewServiceClient(v.GetConnection())
+	_, err := client.TraceSetFilterFunction(v.GetContext(), &tracedump.TraceSetFilterFunction{
+		FilterFunctionName: name,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to set trace filter function: %w", err)
+	}
+	return nil
+}
+
+// TraceSetDefaultFunction resets trace to use default filtering
+// ("vnet_is_packet_traced").
+func (v *VppLink) TraceSetDefaultFunction() error {
+	return v.setTraceFilterFunction("vnet_is_packet_traced")
+}
+
+// TraceCapture starts VPP trace capture on an input node.
+// inputNode is the VPP graph node index to capture from, maxPackets bounds
+// the number of packets recorded, and useFilter enables the configured trace
+// filter. The capture buffer is cleared before capturing starts
+// (PreCaptureClear).
+func (v *VppLink) TraceCapture(inputNode uint32, maxPackets uint32, useFilter bool) error {
+	client := tracedump.NewServiceClient(v.GetConnection())
+
+	_, err := client.TraceCapturePackets(v.GetContext(), &tracedump.TraceCapturePackets{
+		NodeIndex:       inputNode,
+		MaxPackets:      maxPackets,
+		UseFilter:       useFilter,
+		Verbose:         true,
+		PreCaptureClear: true,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to capture trace: %w", err)
+	}
+	return nil
+}
+
+// TraceClear clears the trace buffer
+func (v *VppLink) TraceClear() error {
+	client := tracedump.NewServiceClient(v.GetConnection())
+
+	_, err := client.TraceClearCapture(v.GetContext(), &tracedump.TraceClearCapture{})
+	if err != nil {
+		return fmt.Errorf("failed to clear capture trace: %w", err)
+	}
+	return nil
+}
+
+// TraceDump dumps the trace buffer and returns the trace output as a string
+func (v *VppLink) TraceDump() (string, error) {
+	client := tracedump.NewServiceClient(v.GetConnection())
+
+	stream, err := client.TraceDump(v.GetContext(), &tracedump.TraceDump{
+		ClearCache: 1,
+		ThreadID:   0,
+		Position:   0,
+		MaxRecords: 50000, // assuming a max count of 50000
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to start trace dump: %w", err)
+	}
+
+	// Accumulate the per-packet trace text with a strings.Builder to avoid
+	// the quadratic cost of repeated string concatenation on large traces.
+	var sb strings.Builder
+	for {
+		details, reply, err := stream.Recv()
+		if err != nil {
+			// The stream client returns io.EOF (alongside the final reply)
+			// to signal a clean end of stream; match it with errors.Is
+			// rather than comparing error strings.
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return sb.String(), fmt.Errorf("failed to receive trace details: %w", err)
+		}
+
+		if details != nil {
+			sb.WriteString(details.TraceData)
+			if details.Done != 0 {
+				break
+			}
+		}
+
+		// A reply without an error also marks the end of the dump.
+		if reply != nil {
+			break
+		}
+	}
+
+	return sb.String(), nil
+}