Nim integration #159

Merged 10 commits on Jul 15, 2025
27 changes: 22 additions & 5 deletions cmd/nginx-supportpkg.go
@@ -33,6 +33,8 @@ func Execute() {

	var namespaces []string
	var product string
	var excludeDBData bool
	var excludeTimeSeriesData bool
	var jobList []jobs.Job

	var rootCmd = &cobra.Command{
@@ -47,6 +49,13 @@ func Execute() {
				os.Exit(1)
			}

			if excludeDBData {
				collector.ExcludeDBData = true
			}
			if excludeTimeSeriesData {
				collector.ExcludeTimeSeriesData = true
			}

			collector.Logger.Printf("Starting kubectl-nginx-supportpkg - version: %s - build: %s", version.Version, version.Build)
			collector.Logger.Printf("Input args are %v", os.Args)

@@ -57,17 +66,21 @@ func Execute() {
				jobList = slices.Concat(jobs.CommonJobList(), jobs.NGFJobList())
			case "ngx":
				jobList = slices.Concat(jobs.CommonJobList(), jobs.NGXJobList())
			case "nim":
				jobList = slices.Concat(jobs.CommonJobList(), jobs.NIMJobList())
			default:
				fmt.Printf("Error: product must be in the following list: [nic, ngf, ngx]\n")
				fmt.Printf("Error: product must be in the following list: [nic, ngf, ngx, nim]\n")
				os.Exit(1)
			}

			if collector.AllNamespacesExist() {
				failedJobs := 0
				for _, job := range jobList {
					fmt.Printf("Running job %s...", job.Name)
					err = job.Collect(collector)
					if err != nil {
					err, Skipped := job.Collect(collector)
					if Skipped {
						fmt.Print(" SKIPPED\n")
					} else if err != nil {
						fmt.Printf(" Error: %s\n", err)
						failedJobs++
					} else {
@@ -106,6 +119,9 @@ func Execute() {
		os.Exit(1)
	}

	rootCmd.Flags().BoolVarP(&excludeDBData, "exclude-db-data", "d", false, "exclude DB data collection")
	rootCmd.Flags().BoolVarP(&excludeTimeSeriesData, "exclude-time-series-data", "t", false, "exclude time series data collection")

	versionStr := "nginx-supportpkg - version: " + version.Version + " - build: " + version.Build + "\n"
	rootCmd.SetVersionTemplate(versionStr)
	rootCmd.Version = versionStr
@@ -115,8 +131,9 @@ func Execute() {
"Usage:" +
"\n nginx-supportpkg -h|--help" +
"\n nginx-supportpkg -v|--version" +
"\n nginx-supportpkg [-n|--namespace] ns1 [-n|--namespace] ns2 [-p|--product] [nic,ngf,ngx]" +
"\n nginx-supportpkg [-n|--namespace] ns1,ns2 [-p|--product] [nic,ngf,ngx] \n")
"\n nginx-supportpkg [-n|--namespace] ns1 [-n|--namespace] ns2 [-p|--product] [nic,ngf,ngx,nim]" +
"\n nginx-supportpkg [-n|--namespace] ns1,ns2 [-p|--product] [nic,ngf,ngx,nim]" +
"\n nginx-supportpkg [-n|--namespace] ns1 [-n|--namespace] ns2 [-p|--product] [nim] [-d|--exclude-db-data] [-t|--exclude-time-series-data] \n")

if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
98 changes: 83 additions & 15 deletions pkg/data_collector/data_collector.go
@@ -24,9 +24,15 @@ import (
"compress/gzip"
"context"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strconv"
"time"

helmClient "github.com/mittwald/go-helm-client"
"github.com/nginxinc/nginx-k8s-supportpkg/pkg/crds"
"io"
corev1 "k8s.io/api/core/v1"
crdClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,23 +44,20 @@ import (
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/homedir"
metricsClient "k8s.io/metrics/pkg/client/clientset/versioned"
"log"
"os"
"path/filepath"
"strconv"
"time"
)

type DataCollector struct {
	BaseDir string
	Namespaces []string
	Logger *log.Logger
	LogFile *os.File
	K8sRestConfig *rest.Config
	K8sCoreClientSet *kubernetes.Clientset
	K8sCrdClientSet *crdClient.Clientset
	K8sMetricsClientSet *metricsClient.Clientset
	K8sHelmClientSet map[string]helmClient.Client
	BaseDir string
	Namespaces []string
	Logger *log.Logger
	LogFile *os.File
	K8sRestConfig *rest.Config
	K8sCoreClientSet *kubernetes.Clientset
	K8sCrdClientSet *crdClient.Clientset
	K8sMetricsClientSet *metricsClient.Clientset
	K8sHelmClientSet map[string]helmClient.Client
	ExcludeDBData bool
	ExcludeTimeSeriesData bool
}

func NewDataCollector(namespaces ...string) (*DataCollector, error) {
@@ -266,3 +269,68 @@ func (c *DataCollector) AllNamespacesExist() bool {

	return allExist
}

// CopyFileFromPod copies a file from a pod's container to the local filesystem.
func (c *DataCollector) CopyFileFromPod(namespace, pod, container, srcPath, destPath string, ctx context.Context) error {
cmd := []string{"tar", "cf", "-", "-C", filepath.Dir(srcPath), filepath.Base(srcPath)}
req := c.K8sCoreClientSet.CoreV1().RESTClient().Post().
Namespace(namespace).
Resource("pods").
Name(pod).
SubResource("exec").
VersionedParams(&corev1.PodExecOptions{
Container: container,
Command: cmd,
Stdin: false,
Stdout: true,
Stderr: true,
TTY: false,
}, scheme.ParameterCodec)

exec, err := remotecommand.NewSPDYExecutor(c.K8sRestConfig, "POST", req.URL())
if err != nil {
return err
}

// Stream the data from the Pod
var stdout, stderr bytes.Buffer
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdout,
Stderr: &stderr,
})
if err != nil {
return err
}

// Create a local file to save the output
localFile, err := os.Create(destPath)
if err != nil {
// return fmt.Errorf("failed to create local file: %w", err)
return err
}
defer localFile.Close()

// Untar the stream and write the content to the local file
tarReader := tar.NewReader(&stdout)
for {
header, err := tarReader.Next()

if err == io.EOF {
break // End of tar archive
}
if err != nil {
return err
}

// Ensure the tar file matches the expected file path
if header.Name == filepath.Base(srcPath) {
_, err = io.Copy(localFile, tarReader)
if err != nil {
return fmt.Errorf("failed to write to local file: %w", err)
}
break
}
}

return nil
}
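
The hunks above do not include the NIM-specific job list that calls this helper, so the following is only a rough sketch of how a job could use CopyFileFromPod. The namespace "nim", pod "core-0", container "core", and file paths are hypothetical placeholders, not values taken from this PR.

package jobs // placement is illustrative only

import (
	"context"
	"os"
	"path/filepath"
	"time"

	"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
)

// copyFileExample streams a single file out of a pod and writes it under the
// collector's BaseDir. All concrete names and paths here are placeholders.
func copyFileExample(dc *data_collector.DataCollector) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	dest := filepath.Join(dc.BaseDir, "exec", "nim", "core-0", "dump.sql")
	if err := os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil {
		return err
	}
	// CopyFileFromPod tars the source file inside the container, streams it
	// back over the exec subresource, and untars it into dest.
	return dc.CopyFileFromPod("nim", "core-0", "core", "/tmp/dump.sql", dest, ctx)
}
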
92 changes: 89 additions & 3 deletions pkg/jobs/common_job_list.go
@@ -23,12 +23,13 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
"io"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"path/filepath"
"time"

"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func CommonJobList() []Job {
Expand Down Expand Up @@ -88,6 +89,91 @@ func CommonJobList() []Job {
				ch <- jobResult
			},
		},
		{
			Name: "pv-list",
			Timeout: time.Second * 10,
			Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
				jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
				for _, namespace := range dc.Namespaces {
					result, err := dc.K8sCoreClientSet.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
					if err != nil {
						dc.Logger.Printf("\tCould not retrieve persistent volumes list %s: %v\n", namespace, err)
					} else {
						jsonResult, _ := json.MarshalIndent(result, "", " ")
						jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "persistentvolumes.json")] = jsonResult
					}
				}
				ch <- jobResult
			},
		},
		{
			Name: "pvc-list",
			Timeout: time.Second * 10,
			Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
				jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
				for _, namespace := range dc.Namespaces {
					result, err := dc.K8sCoreClientSet.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{})
					if err != nil {
						dc.Logger.Printf("\tCould not retrieve persistent volume claims list %s: %v\n", namespace, err)
					} else {
						jsonResult, _ := json.MarshalIndent(result, "", " ")
						jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "persistentvolumeclaims.json")] = jsonResult
					}
				}
				ch <- jobResult
			},
		},
		{
			Name: "sc-list",
			Timeout: time.Second * 10,
			Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
				jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
				for _, namespace := range dc.Namespaces {
					result, err := dc.K8sCoreClientSet.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
					if err != nil {
						dc.Logger.Printf("\tCould not retrieve storage classes list %s: %v\n", namespace, err)
					} else {
						jsonResult, _ := json.MarshalIndent(result, "", " ")
						jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "storageclasses.json")] = jsonResult
					}
				}
				ch <- jobResult
			},
		},
		{
			Name: "apiresources-list",
			Timeout: time.Second * 10,
			Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
				jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
				for _, namespace := range dc.Namespaces {
					result, err := dc.K8sCoreClientSet.DiscoveryClient.ServerPreferredResources()
					if err != nil {
						dc.Logger.Printf("\tCould not retrieve API resources list %s: %v\n", namespace, err)
					} else {
						jsonResult, _ := json.MarshalIndent(result, "", " ")
						jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "apiresources.json")] = jsonResult
					}
				}
				ch <- jobResult
			},
		},
		{
			Name: "apiversions-list",
			Timeout: time.Second * 10,
			Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
				jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
				for _, namespace := range dc.Namespaces {
					result, err := dc.K8sCoreClientSet.DiscoveryClient.ServerGroups()
					if err != nil {
						dc.Logger.Printf("\tCould not retrieve API versions list %s: %v\n", namespace, err)
					} else {
						jsonResult, _ := json.MarshalIndent(result, "", " ")
						jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "apiversions.json")] = jsonResult
					}
				}
				ch <- jobResult
			},
		},
		{
			Name: "events-list",
			Timeout: time.Second * 10,
22 changes: 12 additions & 10 deletions pkg/jobs/job.go
@@ -20,12 +20,12 @@ package jobs

import (
"context"
"errors"
"fmt"
"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
"os"
"path/filepath"
"time"

"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
)

type Job struct {
@@ -35,11 +35,12 @@ type Job struct {
}

type JobResult struct {
	Files map[string][]byte
	Error error
	Files map[string][]byte
	Error error
	Skipped bool
}

func (j Job) Collect(dc *data_collector.DataCollector) error {
func (j Job) Collect(dc *data_collector.DataCollector) (error, bool) {
	ch := make(chan JobResult, 1)

	ctx, cancel := context.WithTimeout(context.Background(), j.Timeout)
@@ -51,28 +52,29 @@ func (j Job) Collect(dc *data_collector.DataCollector) error {
	select {
	case <-ctx.Done():
		dc.Logger.Printf("\tJob %s has timed out: %s\n---\n", j.Name, ctx.Err())
		return errors.New(fmt.Sprintf("Context cancelled: %v", ctx.Err()))
		err := fmt.Errorf("Context cancelled: %v", ctx.Err())
		return err, false

	case jobResults := <-ch:
		if jobResults.Error != nil {
			dc.Logger.Printf("\tJob %s has failed: %s\n", j.Name, jobResults.Error)
			return jobResults.Error
			return jobResults.Error, jobResults.Skipped
		}

		for fileName, fileValue := range jobResults.Files {
			err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
			if err != nil {
				return fmt.Errorf("MkdirAll failed: %v", err)
				return fmt.Errorf("MkdirAll failed: %v", err), jobResults.Skipped
			}
			file, _ := os.Create(fileName)
			_, err = file.Write(fileValue)
			if err != nil {
				return fmt.Errorf("Write failed: %v", err)
				return fmt.Errorf("Write failed: %v", err), jobResults.Skipped
			}
			_ = file.Close()
			dc.Logger.Printf("\tJob %s wrote %d bytes to %s\n", j.Name, len(fileValue), fileName)
		}
		dc.Logger.Printf("\tJob %s completed successfully\n---\n", j.Name)
		return nil
		return nil, jobResults.Skipped
	}
}
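
Taken together with the cmd changes above, the skip path works as follows: a job that decides not to run sends a JobResult with Skipped set, Collect returns (nil, true), and the CLI prints " SKIPPED" instead of counting a failure. The NIM jobs that actually use this are not shown in these hunks, so the job below is only a hedged sketch of the pattern; its name and body are hypothetical.

package jobs // illustrative sketch, not part of this PR's NIM job list

import (
	"context"
	"time"

	"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
)

// exampleDBJob honors the new --exclude-db-data flag: cmd/nginx-supportpkg.go
// copies the flag onto the collector, and the job reports Skipped when it is set.
func exampleDBJob() Job {
	return Job{
		Name:    "example-db-data",
		Timeout: time.Second * 30,
		Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
			if dc.ExcludeDBData {
				// Collect returns (nil, true) for this result and the CLI prints " SKIPPED".
				ch <- JobResult{Skipped: true}
				return
			}
			jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
			// ... gather DB data here and add it to jobResult.Files ...
			ch <- jobResult
		},
	}
}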