Test branch #7

Closed · wants to merge 7 commits
@@ -9,6 +9,10 @@ global:
     type: {{.INDEXING_TYPE}}
   measurements:
     - name: podLatency
+      thresholds:
+        - conditionType: Ready
+          metric: P99
+          threshold: {{.POD_READY_THRESHOLD}}
 jobs:
   - name: cluster-density-ms
     namespace: cluster-density-ms
2 changes: 1 addition & 1 deletion cmd/kube-burner/ocp-config/cluster-density-v2/build.yml
@@ -6,7 +6,7 @@ metadata:
 spec:
   resources:
     requests:
-      cpu: 20m
+      cpu: 70m
       memory: "10Mi"
   nodeSelector:
     node-role.kubernetes.io/worker: ""
@@ -9,6 +9,10 @@ global:
     type: {{.INDEXING_TYPE}}
   measurements:
     - name: podLatency
+      thresholds:
+        - conditionType: Ready
+          metric: P99
+          threshold: {{.POD_READY_THRESHOLD}}
 jobs:
   - name: cluster-density-v2
     namespace: cluster-density-v2
4 changes: 4 additions & 0 deletions cmd/kube-burner/ocp-config/cluster-density/build.yml
@@ -4,6 +4,10 @@ apiVersion: build.openshift.io/v1
 metadata:
   name: {{.JobName}}-{{.Replica}}
 spec:
+  resources:
+    requests:
+      cpu: 70m
+      memory: "10Mi"
   nodeSelector:
     node-role.kubernetes.io/worker: ""
   serviceAccount: builder
@@ -9,6 +9,10 @@ global:
     type: {{.INDEXING_TYPE}}
   measurements:
     - name: podLatency
+      thresholds:
+        - conditionType: Ready
+          metric: P99
+          threshold: {{.POD_READY_THRESHOLD}}
 jobs:
   - name: cluster-density
     namespace: cluster-density
@@ -9,6 +9,10 @@ global:
     type: {{.INDEXING_TYPE}}
   measurements:
     - name: podLatency
+      thresholds:
+        - conditionType: Ready
+          metric: P99
+          threshold: {{.POD_READY_THRESHOLD}}
 jobs:
   - name: node-density-cni
     namespace: node-density-cni
47 changes: 18 additions & 29 deletions cmd/kube-burner/ocp.go
@@ -17,9 +17,7 @@ package main
 import (
 	"embed"
 	_ "embed"
-	"fmt"
 	"os"
-	"strings"
 	"time"

 	"github.com/cloud-bulldozer/go-commons/indexers"
@@ -38,44 +36,35 @@ func openShiftCmd() *cobra.Command {
 		Short: "OpenShift wrapper",
 		Long: `This subcommand is meant to be used against OpenShift clusters and serve as a shortcut to trigger well-known workloads`,
 	}
+	var workloadConfig workloads.Config
 	var wh workloads.WorkloadHelper
-	var indexingType indexers.IndexerType
-	esServer := ocpCmd.PersistentFlags().String("es-server", "", "Elastic Search endpoint")
+	ocpCmd.PersistentFlags().StringVar(&workloadConfig.EsServer, "es-server", "", "Elastic Search endpoint")
+	ocpCmd.PersistentFlags().StringVar(&workloadConfig.Esindex, "es-index", "", "Elastic Search index")
 	localIndexing := ocpCmd.PersistentFlags().Bool("local-indexing", false, "Enable local indexing")
-	esIndex := ocpCmd.PersistentFlags().String("es-index", "", "Elastic Search index")
-	metricsEndpoint := ocpCmd.PersistentFlags().String("metrics-endpoint", "", "YAML file with a list of metric endpoints")
-	alerting := ocpCmd.PersistentFlags().Bool("alerting", true, "Enable alerting")
-	uuid := ocpCmd.PersistentFlags().String("uuid", uid.NewV4().String(), "Benchmark UUID")
-	timeout := ocpCmd.PersistentFlags().Duration("timeout", 4*time.Hour, "Benchmark timeout")
-	qps := ocpCmd.PersistentFlags().Int("qps", 20, "QPS")
-	burst := ocpCmd.PersistentFlags().Int("burst", 20, "Burst")
-	gc := ocpCmd.PersistentFlags().Bool("gc", true, "Garbage collect created namespaces")
-	gcMetrics := ocpCmd.PersistentFlags().Bool("gc-metrics", false, "Collect metrics during garbage collection")
+	ocpCmd.PersistentFlags().StringVar(&workloadConfig.MetricsEndpoint, "metrics-endpoint", "", "YAML file with a list of metric endpoints")
+	ocpCmd.PersistentFlags().BoolVar(&workloadConfig.Alerting, "alerting", true, "Enable alerting")
+	ocpCmd.PersistentFlags().StringVar(&workloadConfig.UUID, "uuid", uid.NewV4().String(), "Benchmark UUID")
+	ocpCmd.PersistentFlags().DurationVar(&workloadConfig.Timeout, "timeout", 4*time.Hour, "Benchmark timeout")
+	ocpCmd.PersistentFlags().IntVar(&workloadConfig.QPS, "qps", 20, "QPS")
+	ocpCmd.PersistentFlags().IntVar(&workloadConfig.Burst, "burst", 20, "Burst")
+	ocpCmd.PersistentFlags().BoolVar(&workloadConfig.Gc, "gc", true, "Garbage collect created namespaces")
+	ocpCmd.PersistentFlags().BoolVar(&workloadConfig.GcMetrics, "gc-metrics", false, "Collect metrics during garbage collection")
 	userMetadata := ocpCmd.PersistentFlags().String("user-metadata", "", "User provided metadata file, in YAML format")
 	extract := ocpCmd.PersistentFlags().Bool("extract", false, "Extract workload in the current directory")
-	profileType := ocpCmd.PersistentFlags().String("profile-type", "both", "Metrics profile to use, supported options are: regular, reporting or both")
+	ocpCmd.PersistentFlags().StringVar(&workloadConfig.ProfileType, "profile-type", "both", "Metrics profile to use, supported options are: regular, reporting or both")
+	ocpCmd.PersistentFlags().BoolVar(&workloadConfig.Reporting, "reporting", false, "Enable benchmark report indexing")
 	ocpCmd.MarkFlagsRequiredTogether("es-server", "es-index")
 	ocpCmd.MarkFlagsMutuallyExclusive("es-server", "local-indexing")
 	ocpCmd.PersistentPreRun = func(cmd *cobra.Command, args []string) {
 		rootCmd.PersistentPreRun(cmd, args)
-		if *esServer != "" || *localIndexing {
-			if *esServer != "" {
-				indexingType = indexers.ElasticIndexer
+		if workloadConfig.EsServer != "" || *localIndexing {
+			if workloadConfig.EsServer != "" {
+				workloadConfig.Indexer = indexers.ElasticIndexer
 			} else {
-				indexingType = indexers.LocalIndexer
+				workloadConfig.Indexer = indexers.LocalIndexer
 			}
 		}
-		envVars := map[string]string{
-			"ES_SERVER": strings.TrimSuffix(*esServer, "/"),
-			"ES_INDEX": *esIndex,
-			"QPS": fmt.Sprintf("%d", *qps),
-			"BURST": fmt.Sprintf("%d", *burst),
-			"GC": fmt.Sprintf("%v", *gc),
-			"GC_METRICS": fmt.Sprintf("%v", *gcMetrics),
-			"INDEXING_TYPE": string(indexingType),
-		}
-		wh = workloads.NewWorkloadHelper(envVars, *alerting, *profileType, ocpConfig, *timeout, *metricsEndpoint)
-		wh.Metadata.UUID = *uuid
+		wh = workloads.NewWorkloadHelper(workloadConfig, ocpConfig)
 		if *extract {
 			if err := wh.ExtractWorkload(cmd.Name(), workloads.MetricsProfileMap[cmd.Name()]); err != nil {
 				log.Fatal(err)
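Note on the hunk above: the individual flag variables are replaced by a single `workloads.Config` value whose fields are bound directly to the cobra flags, and `NewWorkloadHelper` now receives that struct plus the embedded `ocpConfig` instead of an environment-variable map. The struct definition itself is not part of this diff; the sketch below is only an inference from the fields referenced here (`EsServer`, `Esindex`, `Indexer`, `QPS`, etc.) — names match the diff, but types, tags, and ordering are assumptions, not the actual `pkg/workloads` code.

```go
// Hypothetical sketch of the workloads.Config struct implied by the flag
// bindings in ocp.go above. Field names follow the diff; everything else
// (types, ordering) is assumed, not taken from the repository.
package workloads

import (
	"time"

	"github.com/cloud-bulldozer/go-commons/indexers"
)

type Config struct {
	UUID            string
	EsServer        string
	Esindex         string
	QPS             int
	Burst           int
	Gc              bool
	GcMetrics       bool
	Indexer         indexers.IndexerType
	Alerting        bool
	Timeout         time.Duration
	MetricsEndpoint string
	ProfileType     string
	Reporting       bool
}
```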
4 changes: 4 additions & 0 deletions examples/workloads/cluster-density/templates/build.yml
@@ -4,6 +4,10 @@ apiVersion: build.openshift.io/v1
 metadata:
   name: {{.JobName}}-{{.Replica}}
 spec:
+  resources:
+    requests:
+      cpu: 70m
+      memory: "10Mi"
   nodeSelector:
     node-role.kubernetes.io/worker: ""
   serviceAccount: builder
1 change: 1 addition & 0 deletions foo
@@ -0,0 +1 @@
+hello there!
1 change: 0 additions & 1 deletion mkdocs.yml
@@ -14,7 +14,6 @@ nav:
   - OpenShift Wrapper: ocp.md
   - Contributing:
     - contributing/index.md
-    - Contributing: contributing/index.md
     - GitHub Workflows:
       - contributing/pullrequest.md
       - contributing/release.md
114 changes: 62 additions & 52 deletions pkg/burner/waiters.go
@@ -31,50 +31,51 @@ import (
 )

 func (ex *Executor) waitForObjects(ns string, limiter *rate.Limiter) {
-	limiter.Wait(context.TODO())
 	for _, obj := range ex.objects {
-		if obj.Wait {
-			if obj.WaitOptions.ForCondition != "" {
-				if !obj.Namespaced {
-					ns = ""
-				}
-				waitForCondition(obj.gvr, ns, obj.WaitOptions.ForCondition, ex.MaxWaitTimeout)
-			} else {
-				switch obj.kind {
-				case "Deployment":
-					waitForDeployments(ns, ex.MaxWaitTimeout)
-				case "ReplicaSet":
-					waitForRS(ns, ex.MaxWaitTimeout)
-				case "ReplicationController":
-					waitForRC(ns, ex.MaxWaitTimeout)
-				case "StatefulSet":
-					waitForStatefulSet(ns, ex.MaxWaitTimeout)
-				case "DaemonSet":
-					waitForDS(ns, ex.MaxWaitTimeout)
-				case "Pod":
-					waitForPod(ns, ex.MaxWaitTimeout)
-				case "Build", "BuildConfig":
-					waitForBuild(ns, ex.MaxWaitTimeout, obj.Replicas)
-				case "VirtualMachine":
-					waitForVM(ns, ex.MaxWaitTimeout)
-				case "VirtualMachineInstance":
-					waitForVMI(ns, ex.MaxWaitTimeout)
-				case "VirtualMachineInstanceReplicaSet":
-					waitForVMIRS(ns, ex.MaxWaitTimeout)
-				case "Job":
-					waitForJob(ns, ex.MaxWaitTimeout)
-				case "PersistentVolumeClaim":
-					waitForPVC(ns, ex.MaxWaitTimeout)
-				}
-			}
+		if !obj.Wait {
+			continue
+		}
+		if obj.WaitOptions.ForCondition != "" {
+			if !obj.Namespaced {
+				ns = ""
+			}
+			waitForCondition(obj.gvr, ns, obj.WaitOptions.ForCondition, ex.MaxWaitTimeout, limiter)
+		} else {
+			switch obj.kind {
+			case "Deployment":
+				waitForDeployments(ns, ex.MaxWaitTimeout, limiter)
+			case "ReplicaSet":
+				waitForRS(ns, ex.MaxWaitTimeout, limiter)
+			case "ReplicationController":
+				waitForRC(ns, ex.MaxWaitTimeout, limiter)
+			case "StatefulSet":
+				waitForStatefulSet(ns, ex.MaxWaitTimeout, limiter)
+			case "DaemonSet":
+				waitForDS(ns, ex.MaxWaitTimeout, limiter)
+			case "Pod":
+				waitForPod(ns, ex.MaxWaitTimeout, limiter)
+			case "Build", "BuildConfig":
+				waitForBuild(ns, ex.MaxWaitTimeout, obj.Replicas, limiter)
+			case "VirtualMachine":
+				waitForVM(ns, ex.MaxWaitTimeout, limiter)
+			case "VirtualMachineInstance":
+				waitForVMI(ns, ex.MaxWaitTimeout, limiter)
+			case "VirtualMachineInstanceReplicaSet":
+				waitForVMIRS(ns, ex.MaxWaitTimeout, limiter)
+			case "Job":
+				waitForJob(ns, ex.MaxWaitTimeout, limiter)
+			case "PersistentVolumeClaim":
+				waitForPVC(ns, ex.MaxWaitTimeout, limiter)
+			}
 		}
 	}
 	log.Infof("Actions in namespace %v completed", ns)
 }

-func waitForDeployments(ns string, maxWaitTimeout time.Duration) {
+func waitForDeployments(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	// TODO handle errors such as timeouts
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		deps, err := ClientSet.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -89,8 +90,9 @@ func waitForDeployments(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForRS(ns string, maxWaitTimeout time.Duration) {
+func waitForRS(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		rss, err := ClientSet.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -105,8 +107,9 @@ func waitForRS(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForStatefulSet(ns string, maxWaitTimeout time.Duration) {
+func waitForStatefulSet(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		stss, err := ClientSet.AppsV1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -121,8 +124,9 @@ func waitForStatefulSet(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForPVC(ns string, maxWaitTimeout time.Duration) {
+func waitForPVC(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		pvc, err := ClientSet.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{FieldSelector: "status.phase!=Bound"})
 		if err != nil {
 			return false, err
@@ -131,8 +135,9 @@ func waitForPVC(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForRC(ns string, maxWaitTimeout time.Duration) {
+func waitForRC(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		rcs, err := ClientSet.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -147,8 +152,9 @@ func waitForRC(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForDS(ns string, maxWaitTimeout time.Duration) {
+func waitForDS(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		dss, err := ClientSet.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -163,8 +169,9 @@ func waitForDS(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForPod(ns string, maxWaitTimeout time.Duration) {
+func waitForPod(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		pods, err := ClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{FieldSelector: "status.phase!=Running"})
 		if err != nil {
 			return false, err
@@ -173,7 +180,7 @@ func waitForPod(ns string, maxWaitTimeout time.Duration) {
 	})
 }

-func waitForBuild(ns string, maxWaitTimeout time.Duration, expected int) {
+func waitForBuild(ns string, maxWaitTimeout time.Duration, expected int, limiter *rate.Limiter) {
 	buildStatus := []string{"New", "Pending", "Running"}
 	var build types.UnstructuredContent
 	gvr := schema.GroupVersionResource{
@@ -182,6 +189,7 @@ func waitForBuild(ns string, maxWaitTimeout time.Duration, expected int) {
 		Resource: types.OpenShiftBuildResource,
 	}
 	wait.PollUntilContextTimeout(context.TODO(), time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		builds, err := DynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -207,23 +215,24 @@ func waitForBuild(ns string, maxWaitTimeout time.Duration, expected int) {
 	})
 }

-func waitForJob(ns string, maxWaitTimeout time.Duration) {
+func waitForJob(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	gvr := schema.GroupVersionResource{
 		Group: "batch",
 		Version: "v1",
 		Resource: "jobs",
 	}
-	verifyCondition(gvr, ns, "Complete", maxWaitTimeout)
+	verifyCondition(gvr, ns, "Complete", maxWaitTimeout, limiter)
 }

-func waitForCondition(gvr schema.GroupVersionResource, ns, condition string, maxWaitTimeout time.Duration) {
-	verifyCondition(gvr, ns, condition, maxWaitTimeout)
+func waitForCondition(gvr schema.GroupVersionResource, ns, condition string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
+	verifyCondition(gvr, ns, condition, maxWaitTimeout, limiter)
 }

-func verifyCondition(gvr schema.GroupVersionResource, ns, condition string, maxWaitTimeout time.Duration) {
+func verifyCondition(gvr schema.GroupVersionResource, ns, condition string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	var uObj types.UnstructuredContent
 	wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
 		var objs *unstructured.UnstructuredList
+		limiter.Wait(context.TODO())
 		if ns != "" {
 			objs, err = DynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})
 		} else {
@@ -256,32 +265,33 @@ func verifyCondition(gvr schema.GroupVersionResource, ns, condition string, maxW
 	})
 }

-func waitForVM(ns string, maxWaitTimeout time.Duration) {
+func waitForVM(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	vmGVR := schema.GroupVersionResource{
 		Group: types.KubevirtGroup,
 		Version: types.KubevirtAPIVersion,
 		Resource: types.VirtualMachineResource,
 	}
-	verifyCondition(vmGVR, ns, "Ready", maxWaitTimeout)
+	verifyCondition(vmGVR, ns, "Ready", maxWaitTimeout, limiter)
 }

-func waitForVMI(ns string, maxWaitTimeout time.Duration) {
+func waitForVMI(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	vmiGVR := schema.GroupVersionResource{
 		Group: types.KubevirtGroup,
 		Version: types.KubevirtAPIVersion,
 		Resource: types.VirtualMachineInstanceResource,
 	}
-	verifyCondition(vmiGVR, ns, "Ready", maxWaitTimeout)
+	verifyCondition(vmiGVR, ns, "Ready", maxWaitTimeout, limiter)
 }

-func waitForVMIRS(ns string, maxWaitTimeout time.Duration) {
+func waitForVMIRS(ns string, maxWaitTimeout time.Duration, limiter *rate.Limiter) {
 	var rs types.UnstructuredContent
 	vmiGVRRS := schema.GroupVersionResource{
 		Group: types.KubevirtGroup,
 		Version: types.KubevirtAPIVersion,
 		Resource: types.VirtualMachineInstanceReplicaSetResource,
 	}
 	wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, maxWaitTimeout, true, func(ctx context.Context) (done bool, err error) {
+		limiter.Wait(context.TODO())
 		objs, err := DynamicClient.Resource(vmiGVRRS).Namespace(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
 			log.Debugf("VMIRS error %v", err)
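Note on the waiters change above: instead of a single `limiter.Wait` call at the top of `waitForObjects`, every waiter now receives the `*rate.Limiter` and calls `limiter.Wait` inside its poll loop, so each List request against the API server consumes a token from the shared `golang.org/x/time/rate` limiter. A minimal, self-contained sketch of that throttling pattern is below; the QPS/burst values and the loop body are illustrative only, not kube-burner code.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Shared limiter: at most 5 requests/s with a burst of 1, so the loop
	// below is visibly throttled. kube-burner would derive the values from
	// its --qps and --burst flags instead (20/20 by default).
	limiter := rate.NewLimiter(rate.Limit(5), 1)
	start := time.Now()
	for i := 0; i < 10; i++ {
		// Each poll iteration blocks here until a token is available,
		// which is what the added limiter.Wait calls do inside
		// wait.PollUntilContextTimeout in the diff above.
		if err := limiter.Wait(context.TODO()); err != nil {
			fmt.Println("limiter:", err)
			return
		}
		fmt.Printf("poll %d at %v\n", i, time.Since(start).Round(time.Millisecond))
	}
}
```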