Allow backends selection from other namespaces (#66)
* Implement backend selection from multiple namespaces. A new `namespaces` field lets you specify the namespace(s) where the operator should look for backend pods; by default it looks in the namespace where the VarnishCluster is deployed (an example spec is sketched below).

Signed-off-by: Tomash Sidei <[email protected]>

* Minor doc fix -- sneaking into Tomash's PR

Co-authored-by: Craig Ingram <[email protected]>
tomashibm and Craig Ingram authored May 11, 2022
1 parent 6d709d5 commit 1293193
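
For illustration, a VarnishCluster that selects backend pods from two other namespaces might look like the sketch below. Only `backend.namespaces` is the field added by this commit; the namespace names and the `vcl`/`service` values are placeholders, not part of this change.

```yaml
apiVersion: caching.ibm.com/v1alpha1
kind: VarnishCluster
metadata:
  name: varnishcluster-example
  namespace: varnish-cluster
spec:
  backend:
    selector:
      app: nginx-backend           # labels identifying the backend pods to cache
    port: 80                       # port name or number exposed by those pods
    namespaces:                    # new field: namespaces to search for backend pods
      - team-a                     # placeholder namespace
      - team-b                     # placeholder namespace
  vcl:
    configMapName: vcl-files       # placeholder ConfigMap holding the VCL
    entrypointFileName: entrypoint.vcl
  service:
    port: 80
```

When `namespaces` is omitted, the operator falls back to the namespace the VarnishCluster itself lives in, matching the previous behavior.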
Showing 20 changed files with 786 additions and 1,150 deletions.
1 change: 1 addition & 0 deletions api/v1alpha1/varnishcluster_types.go
@@ -195,6 +195,7 @@ type VarnishClusterBackend struct {
Selector map[string]string `json:"selector,omitempty"`
// +kubebuilder:validation:Required
Port *intstr.IntOrString `json:"port,omitempty"`
Namespaces []string `json:"namespaces,omitempty"`
ZoneBalancing *VarnishClusterBackendZoneBalancing `json:"zoneBalancing,omitempty"`
}

7 changes: 6 additions & 1 deletion api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion cmd/varnish-controller/main.go
@@ -73,7 +73,6 @@ func main() {
controllerMetrics.Registry.MustRegister(vMetrics.VCLCompilationError)

mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
Namespace: varnishControllerConfig.Namespace,
Scheme: scheme,
HealthProbeBindAddress: fmt.Sprintf(":%d", v1alpha1.HealthCheckPort),
MetricsBindAddress: fmt.Sprintf(":%d", v1alpha1.VarnishControllerMetricsPort),
4 changes: 4 additions & 0 deletions config/crd/bases/caching.ibm.com_varnishclusters.yaml
@@ -864,6 +864,10 @@ spec:
type: object
backend:
properties:
namespaces:
items:
type: string
type: array
port:
anyOf:
- type: integer
4 changes: 2 additions & 2 deletions docs/quick-start.md
@@ -37,7 +37,7 @@ varnish-operator-fd96f48f-gn6mc 1/1 Running 0 40s
1. Create a simple backend that will be cached by Varnish:

```bash
$ kubectl create deployment nginx-backend --image nginx -n varnish-cluster
$ kubectl create deployment nginx-backend --image nginx -n varnish-cluster --port=80
deployment.apps/nginx-backend created
$ kubectl get deployment -n varnish-cluster nginx-backend --show-labels #get pod labels, they will be used to identify your backend pods
NAME READY UP-TO-DATE AVAILABLE AGE LABELS
@@ -85,7 +85,7 @@ You can check if all works by doing `kubectl port-forward` and checking the serv
Port forward your service:
```bash
$ kubectl port-forward -n varnish-cluster service/varnishcluster-example 8080:80
$ kubectl port-forward -n varnish-cluster service/varnishcluster-example 8080:6081
Forwarding from 127.0.0.1:8080 -> 6081
Forwarding from [::1]:8080 -> 6081
...
5 changes: 3 additions & 2 deletions docs/varnish-cluster-configuration.md
@@ -3,8 +3,9 @@
| Field | Description | Is Required |
|-------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|
| `affinity ` | [Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) settings for the pods. It allows you to configure onto which nodes Varnish pods should prefer being scheduled. | `optional` |
| `backend.port ` | The port of the backend pods being cached by Varnish. Can be port name or port number. | `optional` |
| `backend.selector ` | The selector used to identify the backend Pods. | `optional` |
| `backend.namespaces ` | Namespace(s) to look for backend pods in. Defaults to the namespace the VarnishCluster is deployed to. | `optional` |
| `backend.port ` | The port of the backend pods being cached by Varnish. Can be port name or port number. | `required` |
| `backend.selector ` | The selector used to identify the backend Pods. | `required` |
| `backend.zoneBalancing ` | Controls Varnish backend topology aware routing which can assign weights to backends according to their geographical location. | `optional` |
| `backend.zoneBalancing.type ` | Varnish backend zone-balancing type. Accepted values: `disabled`, `auto`, `thresholds` | `optional` |
| `backend.zoneBalancing.thresholds ` | Array of thresholds objects to determine condition and respective weights to be assigned to backends: `threshold`, `local` - local backend weight, `remote` - remote backend weight | `optional` |
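
Putting the `backend.*` fields above together, a hedged sketch of a spec fragment using zone-aware routing with explicit thresholds (all values are illustrative, not defaults):

```yaml
backend:
  selector:
    app: nginx-backend            # identifies the backend pods
  port: 80                        # port name or number on the backend pods
  namespaces:                     # optional; defaults to the VarnishCluster's namespace
    - team-a                      # placeholder namespace
  zoneBalancing:
    type: thresholds              # one of: disabled, auto, thresholds
    thresholds:
      - threshold: 100            # condition value for applying the weights below
        local: 80                 # weight assigned to same-zone backends
        remote: 20                # weight assigned to other-zone backends
```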
15 changes: 15 additions & 0 deletions pkg/varnishcluster/controller/varnishcluster_clusterrole.go
@@ -33,6 +33,21 @@ func (r *ReconcileVarnishCluster) reconcileClusterRole(ctx context.Context, inst
Resources: []string{"nodes"},
Verbs: []string{"list", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"list", "watch", "get", "update"},
},
{
APIGroups: []string{"caching.ibm.com"},
Resources: []string{"varnishclusters"},
Verbs: []string{"list", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"secrets", "configmaps"},
Verbs: []string{"list", "get", "watch"},
},
},
}

17 changes: 1 addition & 16 deletions pkg/varnishcluster/controller/varnishcluster_role.go
@@ -28,29 +28,14 @@ func (r *ReconcileVarnishCluster) reconcileRole(ctx context.Context, instance *v
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"endpoints", "configmaps"},
Verbs: []string{"list", "watch"},
},
{
APIGroups: []string{"caching.ibm.com"},
Resources: []string{"varnishclusters"},
Resources: []string{"endpoints"},
Verbs: []string{"list", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"create", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"list", "get", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{"get", "watch"},
},
},
}

74 changes: 52 additions & 22 deletions pkg/varnishcontroller/controller/controller.go
@@ -5,6 +5,8 @@ import (
"strings"
"time"

ctrlBuilder "sigs.k8s.io/controller-runtime/pkg/builder"

"github.com/ibm/varnish-operator/api/v1alpha1"
"github.com/ibm/varnish-operator/pkg/logger"
"github.com/ibm/varnish-operator/pkg/varnishcontroller/config"
@@ -42,14 +44,24 @@ type PodInfo struct {
// SetupVarnishReconciler creates a new VarnishCluster Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func SetupVarnishReconciler(mgr manager.Manager, cfg *config.Config, varnish varnishadm.VarnishAdministrator, metrics *metrics.VarnishControllerMetrics, logr *logger.Logger) error {
backendsLabels, err := labels.ConvertSelectorToLabelsMap(cfg.EndpointSelectorString)
if err != nil {
return err
}

backendNamespacePredicate := predicates.NewNamespacesMatcherPredicate([]string{cfg.Namespace}, logr)
backendLabelsPredicate := predicates.NewLabelMatcherPredicate(backendsLabels.AsSelector(), logr)

r := &ReconcileVarnish{
config: cfg,
logger: logr,
Client: mgr.GetClient(),
scheme: mgr.GetScheme(),
varnish: varnish,
eventHandler: events.NewEventHandler(mgr.GetEventRecorderFor(events.EventRecorderName), cfg.PodName),
metrics: metrics,
config: cfg,
logger: logr,
Client: mgr.GetClient(),
scheme: mgr.GetScheme(),
varnish: varnish,
eventHandler: events.NewEventHandler(mgr.GetEventRecorderFor(events.EventRecorderName), cfg.PodName),
metrics: metrics,
backendsSelectorPredicate: backendLabelsPredicate,
backendsNamespacePredicate: backendNamespacePredicate,
}

podMapFunc := handler.EnqueueRequestsFromMapFunc(
@@ -65,22 +77,31 @@ func SetupVarnishReconciler(mgr manager.Manager, cfg *config.Config, varnish var
builder := ctrl.NewControllerManagedBy(mgr)
builder.Named("varnish-controller")

builder.For(&v1alpha1.VarnishCluster{})
builder.Watches(&source.Kind{Type: &v1.Endpoints{}}, podMapFunc)
builder.For(&v1alpha1.VarnishCluster{}, ctrlBuilder.WithPredicates(predicates.NewVarnishClusterPredicate(r.config.VarnishClusterUID, logr)))

backendsLabels, err := labels.ConvertSelectorToLabelsMap(cfg.EndpointSelectorString)
if err != nil {
return err
}
builder.Watches(
&source.Kind{Type: &v1.Pod{}},
podMapFunc,
ctrlBuilder.WithPredicates(
backendNamespacePredicate,
backendLabelsPredicate,
),
)

varnishPodsSelector := labels.SelectorFromSet(labels.Set{
v1alpha1.LabelVarnishOwner: cfg.VarnishClusterName,
v1alpha1.LabelVarnishComponent: v1alpha1.VarnishComponentCacheService,
v1alpha1.LabelVarnishUID: string(cfg.VarnishClusterUID),
})

endpointsSelectors := []labels.Selector{labels.SelectorFromSet(backendsLabels), varnishPodsSelector}
builder.WithEventFilter(predicates.NewVarnishControllerPredicate(cfg.VarnishClusterUID, endpointsSelectors, nil))
builder.Watches(
&source.Kind{Type: &v1.Pod{}},
podMapFunc,
ctrlBuilder.WithPredicates(
predicates.NewNamespacesMatcherPredicate([]string{cfg.Namespace}, logr),
predicates.NewLabelMatcherPredicate(varnishPodsSelector, logr),
),
)
//builder.WithEventFilter(predicates.NewDebugPredicate(logr))

return builder.Complete(r)
}
@@ -89,12 +110,14 @@ var _ reconcile.Reconciler = &ReconcileVarnish{}

type ReconcileVarnish struct {
client.Client
config *config.Config
logger *logger.Logger
scheme *runtime.Scheme
eventHandler *events.EventHandler
varnish varnishadm.VarnishAdministrator
metrics *metrics.VarnishControllerMetrics
config *config.Config
logger *logger.Logger
scheme *runtime.Scheme
eventHandler *events.EventHandler
varnish varnishadm.VarnishAdministrator
metrics *metrics.VarnishControllerMetrics
backendsNamespacePredicate *predicates.NamespacesMatcherPredicate
backendsSelectorPredicate *predicates.LabelMatcherPredicate
}

func (r *ReconcileVarnish) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
@@ -135,6 +158,13 @@ func (r *ReconcileVarnish) reconcileWithContext(ctx context.Context, request rec

r.scheme.Default(vc)

if len(vc.Spec.Backend.Namespaces) > 0 {
r.backendsNamespacePredicate.Namespaces = vc.Spec.Backend.Namespaces
} else {
r.backendsNamespacePredicate.Namespaces = []string{r.config.Namespace}
}
r.backendsSelectorPredicate.Selector = labels.SelectorFromSet(vc.Spec.Backend.Selector)

varnishPort := int32(v1alpha1.VarnishPort)
entrypointFileName := *vc.Spec.VCL.EntrypointFileName

78 changes: 46 additions & 32 deletions pkg/varnishcontroller/controller/endpoints.go
@@ -15,24 +15,28 @@ import (
)

func (r *ReconcileVarnish) getBackendEndpoints(ctx context.Context, vc *v1alpha1.VarnishCluster) ([]PodInfo, int32, float64, float64, error) {

varnishNodeLabels, err := r.getNodeLabels(ctx, r.config.NodeName)
if err != nil {
return nil, 0, 0, 0, errors.WithStack(err)
}

// Check for deprecated topology labels
zoneLabel := "topology.kubernetes.io/zone"
if _, ok := varnishNodeLabels["failure-domain.beta.kubernetes.io/zone"]; ok {
zoneLabel = "failure-domain.beta.kubernetes.io/zone"
zoneLabel := v1.LabelTopologyZone
if _, ok := varnishNodeLabels[v1.LabelFailureDomainBetaZone]; ok {
zoneLabel = v1.LabelFailureDomainBetaZone
}

currentZone := varnishNodeLabels[zoneLabel]

actualLocalWeight := 1.0
actualRemoteWeight := 1.0

backendList, portNumber, err := r.getPodsInfo(ctx, r.config.EndpointSelector, *vc.Spec.Backend.Port)
namespaces := []string{r.config.Namespace}
if len(vc.Spec.Backend.Namespaces) > 0 {
namespaces = vc.Spec.Backend.Namespaces
}

backendList, portNumber, err := r.getPodsInfo(ctx, namespaces, labels.SelectorFromSet(vc.Spec.Backend.Selector), *vc.Spec.Backend.Port)
if err != nil {
return nil, 0, 0, 0, errors.WithStack(err)
}
@@ -95,47 +99,58 @@ func (r *ReconcileVarnish) getBackendEndpoints(ctx context.Context, vc *v1alpha1
}

func (r *ReconcileVarnish) getVarnishEndpoints(ctx context.Context, vc *v1alpha1.VarnishCluster) ([]PodInfo, error) {

labels := labels.SelectorFromSet(vclabels.CombinedComponentLabels(vc, v1alpha1.VarnishComponentCacheService))
varnishLables := labels.SelectorFromSet(vclabels.CombinedComponentLabels(vc, v1alpha1.VarnishComponentVarnish))
varnishPort := intstr.FromString(v1alpha1.VarnishPortName)

varnishEndpoints, _, err := r.getPodsInfo(ctx, labels, varnishPort)
varnishEndpoints, _, err := r.getPodsInfo(ctx, []string{r.config.Namespace}, varnishLables, varnishPort)
if err != nil {
return nil, errors.WithStack(err)
}

return varnishEndpoints, nil
}

func (r *ReconcileVarnish) getPodsInfo(ctx context.Context, labels labels.Selector, validPort intstr.IntOrString) ([]PodInfo, int32, error) {

found := &v1.EndpointsList{}
err := r.List(ctx, found, client.MatchingLabelsSelector{Selector: labels}, client.InNamespace(r.config.Namespace))
if err != nil {
return nil, 0, errors.Wrapf(err, "could not retrieve endpoints from namespace %s with labels %s", r.config.Namespace, labels.String())
}
func (r *ReconcileVarnish) getPodsInfo(ctx context.Context, namespaces []string, labels labels.Selector, validPort intstr.IntOrString) ([]PodInfo, int32, error) {
var pods []v1.Pod
for _, namespace := range namespaces {
listOptions := []client.ListOption{
client.MatchingLabelsSelector{Selector: labels},
client.InNamespace(namespace),
}
found := &v1.PodList{}
err := r.List(ctx, found, listOptions...)
if err != nil {
return nil, 0, errors.Wrapf(err, "could not retrieve endpoints from namespace %v with labels %s", namespaces, labels.String())
}

if len(found.Items) == 0 {
return nil, 0, errors.Errorf("no endpoints from namespace %s matching labels %s", r.config.Namespace, labels.String())
pods = append(pods, found.Items...)
}

var portNumber int32
var podInfoList []PodInfo
for _, endpoints := range found.Items {
for _, endpoint := range endpoints.Subsets {
for _, address := range append(endpoint.Addresses, endpoint.NotReadyAddresses...) {
for _, port := range endpoint.Ports {
if port.Port == validPort.IntVal || port.Name == validPort.StrVal {
portNumber = port.Port
var backendWeight float64 = 1.0
nodeLabels, err := r.getNodeLabels(ctx, *address.NodeName)
if err != nil {
return nil, 0, errors.WithStack(err)
}
b := PodInfo{IP: address.IP, NodeLabels: nodeLabels, PodName: address.TargetRef.Name, Weight: backendWeight}
podInfoList = append(podInfoList, b)
break

if len(pods) == 0 {
r.logger.Infof("No pods found by labels %v in namespace(s) %v", labels.String(), namespaces)
return podInfoList, 0, nil
}

for _, pod := range pods {
if len(pod.Status.PodIP) == 0 || len(pod.Spec.NodeName) == 0 {
continue
}

for _, container := range pod.Spec.Containers {
for _, containerPort := range container.Ports {
if containerPort.ContainerPort == validPort.IntVal || containerPort.Name == validPort.StrVal {
portNumber = containerPort.ContainerPort
var backendWeight = 1.0
nodeLabels, err := r.getNodeLabels(ctx, pod.Spec.NodeName)
if err != nil {
return nil, 0, errors.WithStack(err)
}
b := PodInfo{IP: pod.Status.PodIP, NodeLabels: nodeLabels, PodName: pod.Name, Weight: backendWeight}
podInfoList = append(podInfoList, b)
break
}
}
}
@@ -185,7 +200,6 @@ func calculateBackendRatio(backends []PodInfo, currentZone string, zoneLabel str
}

func checkMultizone(endpoints []PodInfo, zoneLabel string, currentZone string) bool {

for _, b := range endpoints {
if _, ok := b.NodeLabels[zoneLabel]; ok {
if b.NodeLabels[zoneLabel] != currentZone {