test/e2e: nuke obsolete launch/terminate API.
Remove the old launch and terminate script API functions, which were
used to start/stop test plugins via DaemonSet deployment files. These
are obsoleted by the recently introduced helm-based versions and
should now be unused.
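A rough migration sketch for tests that still call the removed
functions (helm-launch is the replacement script API further down in
this file; the helm-terminate counterpart and the exact arguments are
assumed here, not confirmed by this diff):

    # old, removed script API (DaemonSet deployment files):
    nri_resource_policy_cfg=/tmp/topology-aware.cfg launch nri-resource-policy
    terminate nri-resource-policy

    # assumed helm-based replacement:
    helm-launch nri-resource-policy
    helm-terminate nri-resource-policy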

Signed-off-by: Krisztian Litkey <[email protected]>
klihub committed Nov 3, 2023
1 parent 1e18cef commit 70e4aed
Showing 1 changed file with 0 additions and 129 deletions.
129 changes: 0 additions & 129 deletions test/e2e/run.sh
@@ -242,135 +242,6 @@ instantiate() { # script API
    echo "$RESULT"
}

launch() { # script API
    # Usage: launch TARGET
    #
    # Supported TARGETs:
    #   nri-resource-policy: launch nri-resource-policy on VM. Environment variables:
    #       nri_resource_policy_cfg: configuration filepath (on host)
    #       nri_resource_policy_extra_args: extra arguments on command line
    #       nri_resource_policy_config: "force" (default) or "fallback"
    #       k8scri: if the CRI pipe starts with nri-resource-policy
    #               this launches nri-resource-policy as a proxy,
    #               otherwise as a dynamic NRI plugin.
    #
    #   nri-resource-policy-daemonset:
    #       launch nri-resource-policy on VM using Kubernetes DaemonSet
    #
    #   nri-resource-policy-systemd:
    #       launch nri-resource-policy on VM using "systemctl start".
    #       Works when installed with binsrc=packages/<distro>.
    #       Environment variables:
    #       nri_resource_policy_cfg: configuration filepath (on host)
    #
    # Example:
    #   nri_resource_policy_cfg=/tmp/topology-aware.cfg launch nri-resource-policy
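    #
    # Another, hypothetical example based on the variables documented above:
    # use the "fallback" configuration mechanism instead of the default "force" one:
    #   nri_resource_policy_config=fallback nri_resource_policy_cfg=/tmp/topology-aware.cfg launch nri-resource-policy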

    local target="$1"
    local launch_cmd
    local node_resource_topology_schema="$SRC_DIR/deployment/base/crds/noderesourcetopology_crd.yaml"
    local nri_resource_policy_config_option="-${nri_resource_policy_config:-force}-config"
    local nri_resource_policy_mode=""

    case $target in
        "nri-resource-policy-systemd")
            host-command "$SCP \"$nri_resource_policy_cfg\" $VM_HOSTNAME:" ||
                command-error "copying \"$nri_resource_policy_cfg\" to VM failed"
            vm-command "cp \"$(basename "$nri_resource_policy_cfg")\" /etc/nri-resource-policy/fallback.cfg"
            vm-command "systemctl daemon-reload ; systemctl start nri-resource-policy" ||
                command-error "systemd failed to start nri-resource-policy"
            vm-wait-process --timeout 30 nri-resource-policy
            vm-command "systemctl is-active nri-resource-policy" || {
                vm-command "systemctl status nri-resource-policy"
                command-error "nri-resource-policy did not become active after systemctl start"
            }
            ;;

"nri-resource-policy")
if [ "$nri_resource_policy_config" == "fallback" ]; then
nri_resource_policy_deployment_file="/etc/nri-resource-policy/nri-resource-policy-deployment-fallback.yaml"
else
nri_resource_policy_deployment_file="/etc/nri-resource-policy/nri-resource-policy-deployment.yaml"
fi
vm-command "chown $VM_SSH_USER:$VM_SSH_USER /etc/nri-resource-policy/"
vm-command "rm -f /etc/nri-resource-policy/nri-resource-policy.cfg"
host-command "$SCP \"$nri_resource_policy_cfg\" $VM_HOSTNAME:/etc/nri-resource-policy/nri-resource-policy.cfg" || {
command-error "copying \"$nri_resource_policy_cfg\" to VM failed"
}
host-command "$SCP \"$node_resource_topology_schema\" $VM_HOSTNAME:" ||
command-error "copying \"$node_resource_topology_schema\" to VM failed"
vm-command "kubectl delete -f $(basename "$node_resource_topology_schema"); kubectl create -f $(basename "$node_resource_topology_schema")"
vm-command "kubectl apply -f $nri_resource_policy_deployment_file" ||
error "Cannot apply deployment"

if [ "${wait_t}" = "none" ]; then
return 0
fi

# Direct logs to output file
local POD="$(namespace=kube-system wait_t=${wait_t:-120} vm-wait-pod-regexp nri-resource-policy-)"
if [ ! -z "$POD" ]; then
# If the POD contains \n, then the old pod is still there. Wait a sec in this
# case and retry.
local POD_CHECK=$(echo "$POD" | awk 'BEGIN { RS=""; FS="\n"} { print $2 }')
if [ ! -z "$POD_CHECK" ]; then
sleep 1
POD="$(namespace=kube-system wait_t=${wait_t:-60} vm-wait-pod-regexp nri-resource-policy-)"
if [ -z "$POD" ]; then
error "Cannot figure out pod name"
fi
fi

if [ "$ds_wait_t" != "none" ]; then
# Wait a while so that the status check can get somewhat meaningful status
vm-command "kubectl -n kube-system rollout status daemonset/nri-resource-policy --timeout=${ds_wait_t:-20s}"
if [ $? -ne 0 ]; then
error "Timeout while waiting daemonset/nri-resource-policy to be ready"
fi
fi

# Check if we have anything else than Running status for the pod
status="$(vm-command-q "kubectl get pod "$POD" -n kube-system | tail -1 | awk '{ print \$3 }'")"
if [ "$status" != "Running" ]; then
# Check if nri-resource-policy failed
if vm-command "kubectl logs $POD -n kube-system | tail -1 | grep -q ^F" 2>&1; then
error "Cannot start nri-resource-policy"
fi
fi

vm-command "fuser --kill nri-resource-policy.output.txt 2>/dev/null"
vm-command "kubectl -n kube-system logs "$POD" -f >nri-resource-policy.output.txt 2>&1 &"

vm-port-forward-enable
else
error "nri-resource-policy pod not found"
fi
;;

*)
error "launch: invalid target \"$1\""
;;
esac
return 0
}

terminate() { # script API
    # Usage: terminate TARGET
    #
    # Supported TARGETs:
    #   nri-resource-policy: stop (kill) nri-resource-policy.
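    #
    # Example (hypothetical, mirroring the launch example above):
    #   terminate nri-resource-policy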
    local target="$1"
    case $target in
        "nri-resource-policy")
            vm-port-forward-disable
            vm-command "kubectl delete -f /etc/nri-resource-policy/nri-resource-policy-deployment.yaml"
            ;;
        *)
            error "terminate: invalid target \"$target\""
            ;;
    esac
}

helm-launch() { # script API
    # Usage: helm-launch TARGET
    #